author  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-21 16:57:13 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-21 16:57:13 -0500
commit  7ed214ac2095f561a94335ca672b6c42a1ea40ff (patch)
tree  da41901bff1d0d8d61170bf362384fdc61deb3ab
parent  21eaab6d19ed43e82ed39c8deb7f192134fb4a0e (diff)
parent  29e5507ae4ab34397f538f06b7070c81a4e4a2bf (diff)
Merge tag 'char-misc-3.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver patches from Greg Kroah-Hartman:
 "Here's the big char/misc driver patches for 3.9-rc1. Nothing major here,
  just lots of different driver updates (mei, hyperv, ipack, extcon, vmci,
  etc.). All of these have been in the linux-next tree for a while."

* tag 'char-misc-3.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (209 commits)
  w1: w1_therm: Add force-pullup option for "broken" sensors
  w1: ds2482: Added 1-Wire pull-up support to the driver
  vme: add missing put_device() after device_register() fails
  extcon: max8997: Use workqueue to check cable state after completing boot of platform
  extcon: max8997: Set default UART/USB path on probe
  extcon: max8997: Consolidate duplicate code for checking ADC/CHG cable type
  extcon: max8997: Set default of ADC debounce time during initialization
  extcon: max8997: Remove duplicate code related to set H/W line path
  extcon: max8997: Move defined constant to header file
  extcon: max77693: Make max77693_extcon_cable static
  extcon: max8997: Remove unreachable code
  extcon: max8997: Make max8997_extcon_cable static
  extcon: max77693: Remove unnecessary goto statement to improve readability
  extcon: max77693: Convert to devm_input_allocate_device()
  extcon: gpio: Rename filename of extcon-gpio.c according to kernel naming style
  CREDITS: update email and address of Harald Hoyer
  extcon: arizona: Use MICDET for final microphone identification
  extcon: arizona: Always take the first HPDET reading as the final one
  extcon: arizona: Clear _trig_sts bits after jack detection
  extcon: arizona: Don't HPDET magic when headphones are enabled
  ...
-rw-r--r--  CREDITS | 8
-rw-r--r--  Documentation/DocBook/uio-howto.tmpl | 2
-rw-r--r--  Documentation/w1/slaves/w1_therm | 13
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/char/hw_random/exynos-rng.c | 2
-rw-r--r--  drivers/char/mem.c | 10
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 645
-rw-r--r--  drivers/extcon/Kconfig | 4
-rw-r--r--  drivers/extcon/extcon-arizona.c | 810
-rw-r--r--  drivers/extcon/extcon-gpio.c | 2
-rw-r--r--  drivers/extcon/extcon-max77693.c | 981
-rw-r--r--  drivers/extcon/extcon-max8997.c | 734
-rw-r--r--  drivers/hid/hid-hyperv.c | 3
-rw-r--r--  drivers/hv/channel.c | 33
-rw-r--r--  drivers/hv/channel_mgmt.c | 93
-rw-r--r--  drivers/hv/connection.c | 232
-rw-r--r--  drivers/hv/hv.c | 72
-rw-r--r--  drivers/hv/hv_balloon.c | 63
-rw-r--r--  drivers/hv/hv_util.c | 46
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 65
-rw-r--r--  drivers/hv/ring_buffer.c | 130
-rw-r--r--  drivers/hv/vmbus_drv.c | 54
-rw-r--r--  drivers/ipack/devices/ipoctal.c | 113
-rw-r--r--  drivers/mfd/wm5102-tables.c | 10
-rw-r--r--  drivers/misc/Kconfig | 12
-rw-r--r--  drivers/misc/Makefile | 3
-rw-r--r--  drivers/misc/cb710/Kconfig | 2
-rw-r--r--  drivers/misc/lattice-ecp3-config.c | 243
-rw-r--r--  drivers/misc/mei/Kconfig | 15
-rw-r--r--  drivers/misc/mei/Makefile | 6
-rw-r--r--  drivers/misc/mei/amthif.c | 164
-rw-r--r--  drivers/misc/mei/client.c | 729
-rw-r--r--  drivers/misc/mei/client.h | 102
-rw-r--r--  drivers/misc/mei/hbm.c | 669
-rw-r--r--  drivers/misc/mei/hbm.h | 39
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 167
-rw-r--r--  drivers/misc/mei/hw-me.c | 576
-rw-r--r--  drivers/misc/mei/hw-me.h | 48
-rw-r--r--  drivers/misc/mei/hw.h | 125
-rw-r--r--  drivers/misc/mei/init.c | 572
-rw-r--r--  drivers/misc/mei/interface.c | 388
-rw-r--r--  drivers/misc/mei/interface.h | 81
-rw-r--r--  drivers/misc/mei/interrupt.c | 656
-rw-r--r--  drivers/misc/mei/iorw.c | 366
-rw-r--r--  drivers/misc/mei/main.c | 536
-rw-r--r--  drivers/misc/mei/mei_dev.h | 350
-rw-r--r--  drivers/misc/mei/pci-me.c | 396
-rw-r--r--  drivers/misc/mei/wd.c | 77
-rw-r--r--  drivers/misc/ti-st/st_core.c | 3
-rw-r--r--  drivers/misc/vmw_vmci/Kconfig | 16
-rw-r--r--  drivers/misc/vmw_vmci/Makefile | 4
-rw-r--r--  drivers/misc/vmw_vmci/vmci_context.c | 1214
-rw-r--r--  drivers/misc/vmw_vmci/vmci_context.h | 182
-rw-r--r--  drivers/misc/vmw_vmci/vmci_datagram.c | 500
-rw-r--r--  drivers/misc/vmw_vmci/vmci_datagram.h | 52
-rw-r--r--  drivers/misc/vmw_vmci/vmci_doorbell.c | 604
-rw-r--r--  drivers/misc/vmw_vmci/vmci_doorbell.h | 51
-rw-r--r--  drivers/misc/vmw_vmci/vmci_driver.c | 117
-rw-r--r--  drivers/misc/vmw_vmci/vmci_driver.h | 50
-rw-r--r--  drivers/misc/vmw_vmci/vmci_event.c | 224
-rw-r--r--  drivers/misc/vmw_vmci/vmci_event.h | 25
-rw-r--r--  drivers/misc/vmw_vmci/vmci_guest.c | 759
-rw-r--r--  drivers/misc/vmw_vmci/vmci_handle_array.c | 142
-rw-r--r--  drivers/misc/vmw_vmci/vmci_handle_array.h | 52
-rw-r--r--  drivers/misc/vmw_vmci/vmci_host.c | 1043
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.c | 3425
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.h | 191
-rw-r--r--  drivers/misc/vmw_vmci/vmci_resource.c | 229
-rw-r--r--  drivers/misc/vmw_vmci/vmci_resource.h | 59
-rw-r--r--  drivers/misc/vmw_vmci/vmci_route.c | 226
-rw-r--r--  drivers/misc/vmw_vmci/vmci_route.h | 30
-rw-r--r--  drivers/mmc/host/Kconfig | 2
-rw-r--r--  drivers/net/Kconfig | 4
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 3
-rw-r--r--  drivers/net/ntb_netdev.c | 408
-rw-r--r--  drivers/ntb/Kconfig | 13
-rw-r--r--  drivers/ntb/Makefile | 3
-rw-r--r--  drivers/ntb/ntb_hw.c | 1141
-rw-r--r--  drivers/ntb/ntb_hw.h | 181
-rw-r--r--  drivers/ntb/ntb_regs.h | 139
-rw-r--r--  drivers/ntb/ntb_transport.c | 1441
-rw-r--r--  drivers/pcmcia/i82092.c | 8
-rw-r--r--  drivers/pcmcia/vrc4171_card.c | 1
-rw-r--r--  drivers/scsi/storvsc_drv.c | 12
-rw-r--r--  drivers/vme/vme.c | 1
-rw-r--r--  drivers/w1/masters/ds1wm.c | 52
-rw-r--r--  drivers/w1/masters/ds2482.c | 51
-rw-r--r--  drivers/w1/masters/mxc_w1.c | 49
-rw-r--r--  drivers/w1/masters/w1-gpio.c | 2
-rw-r--r--  drivers/w1/slaves/w1_therm.c | 36
-rw-r--r--  include/linux/extcon/extcon-gpio.h (renamed from include/linux/extcon/extcon_gpio.h) | 0
-rw-r--r--  include/linux/hyperv.h | 248
-rw-r--r--  include/linux/mfd/arizona/core.h | 4
-rw-r--r--  include/linux/mfd/arizona/pdata.h | 21
-rw-r--r--  include/linux/mfd/arizona/registers.h | 56
-rw-r--r--  include/linux/mfd/max77693-private.h | 86
-rw-r--r--  include/linux/mfd/max77693.h | 9
-rw-r--r--  include/linux/mfd/max8997-private.h | 64
-rw-r--r--  include/linux/mfd/max8997.h | 25
-rw-r--r--  include/linux/ntb.h | 83
-rw-r--r--  include/linux/vmw_vmci_api.h | 82
-rw-r--r--  include/linux/vmw_vmci_defs.h | 880
-rw-r--r--  tools/hv/hv_kvp_daemon.c | 79
-rwxr-xr-x  tools/hv/hv_set_ifconfig.sh | 24
107 files changed, 20749 insertions, 4115 deletions
diff --git a/CREDITS b/CREDITS
index 2346b09ca8bb..948e0fb9a70e 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1572,12 +1572,12 @@ S: Wantage, New Jersey 07461
 S: USA
 
 N: Harald Hoyer
-E: harald.hoyer@parzelle.de
-W: http://parzelle.de/
+E: harald@redhat.com
+W: http://www.harald-hoyer.de
 D: ip_masq_quake
 D: md boot support
-S: Hohe Strasse 30
-S: D-70176 Stuttgart
+S: Am Strand 5
+S: D-19063 Schwerin
 S: Germany
 
 N: Jan Hubicka
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
index ddb05e98af0d..95618159e29b 100644
--- a/Documentation/DocBook/uio-howto.tmpl
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -984,7 +984,7 @@ int main()
 		return errno;
 	}
 	configfd = open(&quot;/sys/class/uio/uio0/device/config&quot;, O_RDWR);
-	if (uiofd &lt; 0) {
+	if (configfd &lt; 0) {
 		perror(&quot;config open:&quot;);
 		return errno;
 	}
diff --git a/Documentation/w1/slaves/w1_therm b/Documentation/w1/slaves/w1_therm
index 874a8ca93feb..cc62a95e4776 100644
--- a/Documentation/w1/slaves/w1_therm
+++ b/Documentation/w1/slaves/w1_therm
@@ -34,9 +34,16 @@ currently supported. The driver also doesn't support reduced
 precision (which would also reduce the conversion time).
 
 The module parameter strong_pullup can be set to 0 to disable the
-strong pullup or 1 to enable. If enabled the 5V strong pullup will be
-enabled when the conversion is taking place provided the master driver
-must support the strong pullup (or it falls back to a pullup
+strong pullup, 1 to enable autodetection or 2 to force strong pullup.
+In case of autodetection, the driver will use the "READ POWER SUPPLY"
+command to check if there are pariste powered devices on the bus.
+If so, it will activate the master's strong pullup.
+In case the detection of parasite devices using this command fails
+(seems to be the case with some DS18S20) the strong pullup can
+be force-enabled.
+If the strong pullup is enabled, the master's strong pullup will be
+driven when the conversion is taking place, provided the master driver
+does support the strong pullup (or it falls back to a pullup
 resistor). The DS18b20 temperature sensor specification lists a
 maximum current draw of 1.5mA and that a 5k pullup resistor is not
 sufficient. The strong pullup is designed to provide the additional
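
A minimal usage sketch for the strong_pullup parameter documented above (this example is not part of the patch; it only assumes w1_therm is built as a module):

    # force the strong pullup for sensors whose parasite-power detection fails
    modprobe w1_therm strong_pullup=2

    # or make the setting persistent via modprobe configuration
    echo "options w1_therm strong_pullup=2" > /etc/modprobe.d/w1_therm.conf

Using strong_pullup=1 instead keeps the "READ POWER SUPPLY" autodetection path described above, and strong_pullup=0 disables the strong pullup entirely.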
diff --git a/MAINTAINERS b/MAINTAINERS
index eac5eda52640..f5b9851755a2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5405,6 +5405,13 @@ S: Maintained
 F: Documentation/scsi/NinjaSCSI.txt
 F: drivers/scsi/nsp32*
 
+NTB DRIVER
+M: Jon Mason <jon.mason@intel.com>
+S: Supported
+F: drivers/ntb/
+F: drivers/net/ntb_netdev.c
+F: include/linux/ntb.h
+
 NTFS FILESYSTEM
 M: Anton Altaparmakov <anton@tuxera.com>
 L: linux-ntfs-dev@lists.sourceforge.net
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 2b4e89ba15ad..202fa6d051b9 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -152,6 +152,8 @@ source "drivers/memory/Kconfig"
 
 source "drivers/iio/Kconfig"
 
+source "drivers/ntb/Kconfig"
+
 source "drivers/vme/Kconfig"
 
 source "drivers/pwm/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index a8d32f1094b4..b359948fc02b 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -147,3 +147,4 @@ obj-$(CONFIG_MEMORY) += memory/
 obj-$(CONFIG_IIO) += iio/
 obj-$(CONFIG_VME_BUS) += vme/
 obj-$(CONFIG_IPACK_BUS) += ipack/
+obj-$(CONFIG_NTB) += ntb/
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index 4673fc4ad931..ac47631ab34f 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -163,7 +163,7 @@ static int exynos_rng_runtime_resume(struct device *dev)
 }
 
 
-UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
+static UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
 			exynos_rng_runtime_resume, NULL);
 
 static struct platform_driver exynos_rng_driver = {
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index c6fa3bc2baa8..6f6e92a3102d 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -399,7 +399,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
 {
 	unsigned long p = *ppos;
 	ssize_t low_count, read, sz;
-	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
 	int err = 0;
 
 	read = 0;
@@ -527,7 +527,7 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
 	unsigned long p = *ppos;
 	ssize_t wrote = 0;
 	ssize_t virtr = 0;
-	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
 	int err = 0;
 
 	if (p < (unsigned long) high_memory) {
@@ -595,7 +595,7 @@ static ssize_t write_port(struct file *file, const char __user *buf,
 			  size_t count, loff_t *ppos)
 {
 	unsigned long i = *ppos;
-	const char __user * tmp = buf;
+	const char __user *tmp = buf;
 
 	if (!access_ok(VERIFY_READ, buf, count))
 		return -EFAULT;
@@ -729,7 +729,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
 	return ret;
 }
 
-static int open_port(struct inode * inode, struct file * filp)
+static int open_port(struct inode *inode, struct file *filp)
 {
 	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
 }
@@ -898,7 +898,7 @@ static int __init chr_dev_init(void)
 			continue;
 
 		/*
-		 * Create /dev/port? 
+		 * Create /dev/port?
 		 */
 		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
 			continue;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index d0c9852ab875..5c5cc00ebb07 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -102,8 +102,7 @@ static MGSL_PARAMS default_params = {
102 ASYNC_PARITY_NONE /* unsigned char parity; */ 102 ASYNC_PARITY_NONE /* unsigned char parity; */
103}; 103};
104 104
105typedef struct 105typedef struct {
106{
107 int count; 106 int count;
108 unsigned char status; 107 unsigned char status;
109 char data[1]; 108 char data[1];
@@ -326,10 +325,10 @@ typedef struct _mgslpc_info {
326#define write_reg16(info, reg, val) outw((val), (info)->io_base + (reg)) 325#define write_reg16(info, reg, val) outw((val), (info)->io_base + (reg))
327 326
328#define set_reg_bits(info, reg, mask) \ 327#define set_reg_bits(info, reg, mask) \
329 write_reg(info, (reg), \ 328 write_reg(info, (reg), \
330 (unsigned char) (read_reg(info, (reg)) | (mask))) 329 (unsigned char) (read_reg(info, (reg)) | (mask)))
331#define clear_reg_bits(info, reg, mask) \ 330#define clear_reg_bits(info, reg, mask) \
332 write_reg(info, (reg), \ 331 write_reg(info, (reg), \
333 (unsigned char) (read_reg(info, (reg)) & ~(mask))) 332 (unsigned char) (read_reg(info, (reg)) & ~(mask)))
334/* 333/*
335 * interrupt enable/disable routines 334 * interrupt enable/disable routines
@@ -356,10 +355,10 @@ static void irq_enable(MGSLPC_INFO *info, unsigned char channel, unsigned short
356} 355}
357 356
358#define port_irq_disable(info, mask) \ 357#define port_irq_disable(info, mask) \
359 { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); } 358 { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); }
360 359
361#define port_irq_enable(info, mask) \ 360#define port_irq_enable(info, mask) \
362 { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); } 361 { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); }
363 362
364static void rx_start(MGSLPC_INFO *info); 363static void rx_start(MGSLPC_INFO *info);
365static void rx_stop(MGSLPC_INFO *info); 364static void rx_stop(MGSLPC_INFO *info);
@@ -397,7 +396,7 @@ static int adapter_test(MGSLPC_INFO *info);
397 396
398static int claim_resources(MGSLPC_INFO *info); 397static int claim_resources(MGSLPC_INFO *info);
399static void release_resources(MGSLPC_INFO *info); 398static void release_resources(MGSLPC_INFO *info);
400static void mgslpc_add_device(MGSLPC_INFO *info); 399static int mgslpc_add_device(MGSLPC_INFO *info);
401static void mgslpc_remove_device(MGSLPC_INFO *info); 400static void mgslpc_remove_device(MGSLPC_INFO *info);
402 401
403static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty); 402static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty);
@@ -514,49 +513,56 @@ static const struct tty_port_operations mgslpc_port_ops = {
514 513
515static int mgslpc_probe(struct pcmcia_device *link) 514static int mgslpc_probe(struct pcmcia_device *link)
516{ 515{
517 MGSLPC_INFO *info; 516 MGSLPC_INFO *info;
518 int ret; 517 int ret;
519 518
520 if (debug_level >= DEBUG_LEVEL_INFO) 519 if (debug_level >= DEBUG_LEVEL_INFO)
521 printk("mgslpc_attach\n"); 520 printk("mgslpc_attach\n");
522 521
523 info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL); 522 info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
524 if (!info) { 523 if (!info) {
525 printk("Error can't allocate device instance data\n"); 524 printk("Error can't allocate device instance data\n");
526 return -ENOMEM; 525 return -ENOMEM;
527 } 526 }
528 527
529 info->magic = MGSLPC_MAGIC; 528 info->magic = MGSLPC_MAGIC;
530 tty_port_init(&info->port); 529 tty_port_init(&info->port);
531 info->port.ops = &mgslpc_port_ops; 530 info->port.ops = &mgslpc_port_ops;
532 INIT_WORK(&info->task, bh_handler); 531 INIT_WORK(&info->task, bh_handler);
533 info->max_frame_size = 4096; 532 info->max_frame_size = 4096;
534 info->port.close_delay = 5*HZ/10; 533 info->port.close_delay = 5*HZ/10;
535 info->port.closing_wait = 30*HZ; 534 info->port.closing_wait = 30*HZ;
536 init_waitqueue_head(&info->status_event_wait_q); 535 init_waitqueue_head(&info->status_event_wait_q);
537 init_waitqueue_head(&info->event_wait_q); 536 init_waitqueue_head(&info->event_wait_q);
538 spin_lock_init(&info->lock); 537 spin_lock_init(&info->lock);
539 spin_lock_init(&info->netlock); 538 spin_lock_init(&info->netlock);
540 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); 539 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
541 info->idle_mode = HDLC_TXIDLE_FLAGS; 540 info->idle_mode = HDLC_TXIDLE_FLAGS;
542 info->imra_value = 0xffff; 541 info->imra_value = 0xffff;
543 info->imrb_value = 0xffff; 542 info->imrb_value = 0xffff;
544 info->pim_value = 0xff; 543 info->pim_value = 0xff;
545 544
546 info->p_dev = link; 545 info->p_dev = link;
547 link->priv = info; 546 link->priv = info;
548 547
549 /* Initialize the struct pcmcia_device structure */ 548 /* Initialize the struct pcmcia_device structure */
550 549
551 ret = mgslpc_config(link); 550 ret = mgslpc_config(link);
552 if (ret) { 551 if (ret != 0)
553 tty_port_destroy(&info->port); 552 goto failed;
554 return ret; 553
555 } 554 ret = mgslpc_add_device(info);
556 555 if (ret != 0)
557 mgslpc_add_device(info); 556 goto failed_release;
558 557
559 return 0; 558 return 0;
559
560failed_release:
561 mgslpc_release((u_long)link);
562failed:
563 tty_port_destroy(&info->port);
564 kfree(info);
565 return ret;
560} 566}
561 567
562/* Card has been inserted. 568/* Card has been inserted.
@@ -569,35 +575,35 @@ static int mgslpc_ioprobe(struct pcmcia_device *p_dev, void *priv_data)
569 575
570static int mgslpc_config(struct pcmcia_device *link) 576static int mgslpc_config(struct pcmcia_device *link)
571{ 577{
572 MGSLPC_INFO *info = link->priv; 578 MGSLPC_INFO *info = link->priv;
573 int ret; 579 int ret;
574 580
575 if (debug_level >= DEBUG_LEVEL_INFO) 581 if (debug_level >= DEBUG_LEVEL_INFO)
576 printk("mgslpc_config(0x%p)\n", link); 582 printk("mgslpc_config(0x%p)\n", link);
577 583
578 link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; 584 link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
579 585
580 ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL); 586 ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
581 if (ret != 0) 587 if (ret != 0)
582 goto failed; 588 goto failed;
583 589
584 link->config_index = 8; 590 link->config_index = 8;
585 link->config_regs = PRESENT_OPTION; 591 link->config_regs = PRESENT_OPTION;
586 592
587 ret = pcmcia_request_irq(link, mgslpc_isr); 593 ret = pcmcia_request_irq(link, mgslpc_isr);
588 if (ret) 594 if (ret)
589 goto failed; 595 goto failed;
590 ret = pcmcia_enable_device(link); 596 ret = pcmcia_enable_device(link);
591 if (ret) 597 if (ret)
592 goto failed; 598 goto failed;
593 599
594 info->io_base = link->resource[0]->start; 600 info->io_base = link->resource[0]->start;
595 info->irq_level = link->irq; 601 info->irq_level = link->irq;
596 return 0; 602 return 0;
597 603
598failed: 604failed:
599 mgslpc_release((u_long)link); 605 mgslpc_release((u_long)link);
600 return -ENODEV; 606 return -ENODEV;
601} 607}
602 608
603/* Card has been removed. 609/* Card has been removed.
@@ -703,12 +709,12 @@ static void tx_pause(struct tty_struct *tty)
703 if (mgslpc_paranoia_check(info, tty->name, "tx_pause")) 709 if (mgslpc_paranoia_check(info, tty->name, "tx_pause"))
704 return; 710 return;
705 if (debug_level >= DEBUG_LEVEL_INFO) 711 if (debug_level >= DEBUG_LEVEL_INFO)
706 printk("tx_pause(%s)\n",info->device_name); 712 printk("tx_pause(%s)\n", info->device_name);
707 713
708 spin_lock_irqsave(&info->lock,flags); 714 spin_lock_irqsave(&info->lock, flags);
709 if (info->tx_enabled) 715 if (info->tx_enabled)
710 tx_stop(info); 716 tx_stop(info);
711 spin_unlock_irqrestore(&info->lock,flags); 717 spin_unlock_irqrestore(&info->lock, flags);
712} 718}
713 719
714static void tx_release(struct tty_struct *tty) 720static void tx_release(struct tty_struct *tty)
@@ -719,12 +725,12 @@ static void tx_release(struct tty_struct *tty)
719 if (mgslpc_paranoia_check(info, tty->name, "tx_release")) 725 if (mgslpc_paranoia_check(info, tty->name, "tx_release"))
720 return; 726 return;
721 if (debug_level >= DEBUG_LEVEL_INFO) 727 if (debug_level >= DEBUG_LEVEL_INFO)
722 printk("tx_release(%s)\n",info->device_name); 728 printk("tx_release(%s)\n", info->device_name);
723 729
724 spin_lock_irqsave(&info->lock,flags); 730 spin_lock_irqsave(&info->lock, flags);
725 if (!info->tx_enabled) 731 if (!info->tx_enabled)
726 tx_start(info, tty); 732 tx_start(info, tty);
727 spin_unlock_irqrestore(&info->lock,flags); 733 spin_unlock_irqrestore(&info->lock, flags);
728} 734}
729 735
730/* Return next bottom half action to perform. 736/* Return next bottom half action to perform.
@@ -735,7 +741,7 @@ static int bh_action(MGSLPC_INFO *info)
735 unsigned long flags; 741 unsigned long flags;
736 int rc = 0; 742 int rc = 0;
737 743
738 spin_lock_irqsave(&info->lock,flags); 744 spin_lock_irqsave(&info->lock, flags);
739 745
740 if (info->pending_bh & BH_RECEIVE) { 746 if (info->pending_bh & BH_RECEIVE) {
741 info->pending_bh &= ~BH_RECEIVE; 747 info->pending_bh &= ~BH_RECEIVE;
@@ -754,7 +760,7 @@ static int bh_action(MGSLPC_INFO *info)
754 info->bh_requested = false; 760 info->bh_requested = false;
755 } 761 }
756 762
757 spin_unlock_irqrestore(&info->lock,flags); 763 spin_unlock_irqrestore(&info->lock, flags);
758 764
759 return rc; 765 return rc;
760} 766}
@@ -766,7 +772,7 @@ static void bh_handler(struct work_struct *work)
766 int action; 772 int action;
767 773
768 if (debug_level >= DEBUG_LEVEL_BH) 774 if (debug_level >= DEBUG_LEVEL_BH)
769 printk( "%s(%d):bh_handler(%s) entry\n", 775 printk("%s(%d):bh_handler(%s) entry\n",
770 __FILE__,__LINE__,info->device_name); 776 __FILE__,__LINE__,info->device_name);
771 777
772 info->bh_running = true; 778 info->bh_running = true;
@@ -775,8 +781,8 @@ static void bh_handler(struct work_struct *work)
775 while((action = bh_action(info)) != 0) { 781 while((action = bh_action(info)) != 0) {
776 782
777 /* Process work item */ 783 /* Process work item */
778 if ( debug_level >= DEBUG_LEVEL_BH ) 784 if (debug_level >= DEBUG_LEVEL_BH)
779 printk( "%s(%d):bh_handler() work item action=%d\n", 785 printk("%s(%d):bh_handler() work item action=%d\n",
780 __FILE__,__LINE__,action); 786 __FILE__,__LINE__,action);
781 787
782 switch (action) { 788 switch (action) {
@@ -799,7 +805,7 @@ static void bh_handler(struct work_struct *work)
799 805
800 tty_kref_put(tty); 806 tty_kref_put(tty);
801 if (debug_level >= DEBUG_LEVEL_BH) 807 if (debug_level >= DEBUG_LEVEL_BH)
802 printk( "%s(%d):bh_handler(%s) exit\n", 808 printk("%s(%d):bh_handler(%s) exit\n",
803 __FILE__,__LINE__,info->device_name); 809 __FILE__,__LINE__,info->device_name);
804} 810}
805 811
@@ -828,7 +834,7 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
828 RXBUF *buf = (RXBUF*)(info->rx_buf + (info->rx_put * info->rx_buf_size)); 834 RXBUF *buf = (RXBUF*)(info->rx_buf + (info->rx_put * info->rx_buf_size));
829 835
830 if (debug_level >= DEBUG_LEVEL_ISR) 836 if (debug_level >= DEBUG_LEVEL_ISR)
831 printk("%s(%d):rx_ready_hdlc(eom=%d)\n",__FILE__,__LINE__,eom); 837 printk("%s(%d):rx_ready_hdlc(eom=%d)\n", __FILE__, __LINE__, eom);
832 838
833 if (!info->rx_enabled) 839 if (!info->rx_enabled)
834 return; 840 return;
@@ -844,7 +850,8 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
844 850
845 if (eom) { 851 if (eom) {
846 /* end of frame, get FIFO count from RBCL register */ 852 /* end of frame, get FIFO count from RBCL register */
847 if (!(fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f))) 853 fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f);
854 if (fifo_count == 0)
848 fifo_count = 32; 855 fifo_count = 32;
849 } else 856 } else
850 fifo_count = 32; 857 fifo_count = 32;
@@ -889,7 +896,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd)
889 unsigned char data, status, flag; 896 unsigned char data, status, flag;
890 int fifo_count; 897 int fifo_count;
891 int work = 0; 898 int work = 0;
892 struct mgsl_icount *icount = &info->icount; 899 struct mgsl_icount *icount = &info->icount;
893 900
894 if (tcd) { 901 if (tcd) {
895 /* early termination, get FIFO count from RBCL register */ 902 /* early termination, get FIFO count from RBCL register */
@@ -994,7 +1001,7 @@ static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty)
994 int c; 1001 int c;
995 1002
996 if (debug_level >= DEBUG_LEVEL_ISR) 1003 if (debug_level >= DEBUG_LEVEL_ISR)
997 printk("%s(%d):tx_ready(%s)\n", __FILE__,__LINE__,info->device_name); 1004 printk("%s(%d):tx_ready(%s)\n", __FILE__, __LINE__, info->device_name);
998 1005
999 if (info->params.mode == MGSL_MODE_HDLC) { 1006 if (info->params.mode == MGSL_MODE_HDLC) {
1000 if (!info->tx_active) 1007 if (!info->tx_active)
@@ -1239,7 +1246,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
1239 */ 1246 */
1240 1247
1241 if (info->pending_bh && !info->bh_running && !info->bh_requested) { 1248 if (info->pending_bh && !info->bh_running && !info->bh_requested) {
1242 if ( debug_level >= DEBUG_LEVEL_ISR ) 1249 if (debug_level >= DEBUG_LEVEL_ISR)
1243 printk("%s(%d):%s queueing bh task.\n", 1250 printk("%s(%d):%s queueing bh task.\n",
1244 __FILE__,__LINE__,info->device_name); 1251 __FILE__,__LINE__,info->device_name);
1245 schedule_work(&info->task); 1252 schedule_work(&info->task);
@@ -1263,7 +1270,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
1263 int retval = 0; 1270 int retval = 0;
1264 1271
1265 if (debug_level >= DEBUG_LEVEL_INFO) 1272 if (debug_level >= DEBUG_LEVEL_INFO)
1266 printk("%s(%d):startup(%s)\n",__FILE__,__LINE__,info->device_name); 1273 printk("%s(%d):startup(%s)\n", __FILE__, __LINE__, info->device_name);
1267 1274
1268 if (info->port.flags & ASYNC_INITIALIZED) 1275 if (info->port.flags & ASYNC_INITIALIZED)
1269 return 0; 1276 return 0;
@@ -1273,7 +1280,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
1273 info->tx_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL); 1280 info->tx_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1274 if (!info->tx_buf) { 1281 if (!info->tx_buf) {
1275 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n", 1282 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1276 __FILE__,__LINE__,info->device_name); 1283 __FILE__, __LINE__, info->device_name);
1277 return -ENOMEM; 1284 return -ENOMEM;
1278 } 1285 }
1279 } 1286 }
@@ -1288,15 +1295,15 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
1288 retval = claim_resources(info); 1295 retval = claim_resources(info);
1289 1296
1290 /* perform existence check and diagnostics */ 1297 /* perform existence check and diagnostics */
1291 if ( !retval ) 1298 if (!retval)
1292 retval = adapter_test(info); 1299 retval = adapter_test(info);
1293 1300
1294 if ( retval ) { 1301 if (retval) {
1295 if (capable(CAP_SYS_ADMIN) && tty) 1302 if (capable(CAP_SYS_ADMIN) && tty)
1296 set_bit(TTY_IO_ERROR, &tty->flags); 1303 set_bit(TTY_IO_ERROR, &tty->flags);
1297 release_resources(info); 1304 release_resources(info);
1298 return retval; 1305 return retval;
1299 } 1306 }
1300 1307
1301 /* program hardware for current parameters */ 1308 /* program hardware for current parameters */
1302 mgslpc_change_params(info, tty); 1309 mgslpc_change_params(info, tty);
@@ -1320,7 +1327,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
1320 1327
1321 if (debug_level >= DEBUG_LEVEL_INFO) 1328 if (debug_level >= DEBUG_LEVEL_INFO)
1322 printk("%s(%d):mgslpc_shutdown(%s)\n", 1329 printk("%s(%d):mgslpc_shutdown(%s)\n",
1323 __FILE__,__LINE__, info->device_name ); 1330 __FILE__, __LINE__, info->device_name);
1324 1331
1325 /* clear status wait queue because status changes */ 1332 /* clear status wait queue because status changes */
1326 /* can't happen after shutting down the hardware */ 1333 /* can't happen after shutting down the hardware */
@@ -1334,7 +1341,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
1334 info->tx_buf = NULL; 1341 info->tx_buf = NULL;
1335 } 1342 }
1336 1343
1337 spin_lock_irqsave(&info->lock,flags); 1344 spin_lock_irqsave(&info->lock, flags);
1338 1345
1339 rx_stop(info); 1346 rx_stop(info);
1340 tx_stop(info); 1347 tx_stop(info);
@@ -1342,12 +1349,12 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
1342 /* TODO:disable interrupts instead of reset to preserve signal states */ 1349 /* TODO:disable interrupts instead of reset to preserve signal states */
1343 reset_device(info); 1350 reset_device(info);
1344 1351
1345 if (!tty || tty->termios.c_cflag & HUPCL) { 1352 if (!tty || tty->termios.c_cflag & HUPCL) {
1346 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); 1353 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1347 set_signals(info); 1354 set_signals(info);
1348 } 1355 }
1349 1356
1350 spin_unlock_irqrestore(&info->lock,flags); 1357 spin_unlock_irqrestore(&info->lock, flags);
1351 1358
1352 release_resources(info); 1359 release_resources(info);
1353 1360
@@ -1361,7 +1368,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
1361{ 1368{
1362 unsigned long flags; 1369 unsigned long flags;
1363 1370
1364 spin_lock_irqsave(&info->lock,flags); 1371 spin_lock_irqsave(&info->lock, flags);
1365 1372
1366 rx_stop(info); 1373 rx_stop(info);
1367 tx_stop(info); 1374 tx_stop(info);
@@ -1386,7 +1393,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
1386 if (info->netcount || (tty && (tty->termios.c_cflag & CREAD))) 1393 if (info->netcount || (tty && (tty->termios.c_cflag & CREAD)))
1387 rx_start(info); 1394 rx_start(info);
1388 1395
1389 spin_unlock_irqrestore(&info->lock,flags); 1396 spin_unlock_irqrestore(&info->lock, flags);
1390} 1397}
1391 1398
1392/* Reconfigure adapter based on new parameters 1399/* Reconfigure adapter based on new parameters
@@ -1401,13 +1408,13 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
1401 1408
1402 if (debug_level >= DEBUG_LEVEL_INFO) 1409 if (debug_level >= DEBUG_LEVEL_INFO)
1403 printk("%s(%d):mgslpc_change_params(%s)\n", 1410 printk("%s(%d):mgslpc_change_params(%s)\n",
1404 __FILE__,__LINE__, info->device_name ); 1411 __FILE__, __LINE__, info->device_name);
1405 1412
1406 cflag = tty->termios.c_cflag; 1413 cflag = tty->termios.c_cflag;
1407 1414
1408 /* if B0 rate (hangup) specified then negate RTS and DTR */ 1415 /* if B0 rate (hangup) specified then negate RTS and DTR */
1409 /* otherwise assert RTS and DTR */ 1416 /* otherwise assert RTS and DTR */
1410 if (cflag & CBAUD) 1417 if (cflag & CBAUD)
1411 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR; 1418 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
1412 else 1419 else
1413 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); 1420 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
@@ -1453,7 +1460,7 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
1453 info->params.data_rate = tty_get_baud_rate(tty); 1460 info->params.data_rate = tty_get_baud_rate(tty);
1454 } 1461 }
1455 1462
1456 if ( info->params.data_rate ) { 1463 if (info->params.data_rate) {
1457 info->timeout = (32*HZ*bits_per_char) / 1464 info->timeout = (32*HZ*bits_per_char) /
1458 info->params.data_rate; 1465 info->params.data_rate;
1459 } 1466 }
@@ -1488,8 +1495,8 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
1488 unsigned long flags; 1495 unsigned long flags;
1489 1496
1490 if (debug_level >= DEBUG_LEVEL_INFO) { 1497 if (debug_level >= DEBUG_LEVEL_INFO) {
1491 printk( "%s(%d):mgslpc_put_char(%d) on %s\n", 1498 printk("%s(%d):mgslpc_put_char(%d) on %s\n",
1492 __FILE__,__LINE__,ch,info->device_name); 1499 __FILE__, __LINE__, ch, info->device_name);
1493 } 1500 }
1494 1501
1495 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char")) 1502 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char"))
@@ -1498,7 +1505,7 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
1498 if (!info->tx_buf) 1505 if (!info->tx_buf)
1499 return 0; 1506 return 0;
1500 1507
1501 spin_lock_irqsave(&info->lock,flags); 1508 spin_lock_irqsave(&info->lock, flags);
1502 1509
1503 if (info->params.mode == MGSL_MODE_ASYNC || !info->tx_active) { 1510 if (info->params.mode == MGSL_MODE_ASYNC || !info->tx_active) {
1504 if (info->tx_count < TXBUFSIZE - 1) { 1511 if (info->tx_count < TXBUFSIZE - 1) {
@@ -1508,7 +1515,7 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
1508 } 1515 }
1509 } 1516 }
1510 1517
1511 spin_unlock_irqrestore(&info->lock,flags); 1518 spin_unlock_irqrestore(&info->lock, flags);
1512 return 1; 1519 return 1;
1513} 1520}
1514 1521
@@ -1521,8 +1528,8 @@ static void mgslpc_flush_chars(struct tty_struct *tty)
1521 unsigned long flags; 1528 unsigned long flags;
1522 1529
1523 if (debug_level >= DEBUG_LEVEL_INFO) 1530 if (debug_level >= DEBUG_LEVEL_INFO)
1524 printk( "%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n", 1531 printk("%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
1525 __FILE__,__LINE__,info->device_name,info->tx_count); 1532 __FILE__, __LINE__, info->device_name, info->tx_count);
1526 1533
1527 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars")) 1534 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars"))
1528 return; 1535 return;
@@ -1532,13 +1539,13 @@ static void mgslpc_flush_chars(struct tty_struct *tty)
1532 return; 1539 return;
1533 1540
1534 if (debug_level >= DEBUG_LEVEL_INFO) 1541 if (debug_level >= DEBUG_LEVEL_INFO)
1535 printk( "%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n", 1542 printk("%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
1536 __FILE__,__LINE__,info->device_name); 1543 __FILE__, __LINE__, info->device_name);
1537 1544
1538 spin_lock_irqsave(&info->lock,flags); 1545 spin_lock_irqsave(&info->lock, flags);
1539 if (!info->tx_active) 1546 if (!info->tx_active)
1540 tx_start(info, tty); 1547 tx_start(info, tty);
1541 spin_unlock_irqrestore(&info->lock,flags); 1548 spin_unlock_irqrestore(&info->lock, flags);
1542} 1549}
1543 1550
1544/* Send a block of data 1551/* Send a block of data
@@ -1559,8 +1566,8 @@ static int mgslpc_write(struct tty_struct * tty,
1559 unsigned long flags; 1566 unsigned long flags;
1560 1567
1561 if (debug_level >= DEBUG_LEVEL_INFO) 1568 if (debug_level >= DEBUG_LEVEL_INFO)
1562 printk( "%s(%d):mgslpc_write(%s) count=%d\n", 1569 printk("%s(%d):mgslpc_write(%s) count=%d\n",
1563 __FILE__,__LINE__,info->device_name,count); 1570 __FILE__, __LINE__, info->device_name, count);
1564 1571
1565 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") || 1572 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") ||
1566 !info->tx_buf) 1573 !info->tx_buf)
@@ -1586,26 +1593,26 @@ static int mgslpc_write(struct tty_struct * tty,
1586 1593
1587 memcpy(info->tx_buf + info->tx_put, buf, c); 1594 memcpy(info->tx_buf + info->tx_put, buf, c);
1588 1595
1589 spin_lock_irqsave(&info->lock,flags); 1596 spin_lock_irqsave(&info->lock, flags);
1590 info->tx_put = (info->tx_put + c) & (TXBUFSIZE-1); 1597 info->tx_put = (info->tx_put + c) & (TXBUFSIZE-1);
1591 info->tx_count += c; 1598 info->tx_count += c;
1592 spin_unlock_irqrestore(&info->lock,flags); 1599 spin_unlock_irqrestore(&info->lock, flags);
1593 1600
1594 buf += c; 1601 buf += c;
1595 count -= c; 1602 count -= c;
1596 ret += c; 1603 ret += c;
1597 } 1604 }
1598start: 1605start:
1599 if (info->tx_count && !tty->stopped && !tty->hw_stopped) { 1606 if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
1600 spin_lock_irqsave(&info->lock,flags); 1607 spin_lock_irqsave(&info->lock, flags);
1601 if (!info->tx_active) 1608 if (!info->tx_active)
1602 tx_start(info, tty); 1609 tx_start(info, tty);
1603 spin_unlock_irqrestore(&info->lock,flags); 1610 spin_unlock_irqrestore(&info->lock, flags);
1604 } 1611 }
1605cleanup: 1612cleanup:
1606 if (debug_level >= DEBUG_LEVEL_INFO) 1613 if (debug_level >= DEBUG_LEVEL_INFO)
1607 printk( "%s(%d):mgslpc_write(%s) returning=%d\n", 1614 printk("%s(%d):mgslpc_write(%s) returning=%d\n",
1608 __FILE__,__LINE__,info->device_name,ret); 1615 __FILE__, __LINE__, info->device_name, ret);
1609 return ret; 1616 return ret;
1610} 1617}
1611 1618
@@ -1633,7 +1640,7 @@ static int mgslpc_write_room(struct tty_struct *tty)
1633 1640
1634 if (debug_level >= DEBUG_LEVEL_INFO) 1641 if (debug_level >= DEBUG_LEVEL_INFO)
1635 printk("%s(%d):mgslpc_write_room(%s)=%d\n", 1642 printk("%s(%d):mgslpc_write_room(%s)=%d\n",
1636 __FILE__,__LINE__, info->device_name, ret); 1643 __FILE__, __LINE__, info->device_name, ret);
1637 return ret; 1644 return ret;
1638} 1645}
1639 1646
@@ -1646,7 +1653,7 @@ static int mgslpc_chars_in_buffer(struct tty_struct *tty)
1646 1653
1647 if (debug_level >= DEBUG_LEVEL_INFO) 1654 if (debug_level >= DEBUG_LEVEL_INFO)
1648 printk("%s(%d):mgslpc_chars_in_buffer(%s)\n", 1655 printk("%s(%d):mgslpc_chars_in_buffer(%s)\n",
1649 __FILE__,__LINE__, info->device_name ); 1656 __FILE__, __LINE__, info->device_name);
1650 1657
1651 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_chars_in_buffer")) 1658 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_chars_in_buffer"))
1652 return 0; 1659 return 0;
@@ -1658,7 +1665,7 @@ static int mgslpc_chars_in_buffer(struct tty_struct *tty)
1658 1665
1659 if (debug_level >= DEBUG_LEVEL_INFO) 1666 if (debug_level >= DEBUG_LEVEL_INFO)
1660 printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n", 1667 printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n",
1661 __FILE__,__LINE__, info->device_name, rc); 1668 __FILE__, __LINE__, info->device_name, rc);
1662 1669
1663 return rc; 1670 return rc;
1664} 1671}
@@ -1672,15 +1679,15 @@ static void mgslpc_flush_buffer(struct tty_struct *tty)
1672 1679
1673 if (debug_level >= DEBUG_LEVEL_INFO) 1680 if (debug_level >= DEBUG_LEVEL_INFO)
1674 printk("%s(%d):mgslpc_flush_buffer(%s) entry\n", 1681 printk("%s(%d):mgslpc_flush_buffer(%s) entry\n",
1675 __FILE__,__LINE__, info->device_name ); 1682 __FILE__, __LINE__, info->device_name);
1676 1683
1677 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_buffer")) 1684 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_buffer"))
1678 return; 1685 return;
1679 1686
1680 spin_lock_irqsave(&info->lock,flags); 1687 spin_lock_irqsave(&info->lock, flags);
1681 info->tx_count = info->tx_put = info->tx_get = 0; 1688 info->tx_count = info->tx_put = info->tx_get = 0;
1682 del_timer(&info->tx_timer); 1689 del_timer(&info->tx_timer);
1683 spin_unlock_irqrestore(&info->lock,flags); 1690 spin_unlock_irqrestore(&info->lock, flags);
1684 1691
1685 wake_up_interruptible(&tty->write_wait); 1692 wake_up_interruptible(&tty->write_wait);
1686 tty_wakeup(tty); 1693 tty_wakeup(tty);
@@ -1695,17 +1702,17 @@ static void mgslpc_send_xchar(struct tty_struct *tty, char ch)
1695 1702
1696 if (debug_level >= DEBUG_LEVEL_INFO) 1703 if (debug_level >= DEBUG_LEVEL_INFO)
1697 printk("%s(%d):mgslpc_send_xchar(%s,%d)\n", 1704 printk("%s(%d):mgslpc_send_xchar(%s,%d)\n",
1698 __FILE__,__LINE__, info->device_name, ch ); 1705 __FILE__, __LINE__, info->device_name, ch);
1699 1706
1700 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_send_xchar")) 1707 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_send_xchar"))
1701 return; 1708 return;
1702 1709
1703 info->x_char = ch; 1710 info->x_char = ch;
1704 if (ch) { 1711 if (ch) {
1705 spin_lock_irqsave(&info->lock,flags); 1712 spin_lock_irqsave(&info->lock, flags);
1706 if (!info->tx_enabled) 1713 if (!info->tx_enabled)
1707 tx_start(info, tty); 1714 tx_start(info, tty);
1708 spin_unlock_irqrestore(&info->lock,flags); 1715 spin_unlock_irqrestore(&info->lock, flags);
1709 } 1716 }
1710} 1717}
1711 1718
@@ -1718,7 +1725,7 @@ static void mgslpc_throttle(struct tty_struct * tty)
1718 1725
1719 if (debug_level >= DEBUG_LEVEL_INFO) 1726 if (debug_level >= DEBUG_LEVEL_INFO)
1720 printk("%s(%d):mgslpc_throttle(%s) entry\n", 1727 printk("%s(%d):mgslpc_throttle(%s) entry\n",
1721 __FILE__,__LINE__, info->device_name ); 1728 __FILE__, __LINE__, info->device_name);
1722 1729
1723 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_throttle")) 1730 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_throttle"))
1724 return; 1731 return;
@@ -1726,11 +1733,11 @@ static void mgslpc_throttle(struct tty_struct * tty)
1726 if (I_IXOFF(tty)) 1733 if (I_IXOFF(tty))
1727 mgslpc_send_xchar(tty, STOP_CHAR(tty)); 1734 mgslpc_send_xchar(tty, STOP_CHAR(tty));
1728 1735
1729 if (tty->termios.c_cflag & CRTSCTS) { 1736 if (tty->termios.c_cflag & CRTSCTS) {
1730 spin_lock_irqsave(&info->lock,flags); 1737 spin_lock_irqsave(&info->lock, flags);
1731 info->serial_signals &= ~SerialSignal_RTS; 1738 info->serial_signals &= ~SerialSignal_RTS;
1732 set_signals(info); 1739 set_signals(info);
1733 spin_unlock_irqrestore(&info->lock,flags); 1740 spin_unlock_irqrestore(&info->lock, flags);
1734 } 1741 }
1735} 1742}
1736 1743
@@ -1743,7 +1750,7 @@ static void mgslpc_unthrottle(struct tty_struct * tty)
1743 1750
1744 if (debug_level >= DEBUG_LEVEL_INFO) 1751 if (debug_level >= DEBUG_LEVEL_INFO)
1745 printk("%s(%d):mgslpc_unthrottle(%s) entry\n", 1752 printk("%s(%d):mgslpc_unthrottle(%s) entry\n",
1746 __FILE__,__LINE__, info->device_name ); 1753 __FILE__, __LINE__, info->device_name);
1747 1754
1748 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_unthrottle")) 1755 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_unthrottle"))
1749 return; 1756 return;
@@ -1755,11 +1762,11 @@ static void mgslpc_unthrottle(struct tty_struct * tty)
1755 mgslpc_send_xchar(tty, START_CHAR(tty)); 1762 mgslpc_send_xchar(tty, START_CHAR(tty));
1756 } 1763 }
1757 1764
1758 if (tty->termios.c_cflag & CRTSCTS) { 1765 if (tty->termios.c_cflag & CRTSCTS) {
1759 spin_lock_irqsave(&info->lock,flags); 1766 spin_lock_irqsave(&info->lock, flags);
1760 info->serial_signals |= SerialSignal_RTS; 1767 info->serial_signals |= SerialSignal_RTS;
1761 set_signals(info); 1768 set_signals(info);
1762 spin_unlock_irqrestore(&info->lock,flags); 1769 spin_unlock_irqrestore(&info->lock, flags);
1763 } 1770 }
1764} 1771}
1765 1772
@@ -1797,33 +1804,33 @@ static int get_params(MGSLPC_INFO * info, MGSL_PARAMS __user *user_params)
1797 * 1804 *
1798 * Arguments: 1805 * Arguments:
1799 * 1806 *
1800 * info pointer to device instance data 1807 * info pointer to device instance data
1801 * new_params user buffer containing new serial params 1808 * new_params user buffer containing new serial params
1802 * 1809 *
1803 * Returns: 0 if success, otherwise error code 1810 * Returns: 0 if success, otherwise error code
1804 */ 1811 */
1805static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty) 1812static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty)
1806{ 1813{
1807 unsigned long flags; 1814 unsigned long flags;
1808 MGSL_PARAMS tmp_params; 1815 MGSL_PARAMS tmp_params;
1809 int err; 1816 int err;
1810 1817
1811 if (debug_level >= DEBUG_LEVEL_INFO) 1818 if (debug_level >= DEBUG_LEVEL_INFO)
1812 printk("%s(%d):set_params %s\n", __FILE__,__LINE__, 1819 printk("%s(%d):set_params %s\n", __FILE__,__LINE__,
1813 info->device_name ); 1820 info->device_name);
1814 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS)); 1821 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
1815 if (err) { 1822 if (err) {
1816 if ( debug_level >= DEBUG_LEVEL_INFO ) 1823 if (debug_level >= DEBUG_LEVEL_INFO)
1817 printk( "%s(%d):set_params(%s) user buffer copy failed\n", 1824 printk("%s(%d):set_params(%s) user buffer copy failed\n",
1818 __FILE__,__LINE__,info->device_name); 1825 __FILE__, __LINE__, info->device_name);
1819 return -EFAULT; 1826 return -EFAULT;
1820 } 1827 }
1821 1828
1822 spin_lock_irqsave(&info->lock,flags); 1829 spin_lock_irqsave(&info->lock, flags);
1823 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); 1830 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
1824 spin_unlock_irqrestore(&info->lock,flags); 1831 spin_unlock_irqrestore(&info->lock, flags);
1825 1832
1826 mgslpc_change_params(info, tty); 1833 mgslpc_change_params(info, tty);
1827 1834
1828 return 0; 1835 return 0;
1829} 1836}
@@ -1841,13 +1848,13 @@ static int get_txidle(MGSLPC_INFO * info, int __user *idle_mode)
1841 1848
1842static int set_txidle(MGSLPC_INFO * info, int idle_mode) 1849static int set_txidle(MGSLPC_INFO * info, int idle_mode)
1843{ 1850{
1844 unsigned long flags; 1851 unsigned long flags;
1845 if (debug_level >= DEBUG_LEVEL_INFO) 1852 if (debug_level >= DEBUG_LEVEL_INFO)
1846 printk("set_txidle(%s,%d)\n", info->device_name, idle_mode); 1853 printk("set_txidle(%s,%d)\n", info->device_name, idle_mode);
1847 spin_lock_irqsave(&info->lock,flags); 1854 spin_lock_irqsave(&info->lock, flags);
1848 info->idle_mode = idle_mode; 1855 info->idle_mode = idle_mode;
1849 tx_set_idle(info); 1856 tx_set_idle(info);
1850 spin_unlock_irqrestore(&info->lock,flags); 1857 spin_unlock_irqrestore(&info->lock, flags);
1851 return 0; 1858 return 0;
1852} 1859}
1853 1860
@@ -1864,11 +1871,11 @@ static int get_interface(MGSLPC_INFO * info, int __user *if_mode)
1864 1871
1865static int set_interface(MGSLPC_INFO * info, int if_mode) 1872static int set_interface(MGSLPC_INFO * info, int if_mode)
1866{ 1873{
1867 unsigned long flags; 1874 unsigned long flags;
1868 unsigned char val; 1875 unsigned char val;
1869 if (debug_level >= DEBUG_LEVEL_INFO) 1876 if (debug_level >= DEBUG_LEVEL_INFO)
1870 printk("set_interface(%s,%d)\n", info->device_name, if_mode); 1877 printk("set_interface(%s,%d)\n", info->device_name, if_mode);
1871 spin_lock_irqsave(&info->lock,flags); 1878 spin_lock_irqsave(&info->lock, flags);
1872 info->if_mode = if_mode; 1879 info->if_mode = if_mode;
1873 1880
1874 val = read_reg(info, PVR) & 0x0f; 1881 val = read_reg(info, PVR) & 0x0f;
@@ -1880,18 +1887,18 @@ static int set_interface(MGSLPC_INFO * info, int if_mode)
1880 } 1887 }
1881 write_reg(info, PVR, val); 1888 write_reg(info, PVR, val);
1882 1889
1883 spin_unlock_irqrestore(&info->lock,flags); 1890 spin_unlock_irqrestore(&info->lock, flags);
1884 return 0; 1891 return 0;
1885} 1892}
1886 1893
1887static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty) 1894static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty)
1888{ 1895{
1889 unsigned long flags; 1896 unsigned long flags;
1890 1897
1891 if (debug_level >= DEBUG_LEVEL_INFO) 1898 if (debug_level >= DEBUG_LEVEL_INFO)
1892 printk("set_txenable(%s,%d)\n", info->device_name, enable); 1899 printk("set_txenable(%s,%d)\n", info->device_name, enable);
1893 1900
1894 spin_lock_irqsave(&info->lock,flags); 1901 spin_lock_irqsave(&info->lock, flags);
1895 if (enable) { 1902 if (enable) {
1896 if (!info->tx_enabled) 1903 if (!info->tx_enabled)
1897 tx_start(info, tty); 1904 tx_start(info, tty);
@@ -1899,18 +1906,18 @@ static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty)
1899 if (info->tx_enabled) 1906 if (info->tx_enabled)
1900 tx_stop(info); 1907 tx_stop(info);
1901 } 1908 }
1902 spin_unlock_irqrestore(&info->lock,flags); 1909 spin_unlock_irqrestore(&info->lock, flags);
1903 return 0; 1910 return 0;
1904} 1911}
1905 1912
1906static int tx_abort(MGSLPC_INFO * info) 1913static int tx_abort(MGSLPC_INFO * info)
1907{ 1914{
1908 unsigned long flags; 1915 unsigned long flags;
1909 1916
1910 if (debug_level >= DEBUG_LEVEL_INFO) 1917 if (debug_level >= DEBUG_LEVEL_INFO)
1911 printk("tx_abort(%s)\n", info->device_name); 1918 printk("tx_abort(%s)\n", info->device_name);
1912 1919
1913 spin_lock_irqsave(&info->lock,flags); 1920 spin_lock_irqsave(&info->lock, flags);
1914 if (info->tx_active && info->tx_count && 1921 if (info->tx_active && info->tx_count &&
1915 info->params.mode == MGSL_MODE_HDLC) { 1922 info->params.mode == MGSL_MODE_HDLC) {
1916 /* clear data count so FIFO is not filled on next IRQ. 1923 /* clear data count so FIFO is not filled on next IRQ.
@@ -1919,18 +1926,18 @@ static int tx_abort(MGSLPC_INFO * info)
1919 info->tx_count = info->tx_put = info->tx_get = 0; 1926 info->tx_count = info->tx_put = info->tx_get = 0;
1920 info->tx_aborting = true; 1927 info->tx_aborting = true;
1921 } 1928 }
1922 spin_unlock_irqrestore(&info->lock,flags); 1929 spin_unlock_irqrestore(&info->lock, flags);
1923 return 0; 1930 return 0;
1924} 1931}
1925 1932
1926static int set_rxenable(MGSLPC_INFO * info, int enable) 1933static int set_rxenable(MGSLPC_INFO * info, int enable)
1927{ 1934{
1928 unsigned long flags; 1935 unsigned long flags;
1929 1936
1930 if (debug_level >= DEBUG_LEVEL_INFO) 1937 if (debug_level >= DEBUG_LEVEL_INFO)
1931 printk("set_rxenable(%s,%d)\n", info->device_name, enable); 1938 printk("set_rxenable(%s,%d)\n", info->device_name, enable);
1932 1939
1933 spin_lock_irqsave(&info->lock,flags); 1940 spin_lock_irqsave(&info->lock, flags);
1934 if (enable) { 1941 if (enable) {
1935 if (!info->rx_enabled) 1942 if (!info->rx_enabled)
1936 rx_start(info); 1943 rx_start(info);
@@ -1938,21 +1945,21 @@ static int set_rxenable(MGSLPC_INFO * info, int enable)
1938 if (info->rx_enabled) 1945 if (info->rx_enabled)
1939 rx_stop(info); 1946 rx_stop(info);
1940 } 1947 }
1941 spin_unlock_irqrestore(&info->lock,flags); 1948 spin_unlock_irqrestore(&info->lock, flags);
1942 return 0; 1949 return 0;
1943} 1950}
1944 1951
1945/* wait for specified event to occur 1952/* wait for specified event to occur
1946 * 1953 *
1947 * Arguments: info pointer to device instance data 1954 * Arguments: info pointer to device instance data
1948 * mask pointer to bitmask of events to wait for 1955 * mask pointer to bitmask of events to wait for
1949 * Return Value: 0 if successful and bit mask updated with 1956 * Return Value: 0 if successful and bit mask updated with
1950 * of events triggerred, 1957 * of events triggerred,
1951 * otherwise error code 1958 * otherwise error code
1952 */ 1959 */
1953static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr) 1960static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
1954{ 1961{
1955 unsigned long flags; 1962 unsigned long flags;
1956 int s; 1963 int s;
1957 int rc=0; 1964 int rc=0;
1958 struct mgsl_icount cprev, cnow; 1965 struct mgsl_icount cprev, cnow;
@@ -1968,18 +1975,18 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
1968 if (debug_level >= DEBUG_LEVEL_INFO) 1975 if (debug_level >= DEBUG_LEVEL_INFO)
1969 printk("wait_events(%s,%d)\n", info->device_name, mask); 1976 printk("wait_events(%s,%d)\n", info->device_name, mask);
1970 1977
1971 spin_lock_irqsave(&info->lock,flags); 1978 spin_lock_irqsave(&info->lock, flags);
1972 1979
1973 /* return immediately if state matches requested events */ 1980 /* return immediately if state matches requested events */
1974 get_signals(info); 1981 get_signals(info);
1975 s = info->serial_signals; 1982 s = info->serial_signals;
1976 events = mask & 1983 events = mask &
1977 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) + 1984 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
1978 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) + 1985 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
1979 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) + 1986 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
1980 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) ); 1987 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
1981 if (events) { 1988 if (events) {
1982 spin_unlock_irqrestore(&info->lock,flags); 1989 spin_unlock_irqrestore(&info->lock, flags);
1983 goto exit; 1990 goto exit;
1984 } 1991 }
1985 1992
@@ -1994,7 +2001,7 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
1994 set_current_state(TASK_INTERRUPTIBLE); 2001 set_current_state(TASK_INTERRUPTIBLE);
1995 add_wait_queue(&info->event_wait_q, &wait); 2002 add_wait_queue(&info->event_wait_q, &wait);
1996 2003
1997 spin_unlock_irqrestore(&info->lock,flags); 2004 spin_unlock_irqrestore(&info->lock, flags);
1998 2005
1999 2006
2000 for(;;) { 2007 for(;;) {
@@ -2005,11 +2012,11 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
2005 } 2012 }
2006 2013
2007 /* get current irq counts */ 2014 /* get current irq counts */
2008 spin_lock_irqsave(&info->lock,flags); 2015 spin_lock_irqsave(&info->lock, flags);
2009 cnow = info->icount; 2016 cnow = info->icount;
2010 newsigs = info->input_signal_events; 2017 newsigs = info->input_signal_events;
2011 set_current_state(TASK_INTERRUPTIBLE); 2018 set_current_state(TASK_INTERRUPTIBLE);
2012 spin_unlock_irqrestore(&info->lock,flags); 2019 spin_unlock_irqrestore(&info->lock, flags);
2013 2020
2014 /* if no change, wait aborted for some reason */ 2021 /* if no change, wait aborted for some reason */
2015 if (newsigs.dsr_up == oldsigs.dsr_up && 2022 if (newsigs.dsr_up == oldsigs.dsr_up &&
@@ -2048,10 +2055,10 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
2048 set_current_state(TASK_RUNNING); 2055 set_current_state(TASK_RUNNING);
2049 2056
2050 if (mask & MgslEvent_ExitHuntMode) { 2057 if (mask & MgslEvent_ExitHuntMode) {
2051 spin_lock_irqsave(&info->lock,flags); 2058 spin_lock_irqsave(&info->lock, flags);
2052 if (!waitqueue_active(&info->event_wait_q)) 2059 if (!waitqueue_active(&info->event_wait_q))
2053 irq_disable(info, CHA, IRQ_EXITHUNT); 2060 irq_disable(info, CHA, IRQ_EXITHUNT);
2054 spin_unlock_irqrestore(&info->lock,flags); 2061 spin_unlock_irqrestore(&info->lock, flags);
2055 } 2062 }
2056exit: 2063exit:
2057 if (rc == 0) 2064 if (rc == 0)
@@ -2061,17 +2068,17 @@ exit:
2061 2068
2062static int modem_input_wait(MGSLPC_INFO *info,int arg) 2069static int modem_input_wait(MGSLPC_INFO *info,int arg)
2063{ 2070{
2064 unsigned long flags; 2071 unsigned long flags;
2065 int rc; 2072 int rc;
2066 struct mgsl_icount cprev, cnow; 2073 struct mgsl_icount cprev, cnow;
2067 DECLARE_WAITQUEUE(wait, current); 2074 DECLARE_WAITQUEUE(wait, current);
2068 2075
2069 /* save current irq counts */ 2076 /* save current irq counts */
2070 spin_lock_irqsave(&info->lock,flags); 2077 spin_lock_irqsave(&info->lock, flags);
2071 cprev = info->icount; 2078 cprev = info->icount;
2072 add_wait_queue(&info->status_event_wait_q, &wait); 2079 add_wait_queue(&info->status_event_wait_q, &wait);
2073 set_current_state(TASK_INTERRUPTIBLE); 2080 set_current_state(TASK_INTERRUPTIBLE);
2074 spin_unlock_irqrestore(&info->lock,flags); 2081 spin_unlock_irqrestore(&info->lock, flags);
2075 2082
2076 for(;;) { 2083 for(;;) {
2077 schedule(); 2084 schedule();
@@ -2081,10 +2088,10 @@ static int modem_input_wait(MGSLPC_INFO *info,int arg)
2081 } 2088 }
2082 2089
2083 /* get new irq counts */ 2090 /* get new irq counts */
2084 spin_lock_irqsave(&info->lock,flags); 2091 spin_lock_irqsave(&info->lock, flags);
2085 cnow = info->icount; 2092 cnow = info->icount;
2086 set_current_state(TASK_INTERRUPTIBLE); 2093 set_current_state(TASK_INTERRUPTIBLE);
2087 spin_unlock_irqrestore(&info->lock,flags); 2094 spin_unlock_irqrestore(&info->lock, flags);
2088 2095
2089 /* if no change, wait aborted for some reason */ 2096 /* if no change, wait aborted for some reason */
2090 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && 2097 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
@@ -2115,11 +2122,11 @@ static int tiocmget(struct tty_struct *tty)
2115{ 2122{
2116 MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; 2123 MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
2117 unsigned int result; 2124 unsigned int result;
2118 unsigned long flags; 2125 unsigned long flags;
2119 2126
2120 spin_lock_irqsave(&info->lock,flags); 2127 spin_lock_irqsave(&info->lock, flags);
2121 get_signals(info); 2128 get_signals(info);
2122 spin_unlock_irqrestore(&info->lock,flags); 2129 spin_unlock_irqrestore(&info->lock, flags);
2123 2130
2124 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) + 2131 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2125 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) + 2132 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
@@ -2130,7 +2137,7 @@ static int tiocmget(struct tty_struct *tty)
2130 2137
2131 if (debug_level >= DEBUG_LEVEL_INFO) 2138 if (debug_level >= DEBUG_LEVEL_INFO)
2132 printk("%s(%d):%s tiocmget() value=%08X\n", 2139 printk("%s(%d):%s tiocmget() value=%08X\n",
2133 __FILE__,__LINE__, info->device_name, result ); 2140 __FILE__, __LINE__, info->device_name, result);
2134 return result; 2141 return result;
2135} 2142}
2136 2143
@@ -2140,11 +2147,11 @@ static int tiocmset(struct tty_struct *tty,
2140 unsigned int set, unsigned int clear) 2147 unsigned int set, unsigned int clear)
2141{ 2148{
2142 MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; 2149 MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
2143 unsigned long flags; 2150 unsigned long flags;
2144 2151
2145 if (debug_level >= DEBUG_LEVEL_INFO) 2152 if (debug_level >= DEBUG_LEVEL_INFO)
2146 printk("%s(%d):%s tiocmset(%x,%x)\n", 2153 printk("%s(%d):%s tiocmset(%x,%x)\n",
2147 __FILE__,__LINE__,info->device_name, set, clear); 2154 __FILE__, __LINE__, info->device_name, set, clear);
2148 2155
2149 if (set & TIOCM_RTS) 2156 if (set & TIOCM_RTS)
2150 info->serial_signals |= SerialSignal_RTS; 2157 info->serial_signals |= SerialSignal_RTS;
@@ -2155,9 +2162,9 @@ static int tiocmset(struct tty_struct *tty,
2155 if (clear & TIOCM_DTR) 2162 if (clear & TIOCM_DTR)
2156 info->serial_signals &= ~SerialSignal_DTR; 2163 info->serial_signals &= ~SerialSignal_DTR;
2157 2164
2158 spin_lock_irqsave(&info->lock,flags); 2165 spin_lock_irqsave(&info->lock, flags);
2159 set_signals(info); 2166 set_signals(info);
2160 spin_unlock_irqrestore(&info->lock,flags); 2167 spin_unlock_irqrestore(&info->lock, flags);
2161 2168
2162 return 0; 2169 return 0;
2163} 2170}
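
The tiocmget()/tiocmset() handlers cleaned up above back the standard modem-control ioctls for this driver's ttys. A small standalone userspace sketch that exercises them is below; the device node follows the ttySLP%d naming used by the driver, an actual SyncLink PC Card device is assumed to be present, and error handling is trimmed:

/*
 * Standalone sketch, not part of the patch: read and set modem-control
 * lines through the driver's tiocmget()/tiocmset() handlers.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttySLP0", O_RDWR | O_NOCTTY);
	int bits;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* TIOCMGET is dispatched to the driver's tiocmget() */
	if (ioctl(fd, TIOCMGET, &bits) == 0)
		printf("RTS=%d DTR=%d DCD=%d\n",
		       !!(bits & TIOCM_RTS), !!(bits & TIOCM_DTR),
		       !!(bits & TIOCM_CAR));

	/* TIOCMSET is dispatched to the driver's tiocmset() */
	bits = TIOCM_RTS | TIOCM_DTR;
	if (ioctl(fd, TIOCMSET, &bits) != 0)
		perror("TIOCMSET");

	close(fd);
	return 0;
}
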
@@ -2174,17 +2181,17 @@ static int mgslpc_break(struct tty_struct *tty, int break_state)
2174 2181
2175 if (debug_level >= DEBUG_LEVEL_INFO) 2182 if (debug_level >= DEBUG_LEVEL_INFO)
2176 printk("%s(%d):mgslpc_break(%s,%d)\n", 2183 printk("%s(%d):mgslpc_break(%s,%d)\n",
2177 __FILE__,__LINE__, info->device_name, break_state); 2184 __FILE__, __LINE__, info->device_name, break_state);
2178 2185
2179 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_break")) 2186 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_break"))
2180 return -EINVAL; 2187 return -EINVAL;
2181 2188
2182 spin_lock_irqsave(&info->lock,flags); 2189 spin_lock_irqsave(&info->lock, flags);
2183 if (break_state == -1) 2190 if (break_state == -1)
2184 set_reg_bits(info, CHA+DAFO, BIT6); 2191 set_reg_bits(info, CHA+DAFO, BIT6);
2185 else 2192 else
2186 clear_reg_bits(info, CHA+DAFO, BIT6); 2193 clear_reg_bits(info, CHA+DAFO, BIT6);
2187 spin_unlock_irqrestore(&info->lock,flags); 2194 spin_unlock_irqrestore(&info->lock, flags);
2188 return 0; 2195 return 0;
2189} 2196}
2190 2197
@@ -2195,9 +2202,9 @@ static int mgslpc_get_icount(struct tty_struct *tty,
2195 struct mgsl_icount cnow; /* kernel counter temps */ 2202 struct mgsl_icount cnow; /* kernel counter temps */
2196 unsigned long flags; 2203 unsigned long flags;
2197 2204
2198 spin_lock_irqsave(&info->lock,flags); 2205 spin_lock_irqsave(&info->lock, flags);
2199 cnow = info->icount; 2206 cnow = info->icount;
2200 spin_unlock_irqrestore(&info->lock,flags); 2207 spin_unlock_irqrestore(&info->lock, flags);
2201 2208
2202 icount->cts = cnow.cts; 2209 icount->cts = cnow.cts;
2203 icount->dsr = cnow.dsr; 2210 icount->dsr = cnow.dsr;
@@ -2218,9 +2225,9 @@ static int mgslpc_get_icount(struct tty_struct *tty,
2218 * 2225 *
2219 * Arguments: 2226 * Arguments:
2220 * 2227 *
2221 * tty pointer to tty instance data 2228 * tty pointer to tty instance data
2222 * cmd IOCTL command code 2229 * cmd IOCTL command code
2223 * arg command argument/context 2230 * arg command argument/context
2224 * 2231 *
2225 * Return Value: 0 if success, otherwise error code 2232 * Return Value: 0 if success, otherwise error code
2226 */ 2233 */
@@ -2231,8 +2238,8 @@ static int mgslpc_ioctl(struct tty_struct *tty,
2231 void __user *argp = (void __user *)arg; 2238 void __user *argp = (void __user *)arg;
2232 2239
2233 if (debug_level >= DEBUG_LEVEL_INFO) 2240 if (debug_level >= DEBUG_LEVEL_INFO)
2234 printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__,__LINE__, 2241 printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__, __LINE__,
2235 info->device_name, cmd ); 2242 info->device_name, cmd);
2236 2243
2237 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl")) 2244 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl"))
2238 return -ENODEV; 2245 return -ENODEV;
@@ -2278,8 +2285,8 @@ static int mgslpc_ioctl(struct tty_struct *tty,
2278 * 2285 *
2279 * Arguments: 2286 * Arguments:
2280 * 2287 *
2281 * tty pointer to tty structure 2288 * tty pointer to tty structure
2282 * termios pointer to buffer to hold returned old termios 2289 * termios pointer to buffer to hold returned old termios
2283 */ 2290 */
2284static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios) 2291static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
2285{ 2292{
@@ -2287,8 +2294,8 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
2287 unsigned long flags; 2294 unsigned long flags;
2288 2295
2289 if (debug_level >= DEBUG_LEVEL_INFO) 2296 if (debug_level >= DEBUG_LEVEL_INFO)
2290 printk("%s(%d):mgslpc_set_termios %s\n", __FILE__,__LINE__, 2297 printk("%s(%d):mgslpc_set_termios %s\n", __FILE__, __LINE__,
2291 tty->driver->name ); 2298 tty->driver->name);
2292 2299
2293 /* just return if nothing has changed */ 2300 /* just return if nothing has changed */
2294 if ((tty->termios.c_cflag == old_termios->c_cflag) 2301 if ((tty->termios.c_cflag == old_termios->c_cflag)
@@ -2302,22 +2309,22 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
2302 if (old_termios->c_cflag & CBAUD && 2309 if (old_termios->c_cflag & CBAUD &&
2303 !(tty->termios.c_cflag & CBAUD)) { 2310 !(tty->termios.c_cflag & CBAUD)) {
2304 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); 2311 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
2305 spin_lock_irqsave(&info->lock,flags); 2312 spin_lock_irqsave(&info->lock, flags);
2306 set_signals(info); 2313 set_signals(info);
2307 spin_unlock_irqrestore(&info->lock,flags); 2314 spin_unlock_irqrestore(&info->lock, flags);
2308 } 2315 }
2309 2316
2310 /* Handle transition away from B0 status */ 2317 /* Handle transition away from B0 status */
2311 if (!(old_termios->c_cflag & CBAUD) && 2318 if (!(old_termios->c_cflag & CBAUD) &&
2312 tty->termios.c_cflag & CBAUD) { 2319 tty->termios.c_cflag & CBAUD) {
2313 info->serial_signals |= SerialSignal_DTR; 2320 info->serial_signals |= SerialSignal_DTR;
2314 if (!(tty->termios.c_cflag & CRTSCTS) || 2321 if (!(tty->termios.c_cflag & CRTSCTS) ||
2315 !test_bit(TTY_THROTTLED, &tty->flags)) { 2322 !test_bit(TTY_THROTTLED, &tty->flags)) {
2316 info->serial_signals |= SerialSignal_RTS; 2323 info->serial_signals |= SerialSignal_RTS;
2317 } 2324 }
2318 spin_lock_irqsave(&info->lock,flags); 2325 spin_lock_irqsave(&info->lock, flags);
2319 set_signals(info); 2326 set_signals(info);
2320 spin_unlock_irqrestore(&info->lock,flags); 2327 spin_unlock_irqrestore(&info->lock, flags);
2321 } 2328 }
2322 2329
2323 /* Handle turning off CRTSCTS */ 2330 /* Handle turning off CRTSCTS */
@@ -2338,15 +2345,15 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
2338 2345
2339 if (debug_level >= DEBUG_LEVEL_INFO) 2346 if (debug_level >= DEBUG_LEVEL_INFO)
2340 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n", 2347 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
2341 __FILE__,__LINE__, info->device_name, port->count); 2348 __FILE__, __LINE__, info->device_name, port->count);
2342 2349
2343 WARN_ON(!port->count); 2350 WARN_ON(!port->count);
2344 2351
2345 if (tty_port_close_start(port, tty, filp) == 0) 2352 if (tty_port_close_start(port, tty, filp) == 0)
2346 goto cleanup; 2353 goto cleanup;
2347 2354
2348 if (port->flags & ASYNC_INITIALIZED) 2355 if (port->flags & ASYNC_INITIALIZED)
2349 mgslpc_wait_until_sent(tty, info->timeout); 2356 mgslpc_wait_until_sent(tty, info->timeout);
2350 2357
2351 mgslpc_flush_buffer(tty); 2358 mgslpc_flush_buffer(tty);
2352 2359
@@ -2357,7 +2364,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
2357 tty_port_tty_set(port, NULL); 2364 tty_port_tty_set(port, NULL);
2358cleanup: 2365cleanup:
2359 if (debug_level >= DEBUG_LEVEL_INFO) 2366 if (debug_level >= DEBUG_LEVEL_INFO)
2360 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__, 2367 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
2361 tty->driver->name, port->count); 2368 tty->driver->name, port->count);
2362} 2369}
2363 2370
@@ -2368,12 +2375,12 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
2368 MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; 2375 MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
2369 unsigned long orig_jiffies, char_time; 2376 unsigned long orig_jiffies, char_time;
2370 2377
2371 if (!info ) 2378 if (!info)
2372 return; 2379 return;
2373 2380
2374 if (debug_level >= DEBUG_LEVEL_INFO) 2381 if (debug_level >= DEBUG_LEVEL_INFO)
2375 printk("%s(%d):mgslpc_wait_until_sent(%s) entry\n", 2382 printk("%s(%d):mgslpc_wait_until_sent(%s) entry\n",
2376 __FILE__,__LINE__, info->device_name ); 2383 __FILE__, __LINE__, info->device_name);
2377 2384
2378 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent")) 2385 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent"))
2379 return; 2386 return;
@@ -2389,8 +2396,8 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
2389 * Note: use tight timings here to satisfy the NIST-PCTS. 2396 * Note: use tight timings here to satisfy the NIST-PCTS.
2390 */ 2397 */
2391 2398
2392 if ( info->params.data_rate ) { 2399 if (info->params.data_rate) {
2393 char_time = info->timeout/(32 * 5); 2400 char_time = info->timeout/(32 * 5);
2394 if (!char_time) 2401 if (!char_time)
2395 char_time++; 2402 char_time++;
2396 } else 2403 } else
@@ -2421,7 +2428,7 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
2421exit: 2428exit:
2422 if (debug_level >= DEBUG_LEVEL_INFO) 2429 if (debug_level >= DEBUG_LEVEL_INFO)
2423 printk("%s(%d):mgslpc_wait_until_sent(%s) exit\n", 2430 printk("%s(%d):mgslpc_wait_until_sent(%s) exit\n",
2424 __FILE__,__LINE__, info->device_name ); 2431 __FILE__, __LINE__, info->device_name);
2425} 2432}
2426 2433
2427/* Called by tty_hangup() when a hangup is signaled. 2434/* Called by tty_hangup() when a hangup is signaled.
@@ -2433,7 +2440,7 @@ static void mgslpc_hangup(struct tty_struct *tty)
2433 2440
2434 if (debug_level >= DEBUG_LEVEL_INFO) 2441 if (debug_level >= DEBUG_LEVEL_INFO)
2435 printk("%s(%d):mgslpc_hangup(%s)\n", 2442 printk("%s(%d):mgslpc_hangup(%s)\n",
2436 __FILE__,__LINE__, info->device_name ); 2443 __FILE__, __LINE__, info->device_name);
2437 2444
2438 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_hangup")) 2445 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_hangup"))
2439 return; 2446 return;
@@ -2448,9 +2455,9 @@ static int carrier_raised(struct tty_port *port)
2448 MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port); 2455 MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
2449 unsigned long flags; 2456 unsigned long flags;
2450 2457
2451 spin_lock_irqsave(&info->lock,flags); 2458 spin_lock_irqsave(&info->lock, flags);
2452 get_signals(info); 2459 get_signals(info);
2453 spin_unlock_irqrestore(&info->lock,flags); 2460 spin_unlock_irqrestore(&info->lock, flags);
2454 2461
2455 if (info->serial_signals & SerialSignal_DCD) 2462 if (info->serial_signals & SerialSignal_DCD)
2456 return 1; 2463 return 1;
@@ -2462,13 +2469,13 @@ static void dtr_rts(struct tty_port *port, int onoff)
2462 MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port); 2469 MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
2463 unsigned long flags; 2470 unsigned long flags;
2464 2471
2465 spin_lock_irqsave(&info->lock,flags); 2472 spin_lock_irqsave(&info->lock, flags);
2466 if (onoff) 2473 if (onoff)
2467 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR; 2474 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
2468 else 2475 else
2469 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); 2476 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
2470 set_signals(info); 2477 set_signals(info);
2471 spin_unlock_irqrestore(&info->lock,flags); 2478 spin_unlock_irqrestore(&info->lock, flags);
2472} 2479}
2473 2480
2474 2481
@@ -2476,14 +2483,14 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
2476{ 2483{
2477 MGSLPC_INFO *info; 2484 MGSLPC_INFO *info;
2478 struct tty_port *port; 2485 struct tty_port *port;
2479 int retval, line; 2486 int retval, line;
2480 unsigned long flags; 2487 unsigned long flags;
2481 2488
2482 /* verify range of specified line number */ 2489 /* verify range of specified line number */
2483 line = tty->index; 2490 line = tty->index;
2484 if (line >= mgslpc_device_count) { 2491 if (line >= mgslpc_device_count) {
2485 printk("%s(%d):mgslpc_open with invalid line #%d.\n", 2492 printk("%s(%d):mgslpc_open with invalid line #%d.\n",
2486 __FILE__,__LINE__,line); 2493 __FILE__, __LINE__, line);
2487 return -ENODEV; 2494 return -ENODEV;
2488 } 2495 }
2489 2496
@@ -2500,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
2500 2507
2501 if (debug_level >= DEBUG_LEVEL_INFO) 2508 if (debug_level >= DEBUG_LEVEL_INFO)
2502 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n", 2509 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
2503 __FILE__,__LINE__,tty->driver->name, port->count); 2510 __FILE__, __LINE__, tty->driver->name, port->count);
2504 2511
2505 /* If port is closing, signal caller to try again */ 2512 /* If port is closing, signal caller to try again */
2506 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){ 2513 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
@@ -2535,13 +2542,13 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
2535 if (retval) { 2542 if (retval) {
2536 if (debug_level >= DEBUG_LEVEL_INFO) 2543 if (debug_level >= DEBUG_LEVEL_INFO)
2537 printk("%s(%d):block_til_ready(%s) returned %d\n", 2544 printk("%s(%d):block_til_ready(%s) returned %d\n",
2538 __FILE__,__LINE__, info->device_name, retval); 2545 __FILE__, __LINE__, info->device_name, retval);
2539 goto cleanup; 2546 goto cleanup;
2540 } 2547 }
2541 2548
2542 if (debug_level >= DEBUG_LEVEL_INFO) 2549 if (debug_level >= DEBUG_LEVEL_INFO)
2543 printk("%s(%d):mgslpc_open(%s) success\n", 2550 printk("%s(%d):mgslpc_open(%s) success\n",
2544 __FILE__,__LINE__, info->device_name); 2551 __FILE__, __LINE__, info->device_name);
2545 retval = 0; 2552 retval = 0;
2546 2553
2547cleanup: 2554cleanup:
@@ -2561,9 +2568,9 @@ static inline void line_info(struct seq_file *m, MGSLPC_INFO *info)
2561 info->device_name, info->io_base, info->irq_level); 2568 info->device_name, info->io_base, info->irq_level);
2562 2569
2563 /* output current serial signal states */ 2570 /* output current serial signal states */
2564 spin_lock_irqsave(&info->lock,flags); 2571 spin_lock_irqsave(&info->lock, flags);
2565 get_signals(info); 2572 get_signals(info);
2566 spin_unlock_irqrestore(&info->lock,flags); 2573 spin_unlock_irqrestore(&info->lock, flags);
2567 2574
2568 stat_buf[0] = 0; 2575 stat_buf[0] = 0;
2569 stat_buf[1] = 0; 2576 stat_buf[1] = 0;
@@ -2625,7 +2632,7 @@ static int mgslpc_proc_show(struct seq_file *m, void *v)
2625 seq_printf(m, "synclink driver:%s\n", driver_version); 2632 seq_printf(m, "synclink driver:%s\n", driver_version);
2626 2633
2627 info = mgslpc_device_list; 2634 info = mgslpc_device_list;
2628 while( info ) { 2635 while (info) {
2629 line_info(m, info); 2636 line_info(m, info);
2630 info = info->next_device; 2637 info = info->next_device;
2631 } 2638 }
@@ -2686,8 +2693,8 @@ static void rx_free_buffers(MGSLPC_INFO *info)
2686 2693
2687static int claim_resources(MGSLPC_INFO *info) 2694static int claim_resources(MGSLPC_INFO *info)
2688{ 2695{
2689 if (rx_alloc_buffers(info) < 0 ) { 2696 if (rx_alloc_buffers(info) < 0) {
2690 printk( "Can't allocate rx buffer %s\n", info->device_name); 2697 printk("Can't allocate rx buffer %s\n", info->device_name);
2691 release_resources(info); 2698 release_resources(info);
2692 return -ENODEV; 2699 return -ENODEV;
2693 } 2700 }
@@ -2706,8 +2713,12 @@ static void release_resources(MGSLPC_INFO *info)
2706 * 2713 *
2707 * Arguments: info pointer to device instance data 2714 * Arguments: info pointer to device instance data
2708 */ 2715 */
2709static void mgslpc_add_device(MGSLPC_INFO *info) 2716static int mgslpc_add_device(MGSLPC_INFO *info)
2710{ 2717{
2718 MGSLPC_INFO *current_dev = NULL;
2719 struct device *tty_dev;
2720 int ret;
2721
2711 info->next_device = NULL; 2722 info->next_device = NULL;
2712 info->line = mgslpc_device_count; 2723 info->line = mgslpc_device_count;
2713 sprintf(info->device_name,"ttySLP%d",info->line); 2724 sprintf(info->device_name,"ttySLP%d",info->line);
@@ -2722,8 +2733,8 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
2722 if (!mgslpc_device_list) 2733 if (!mgslpc_device_list)
2723 mgslpc_device_list = info; 2734 mgslpc_device_list = info;
2724 else { 2735 else {
2725 MGSLPC_INFO *current_dev = mgslpc_device_list; 2736 current_dev = mgslpc_device_list;
2726 while( current_dev->next_device ) 2737 while (current_dev->next_device)
2727 current_dev = current_dev->next_device; 2738 current_dev = current_dev->next_device;
2728 current_dev->next_device = info; 2739 current_dev->next_device = info;
2729 } 2740 }
@@ -2733,14 +2744,34 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
2733 else if (info->max_frame_size > 65535) 2744 else if (info->max_frame_size > 65535)
2734 info->max_frame_size = 65535; 2745 info->max_frame_size = 65535;
2735 2746
2736 printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n", 2747 printk("SyncLink PC Card %s:IO=%04X IRQ=%d\n",
2737 info->device_name, info->io_base, info->irq_level); 2748 info->device_name, info->io_base, info->irq_level);
2738 2749
2739#if SYNCLINK_GENERIC_HDLC 2750#if SYNCLINK_GENERIC_HDLC
2740 hdlcdev_init(info); 2751 ret = hdlcdev_init(info);
2752 if (ret != 0)
2753 goto failed;
2741#endif 2754#endif
2742 tty_port_register_device(&info->port, serial_driver, info->line, 2755
2756 tty_dev = tty_port_register_device(&info->port, serial_driver, info->line,
2743 &info->p_dev->dev); 2757 &info->p_dev->dev);
2758 if (IS_ERR(tty_dev)) {
2759 ret = PTR_ERR(tty_dev);
2760#if SYNCLINK_GENERIC_HDLC
2761 hdlcdev_exit(info);
2762#endif
2763 goto failed;
2764 }
2765
2766 return 0;
2767
2768failed:
2769 if (current_dev)
2770 current_dev->next_device = NULL;
2771 else
2772 mgslpc_device_list = NULL;
2773 mgslpc_device_count--;
2774 return ret;
2744} 2775}
2745 2776
2746static void mgslpc_remove_device(MGSLPC_INFO *remove_info) 2777static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
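
The hunk above changes mgslpc_add_device() from void to int so that a failure in hdlcdev_init() or tty_port_register_device() can undo the device-list append and the device-count increment. A standalone sketch of that append-then-unwind pattern in plain C follows; fake_register() is a stand-in invented here for the two registration calls:

/*
 * Standalone sketch, not kernel code: append a node to a singly linked
 * list, then unwind the append if registration fails, as the patch does.
 */
#include <stdio.h>
#include <stdlib.h>

struct dev {
	struct dev *next;
	int line;
};

static struct dev *dev_list;
static int dev_count;

static int fake_register(struct dev *d)
{
	/* pretend registration of the third device fails */
	return d->line == 2 ? -1 : 0;
}

static int add_device(struct dev *d)
{
	struct dev *prev = NULL;

	d->next = NULL;
	d->line = dev_count++;

	if (!dev_list) {
		dev_list = d;
	} else {
		prev = dev_list;
		while (prev->next)
			prev = prev->next;
		prev->next = d;
	}

	if (fake_register(d)) {
		/* unwind the list append and the count */
		if (prev)
			prev->next = NULL;
		else
			dev_list = NULL;
		dev_count--;
		return -1;
	}
	return 0;
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct dev *d = calloc(1, sizeof(*d));

		if (!d)
			break;
		if (add_device(d) != 0) {
			printf("device %d failed, unwound (count=%d)\n", i, dev_count);
			free(d);
		} else {
			printf("device %d added (count=%d)\n", i, dev_count);
		}
	}
	return 0;
}
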
@@ -3262,7 +3293,7 @@ static void rx_stop(MGSLPC_INFO *info)
3262{ 3293{
3263 if (debug_level >= DEBUG_LEVEL_ISR) 3294 if (debug_level >= DEBUG_LEVEL_ISR)
3264 printk("%s(%d):rx_stop(%s)\n", 3295 printk("%s(%d):rx_stop(%s)\n",
3265 __FILE__,__LINE__, info->device_name ); 3296 __FILE__, __LINE__, info->device_name);
3266 3297
3267 /* MODE:03 RAC Receiver Active, 0=inactive */ 3298 /* MODE:03 RAC Receiver Active, 0=inactive */
3268 clear_reg_bits(info, CHA + MODE, BIT3); 3299 clear_reg_bits(info, CHA + MODE, BIT3);
@@ -3275,7 +3306,7 @@ static void rx_start(MGSLPC_INFO *info)
3275{ 3306{
3276 if (debug_level >= DEBUG_LEVEL_ISR) 3307 if (debug_level >= DEBUG_LEVEL_ISR)
3277 printk("%s(%d):rx_start(%s)\n", 3308 printk("%s(%d):rx_start(%s)\n",
3278 __FILE__,__LINE__, info->device_name ); 3309 __FILE__, __LINE__, info->device_name);
3279 3310
3280 rx_reset_buffers(info); 3311 rx_reset_buffers(info);
3281 info->rx_enabled = false; 3312 info->rx_enabled = false;
@@ -3291,7 +3322,7 @@ static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty)
3291{ 3322{
3292 if (debug_level >= DEBUG_LEVEL_ISR) 3323 if (debug_level >= DEBUG_LEVEL_ISR)
3293 printk("%s(%d):tx_start(%s)\n", 3324 printk("%s(%d):tx_start(%s)\n",
3294 __FILE__,__LINE__, info->device_name ); 3325 __FILE__, __LINE__, info->device_name);
3295 3326
3296 if (info->tx_count) { 3327 if (info->tx_count) {
3297 /* If auto RTS enabled and RTS is inactive, then assert */ 3328 /* If auto RTS enabled and RTS is inactive, then assert */
@@ -3329,7 +3360,7 @@ static void tx_stop(MGSLPC_INFO *info)
3329{ 3360{
3330 if (debug_level >= DEBUG_LEVEL_ISR) 3361 if (debug_level >= DEBUG_LEVEL_ISR)
3331 printk("%s(%d):tx_stop(%s)\n", 3362 printk("%s(%d):tx_stop(%s)\n",
3332 __FILE__,__LINE__, info->device_name ); 3363 __FILE__, __LINE__, info->device_name);
3333 3364
3334 del_timer(&info->tx_timer); 3365 del_timer(&info->tx_timer);
3335 3366
@@ -3681,7 +3712,7 @@ static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty)
3681 3712
3682 if (debug_level >= DEBUG_LEVEL_BH) 3713 if (debug_level >= DEBUG_LEVEL_BH)
3683 printk("%s(%d):rx_get_frame(%s) status=%04X size=%d\n", 3714 printk("%s(%d):rx_get_frame(%s) status=%04X size=%d\n",
3684 __FILE__,__LINE__,info->device_name,status,framesize); 3715 __FILE__, __LINE__, info->device_name, status, framesize);
3685 3716
3686 if (debug_level >= DEBUG_LEVEL_DATA) 3717 if (debug_level >= DEBUG_LEVEL_DATA)
3687 trace_block(info, buf->data, framesize, 0); 3718 trace_block(info, buf->data, framesize, 0);
@@ -3709,13 +3740,13 @@ static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty)
3709 } 3740 }
3710 } 3741 }
3711 3742
3712 spin_lock_irqsave(&info->lock,flags); 3743 spin_lock_irqsave(&info->lock, flags);
3713 buf->status = buf->count = 0; 3744 buf->status = buf->count = 0;
3714 info->rx_frame_count--; 3745 info->rx_frame_count--;
3715 info->rx_get++; 3746 info->rx_get++;
3716 if (info->rx_get >= info->rx_buf_count) 3747 if (info->rx_get >= info->rx_buf_count)
3717 info->rx_get = 0; 3748 info->rx_get = 0;
3718 spin_unlock_irqrestore(&info->lock,flags); 3749 spin_unlock_irqrestore(&info->lock, flags);
3719 3750
3720 return true; 3751 return true;
3721} 3752}
@@ -3729,7 +3760,7 @@ static bool register_test(MGSLPC_INFO *info)
3729 bool rc = true; 3760 bool rc = true;
3730 unsigned long flags; 3761 unsigned long flags;
3731 3762
3732 spin_lock_irqsave(&info->lock,flags); 3763 spin_lock_irqsave(&info->lock, flags);
3733 reset_device(info); 3764 reset_device(info);
3734 3765
3735 for (i = 0; i < count; i++) { 3766 for (i = 0; i < count; i++) {
@@ -3742,7 +3773,7 @@ static bool register_test(MGSLPC_INFO *info)
3742 } 3773 }
3743 } 3774 }
3744 3775
3745 spin_unlock_irqrestore(&info->lock,flags); 3776 spin_unlock_irqrestore(&info->lock, flags);
3746 return rc; 3777 return rc;
3747} 3778}
3748 3779
@@ -3751,7 +3782,7 @@ static bool irq_test(MGSLPC_INFO *info)
3751 unsigned long end_time; 3782 unsigned long end_time;
3752 unsigned long flags; 3783 unsigned long flags;
3753 3784
3754 spin_lock_irqsave(&info->lock,flags); 3785 spin_lock_irqsave(&info->lock, flags);
3755 reset_device(info); 3786 reset_device(info);
3756 3787
3757 info->testing_irq = true; 3788 info->testing_irq = true;
@@ -3765,7 +3796,7 @@ static bool irq_test(MGSLPC_INFO *info)
3765 write_reg(info, CHA + TIMR, 0); /* 512 cycles */ 3796 write_reg(info, CHA + TIMR, 0); /* 512 cycles */
3766 issue_command(info, CHA, CMD_START_TIMER); 3797 issue_command(info, CHA, CMD_START_TIMER);
3767 3798
3768 spin_unlock_irqrestore(&info->lock,flags); 3799 spin_unlock_irqrestore(&info->lock, flags);
3769 3800
3770 end_time=100; 3801 end_time=100;
3771 while(end_time-- && !info->irq_occurred) { 3802 while(end_time-- && !info->irq_occurred) {
@@ -3774,9 +3805,9 @@ static bool irq_test(MGSLPC_INFO *info)
3774 3805
3775 info->testing_irq = false; 3806 info->testing_irq = false;
3776 3807
3777 spin_lock_irqsave(&info->lock,flags); 3808 spin_lock_irqsave(&info->lock, flags);
3778 reset_device(info); 3809 reset_device(info);
3779 spin_unlock_irqrestore(&info->lock,flags); 3810 spin_unlock_irqrestore(&info->lock, flags);
3780 3811
3781 return info->irq_occurred; 3812 return info->irq_occurred;
3782} 3813}
@@ -3785,21 +3816,21 @@ static int adapter_test(MGSLPC_INFO *info)
3785{ 3816{
3786 if (!register_test(info)) { 3817 if (!register_test(info)) {
3787 info->init_error = DiagStatus_AddressFailure; 3818 info->init_error = DiagStatus_AddressFailure;
3788 printk( "%s(%d):Register test failure for device %s Addr=%04X\n", 3819 printk("%s(%d):Register test failure for device %s Addr=%04X\n",
3789 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) ); 3820 __FILE__, __LINE__, info->device_name, (unsigned short)(info->io_base));
3790 return -ENODEV; 3821 return -ENODEV;
3791 } 3822 }
3792 3823
3793 if (!irq_test(info)) { 3824 if (!irq_test(info)) {
3794 info->init_error = DiagStatus_IrqFailure; 3825 info->init_error = DiagStatus_IrqFailure;
3795 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n", 3826 printk("%s(%d):Interrupt test failure for device %s IRQ=%d\n",
3796 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) ); 3827 __FILE__, __LINE__, info->device_name, (unsigned short)(info->irq_level));
3797 return -ENODEV; 3828 return -ENODEV;
3798 } 3829 }
3799 3830
3800 if (debug_level >= DEBUG_LEVEL_INFO) 3831 if (debug_level >= DEBUG_LEVEL_INFO)
3801 printk("%s(%d):device %s passed diagnostics\n", 3832 printk("%s(%d):device %s passed diagnostics\n",
3802 __FILE__,__LINE__,info->device_name); 3833 __FILE__, __LINE__, info->device_name);
3803 return 0; 3834 return 0;
3804} 3835}
3805 3836
@@ -3808,9 +3839,9 @@ static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
3808 int i; 3839 int i;
3809 int linecount; 3840 int linecount;
3810 if (xmit) 3841 if (xmit)
3811 printk("%s tx data:\n",info->device_name); 3842 printk("%s tx data:\n", info->device_name);
3812 else 3843 else
3813 printk("%s rx data:\n",info->device_name); 3844 printk("%s rx data:\n", info->device_name);
3814 3845
3815 while(count) { 3846 while(count) {
3816 if (count > 16) 3847 if (count > 16)
@@ -3819,12 +3850,12 @@ static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
3819 linecount = count; 3850 linecount = count;
3820 3851
3821 for(i=0;i<linecount;i++) 3852 for(i=0;i<linecount;i++)
3822 printk("%02X ",(unsigned char)data[i]); 3853 printk("%02X ", (unsigned char)data[i]);
3823 for(;i<17;i++) 3854 for(;i<17;i++)
3824 printk(" "); 3855 printk(" ");
3825 for(i=0;i<linecount;i++) { 3856 for(i=0;i<linecount;i++) {
3826 if (data[i]>=040 && data[i]<=0176) 3857 if (data[i]>=040 && data[i]<=0176)
3827 printk("%c",data[i]); 3858 printk("%c", data[i]);
3828 else 3859 else
3829 printk("."); 3860 printk(".");
3830 } 3861 }
@@ -3843,18 +3874,18 @@ static void tx_timeout(unsigned long context)
3843 MGSLPC_INFO *info = (MGSLPC_INFO*)context; 3874 MGSLPC_INFO *info = (MGSLPC_INFO*)context;
3844 unsigned long flags; 3875 unsigned long flags;
3845 3876
3846 if ( debug_level >= DEBUG_LEVEL_INFO ) 3877 if (debug_level >= DEBUG_LEVEL_INFO)
3847 printk( "%s(%d):tx_timeout(%s)\n", 3878 printk("%s(%d):tx_timeout(%s)\n",
3848 __FILE__,__LINE__,info->device_name); 3879 __FILE__, __LINE__, info->device_name);
3849 if(info->tx_active && 3880 if (info->tx_active &&
3850 info->params.mode == MGSL_MODE_HDLC) { 3881 info->params.mode == MGSL_MODE_HDLC) {
3851 info->icount.txtimeout++; 3882 info->icount.txtimeout++;
3852 } 3883 }
3853 spin_lock_irqsave(&info->lock,flags); 3884 spin_lock_irqsave(&info->lock, flags);
3854 info->tx_active = false; 3885 info->tx_active = false;
3855 info->tx_count = info->tx_put = info->tx_get = 0; 3886 info->tx_count = info->tx_put = info->tx_get = 0;
3856 3887
3857 spin_unlock_irqrestore(&info->lock,flags); 3888 spin_unlock_irqrestore(&info->lock, flags);
3858 3889
3859#if SYNCLINK_GENERIC_HDLC 3890#if SYNCLINK_GENERIC_HDLC
3860 if (info->netcount) 3891 if (info->netcount)
@@ -3936,7 +3967,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
3936 unsigned long flags; 3967 unsigned long flags;
3937 3968
3938 if (debug_level >= DEBUG_LEVEL_INFO) 3969 if (debug_level >= DEBUG_LEVEL_INFO)
3939 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name); 3970 printk(KERN_INFO "%s:hdlc_xmit(%s)\n", __FILE__, dev->name);
3940 3971
3941 /* stop sending until this frame completes */ 3972 /* stop sending until this frame completes */
3942 netif_stop_queue(dev); 3973 netif_stop_queue(dev);
@@ -3957,13 +3988,13 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
3957 dev->trans_start = jiffies; 3988 dev->trans_start = jiffies;
3958 3989
3959 /* start hardware transmitter if necessary */ 3990 /* start hardware transmitter if necessary */
3960 spin_lock_irqsave(&info->lock,flags); 3991 spin_lock_irqsave(&info->lock, flags);
3961 if (!info->tx_active) { 3992 if (!info->tx_active) {
3962 struct tty_struct *tty = tty_port_tty_get(&info->port); 3993 struct tty_struct *tty = tty_port_tty_get(&info->port);
3963 tx_start(info, tty); 3994 tx_start(info, tty);
3964 tty_kref_put(tty); 3995 tty_kref_put(tty);
3965 } 3996 }
3966 spin_unlock_irqrestore(&info->lock,flags); 3997 spin_unlock_irqrestore(&info->lock, flags);
3967 3998
3968 return NETDEV_TX_OK; 3999 return NETDEV_TX_OK;
3969} 4000}
@@ -3984,10 +4015,11 @@ static int hdlcdev_open(struct net_device *dev)
3984 unsigned long flags; 4015 unsigned long flags;
3985 4016
3986 if (debug_level >= DEBUG_LEVEL_INFO) 4017 if (debug_level >= DEBUG_LEVEL_INFO)
3987 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name); 4018 printk("%s:hdlcdev_open(%s)\n", __FILE__, dev->name);
3988 4019
3989 /* generic HDLC layer open processing */ 4020 /* generic HDLC layer open processing */
3990 if ((rc = hdlc_open(dev))) 4021 rc = hdlc_open(dev);
4022 if (rc != 0)
3991 return rc; 4023 return rc;
3992 4024
3993 /* arbitrate between network and tty opens */ 4025 /* arbitrate between network and tty opens */
@@ -4002,7 +4034,8 @@ static int hdlcdev_open(struct net_device *dev)
4002 4034
4003 tty = tty_port_tty_get(&info->port); 4035 tty = tty_port_tty_get(&info->port);
4004 /* claim resources and init adapter */ 4036 /* claim resources and init adapter */
4005 if ((rc = startup(info, tty)) != 0) { 4037 rc = startup(info, tty);
4038 if (rc != 0) {
4006 tty_kref_put(tty); 4039 tty_kref_put(tty);
4007 spin_lock_irqsave(&info->netlock, flags); 4040 spin_lock_irqsave(&info->netlock, flags);
4008 info->netcount=0; 4041 info->netcount=0;
@@ -4044,7 +4077,7 @@ static int hdlcdev_close(struct net_device *dev)
4044 unsigned long flags; 4077 unsigned long flags;
4045 4078
4046 if (debug_level >= DEBUG_LEVEL_INFO) 4079 if (debug_level >= DEBUG_LEVEL_INFO)
4047 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name); 4080 printk("%s:hdlcdev_close(%s)\n", __FILE__, dev->name);
4048 4081
4049 netif_stop_queue(dev); 4082 netif_stop_queue(dev);
4050 4083
@@ -4078,7 +4111,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4078 unsigned int flags; 4111 unsigned int flags;
4079 4112
4080 if (debug_level >= DEBUG_LEVEL_INFO) 4113 if (debug_level >= DEBUG_LEVEL_INFO)
4081 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name); 4114 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
4082 4115
4083 /* return error if TTY interface open */ 4116 /* return error if TTY interface open */
4084 if (info->port.count) 4117 if (info->port.count)
@@ -4179,14 +4212,14 @@ static void hdlcdev_tx_timeout(struct net_device *dev)
4179 unsigned long flags; 4212 unsigned long flags;
4180 4213
4181 if (debug_level >= DEBUG_LEVEL_INFO) 4214 if (debug_level >= DEBUG_LEVEL_INFO)
4182 printk("hdlcdev_tx_timeout(%s)\n",dev->name); 4215 printk("hdlcdev_tx_timeout(%s)\n", dev->name);
4183 4216
4184 dev->stats.tx_errors++; 4217 dev->stats.tx_errors++;
4185 dev->stats.tx_aborted_errors++; 4218 dev->stats.tx_aborted_errors++;
4186 4219
4187 spin_lock_irqsave(&info->lock,flags); 4220 spin_lock_irqsave(&info->lock, flags);
4188 tx_stop(info); 4221 tx_stop(info);
4189 spin_unlock_irqrestore(&info->lock,flags); 4222 spin_unlock_irqrestore(&info->lock, flags);
4190 4223
4191 netif_wake_queue(dev); 4224 netif_wake_queue(dev);
4192} 4225}
@@ -4217,7 +4250,7 @@ static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size)
4217 struct net_device *dev = info->netdev; 4250 struct net_device *dev = info->netdev;
4218 4251
4219 if (debug_level >= DEBUG_LEVEL_INFO) 4252 if (debug_level >= DEBUG_LEVEL_INFO)
4220 printk("hdlcdev_rx(%s)\n",dev->name); 4253 printk("hdlcdev_rx(%s)\n", dev->name);
4221 4254
4222 if (skb == NULL) { 4255 if (skb == NULL) {
4223 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name); 4256 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
@@ -4260,8 +4293,9 @@ static int hdlcdev_init(MGSLPC_INFO *info)
4260 4293
4261 /* allocate and initialize network and HDLC layer objects */ 4294 /* allocate and initialize network and HDLC layer objects */
4262 4295
4263 if (!(dev = alloc_hdlcdev(info))) { 4296 dev = alloc_hdlcdev(info);
4264 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__); 4297 if (dev == NULL) {
4298 printk(KERN_ERR "%s:hdlc device allocation failure\n", __FILE__);
4265 return -ENOMEM; 4299 return -ENOMEM;
4266 } 4300 }
4267 4301
@@ -4280,8 +4314,9 @@ static int hdlcdev_init(MGSLPC_INFO *info)
4280 hdlc->xmit = hdlcdev_xmit; 4314 hdlc->xmit = hdlcdev_xmit;
4281 4315
4282 /* register objects with HDLC layer */ 4316 /* register objects with HDLC layer */
4283 if ((rc = register_hdlc_device(dev))) { 4317 rc = register_hdlc_device(dev);
4284 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__); 4318 if (rc) {
4319 printk(KERN_WARNING "%s:unable to register hdlc device\n", __FILE__);
4285 free_netdev(dev); 4320 free_netdev(dev);
4286 return rc; 4321 return rc;
4287 } 4322 }
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 07122a9ef36e..5168a1324a65 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -29,7 +29,7 @@ config EXTCON_ADC_JACK
29 29
30config EXTCON_MAX77693 30config EXTCON_MAX77693
31 tristate "MAX77693 EXTCON Support" 31 tristate "MAX77693 EXTCON Support"
32 depends on MFD_MAX77693 32 depends on MFD_MAX77693 && INPUT
33 select IRQ_DOMAIN 33 select IRQ_DOMAIN
34 select REGMAP_I2C 34 select REGMAP_I2C
35 help 35 help
@@ -47,7 +47,7 @@ config EXTCON_MAX8997
47 47
48config EXTCON_ARIZONA 48config EXTCON_ARIZONA
49 tristate "Wolfson Arizona EXTCON support" 49 tristate "Wolfson Arizona EXTCON support"
50 depends on MFD_ARIZONA && INPUT 50 depends on MFD_ARIZONA && INPUT && SND_SOC
51 help 51 help
52 Say Y here to enable support for external accessory detection 52 Say Y here to enable support for external accessory detection
53 with Wolfson Arizona devices. These are audio CODECs with 53 with Wolfson Arizona devices. These are audio CODECs with
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 414aed50b1bc..dc357a4051f6 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -27,12 +27,18 @@
27#include <linux/regulator/consumer.h> 27#include <linux/regulator/consumer.h>
28#include <linux/extcon.h> 28#include <linux/extcon.h>
29 29
30#include <sound/soc.h>
31
30#include <linux/mfd/arizona/core.h> 32#include <linux/mfd/arizona/core.h>
31#include <linux/mfd/arizona/pdata.h> 33#include <linux/mfd/arizona/pdata.h>
32#include <linux/mfd/arizona/registers.h> 34#include <linux/mfd/arizona/registers.h>
33 35
34#define ARIZONA_NUM_BUTTONS 6 36#define ARIZONA_NUM_BUTTONS 6
35 37
38#define ARIZONA_ACCDET_MODE_MIC 0
39#define ARIZONA_ACCDET_MODE_HPL 1
40#define ARIZONA_ACCDET_MODE_HPR 2
41
36struct arizona_extcon_info { 42struct arizona_extcon_info {
37 struct device *dev; 43 struct device *dev;
38 struct arizona *arizona; 44 struct arizona *arizona;
@@ -45,17 +51,28 @@ struct arizona_extcon_info {
45 int micd_num_modes; 51 int micd_num_modes;
46 52
47 bool micd_reva; 53 bool micd_reva;
54 bool micd_clamp;
55
56 struct delayed_work hpdet_work;
57
58 bool hpdet_active;
59 bool hpdet_done;
60
61 int num_hpdet_res;
62 unsigned int hpdet_res[3];
48 63
49 bool mic; 64 bool mic;
50 bool detecting; 65 bool detecting;
51 int jack_flips; 66 int jack_flips;
52 67
68 int hpdet_ip;
69
53 struct extcon_dev edev; 70 struct extcon_dev edev;
54}; 71};
55 72
56static const struct arizona_micd_config micd_default_modes[] = { 73static const struct arizona_micd_config micd_default_modes[] = {
57 { ARIZONA_ACCDET_SRC, 1 << ARIZONA_MICD_BIAS_SRC_SHIFT, 0 },
58 { 0, 2 << ARIZONA_MICD_BIAS_SRC_SHIFT, 1 }, 74 { 0, 2 << ARIZONA_MICD_BIAS_SRC_SHIFT, 1 },
75 { ARIZONA_ACCDET_SRC, 1 << ARIZONA_MICD_BIAS_SRC_SHIFT, 0 },
59}; 76};
60 77
61static struct { 78static struct {
@@ -73,11 +90,13 @@ static struct {
73#define ARIZONA_CABLE_MECHANICAL 0 90#define ARIZONA_CABLE_MECHANICAL 0
74#define ARIZONA_CABLE_MICROPHONE 1 91#define ARIZONA_CABLE_MICROPHONE 1
75#define ARIZONA_CABLE_HEADPHONE 2 92#define ARIZONA_CABLE_HEADPHONE 2
93#define ARIZONA_CABLE_LINEOUT 3
76 94
77static const char *arizona_cable[] = { 95static const char *arizona_cable[] = {
78 "Mechanical", 96 "Mechanical",
79 "Microphone", 97 "Microphone",
80 "Headphone", 98 "Headphone",
99 "Line-out",
81 NULL, 100 NULL,
82}; 101};
83 102
@@ -85,8 +104,9 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
85{ 104{
86 struct arizona *arizona = info->arizona; 105 struct arizona *arizona = info->arizona;
87 106
88 gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio, 107 if (arizona->pdata.micd_pol_gpio > 0)
89 info->micd_modes[mode].gpio); 108 gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
109 info->micd_modes[mode].gpio);
90 regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1, 110 regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
91 ARIZONA_MICD_BIAS_SRC_MASK, 111 ARIZONA_MICD_BIAS_SRC_MASK,
92 info->micd_modes[mode].bias); 112 info->micd_modes[mode].bias);
@@ -98,19 +118,70 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
98 dev_dbg(arizona->dev, "Set jack polarity to %d\n", mode); 118 dev_dbg(arizona->dev, "Set jack polarity to %d\n", mode);
99} 119}
100 120
121static const char *arizona_extcon_get_micbias(struct arizona_extcon_info *info)
122{
123 switch (info->micd_modes[0].bias >> ARIZONA_MICD_BIAS_SRC_SHIFT) {
124 case 1:
125 return "MICBIAS1";
126 case 2:
127 return "MICBIAS2";
128 case 3:
129 return "MICBIAS3";
130 default:
131 return "MICVDD";
132 }
133}
134
135static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
136{
137 struct arizona *arizona = info->arizona;
138 const char *widget = arizona_extcon_get_micbias(info);
139 struct snd_soc_dapm_context *dapm = arizona->dapm;
140 int ret;
141
142 mutex_lock(&dapm->card->dapm_mutex);
143
144 ret = snd_soc_dapm_force_enable_pin(dapm, widget);
145 if (ret != 0)
146 dev_warn(arizona->dev, "Failed to enable %s: %d\n",
147 widget, ret);
148
149 mutex_unlock(&dapm->card->dapm_mutex);
150
151 snd_soc_dapm_sync(dapm);
152
153 if (!arizona->pdata.micd_force_micbias) {
154 mutex_lock(&dapm->card->dapm_mutex);
155
156 ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
157 if (ret != 0)
158 dev_warn(arizona->dev, "Failed to disable %s: %d\n",
159 widget, ret);
160
161 mutex_unlock(&dapm->card->dapm_mutex);
162
163 snd_soc_dapm_sync(dapm);
164 }
165}
166
101static void arizona_start_mic(struct arizona_extcon_info *info) 167static void arizona_start_mic(struct arizona_extcon_info *info)
102{ 168{
103 struct arizona *arizona = info->arizona; 169 struct arizona *arizona = info->arizona;
104 bool change; 170 bool change;
105 int ret; 171 int ret;
106 172
107 info->detecting = true;
108 info->mic = false;
109 info->jack_flips = 0;
110
111 /* Microphone detection can't use idle mode */ 173 /* Microphone detection can't use idle mode */
112 pm_runtime_get(info->dev); 174 pm_runtime_get(info->dev);
113 175
176 if (info->detecting) {
177 ret = regulator_allow_bypass(info->micvdd, false);
178 if (ret != 0) {
179 dev_err(arizona->dev,
180 "Failed to regulate MICVDD: %d\n",
181 ret);
182 }
183 }
184
114 ret = regulator_enable(info->micvdd); 185 ret = regulator_enable(info->micvdd);
115 if (ret != 0) { 186 if (ret != 0) {
116 dev_err(arizona->dev, "Failed to enable MICVDD: %d\n", 187 dev_err(arizona->dev, "Failed to enable MICVDD: %d\n",
@@ -123,6 +194,12 @@ static void arizona_start_mic(struct arizona_extcon_info *info)
123 regmap_write(arizona->regmap, 0x80, 0x0); 194 regmap_write(arizona->regmap, 0x80, 0x0);
124 } 195 }
125 196
197 regmap_update_bits(arizona->regmap,
198 ARIZONA_ACCESSORY_DETECT_MODE_1,
199 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
200
201 arizona_extcon_pulse_micbias(info);
202
126 regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1, 203 regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
127 ARIZONA_MICD_ENA, ARIZONA_MICD_ENA, 204 ARIZONA_MICD_ENA, ARIZONA_MICD_ENA,
128 &change); 205 &change);
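
arizona_extcon_pulse_micbias(), added above, briefly force-enables the MICBIAS DAPM widget chosen by arizona_extcon_get_micbias() and releases it again unless pdata.micd_force_micbias is set. A standalone sketch of just the bias-source-to-widget mapping follows; here src is a plain integer rather than the bias field shifted down by ARIZONA_MICD_BIAS_SRC_SHIFT as in the driver:

/*
 * Standalone sketch, not kernel code: map the MICD bias source number
 * to the DAPM widget name, mirroring arizona_extcon_get_micbias().
 */
#include <stdio.h>

static const char *micbias_widget(unsigned int src)
{
	switch (src) {
	case 1: return "MICBIAS1";
	case 2: return "MICBIAS2";
	case 3: return "MICBIAS3";
	default: return "MICVDD";	/* fall back to the supply itself */
	}
}

int main(void)
{
	for (unsigned int src = 0; src < 5; src++)
		printf("src=%u -> %s\n", src, micbias_widget(src));
	return 0;
}
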
@@ -135,18 +212,39 @@ static void arizona_start_mic(struct arizona_extcon_info *info)
135static void arizona_stop_mic(struct arizona_extcon_info *info) 212static void arizona_stop_mic(struct arizona_extcon_info *info)
136{ 213{
137 struct arizona *arizona = info->arizona; 214 struct arizona *arizona = info->arizona;
215 const char *widget = arizona_extcon_get_micbias(info);
216 struct snd_soc_dapm_context *dapm = arizona->dapm;
138 bool change; 217 bool change;
218 int ret;
139 219
140 regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1, 220 regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
141 ARIZONA_MICD_ENA, 0, 221 ARIZONA_MICD_ENA, 0,
142 &change); 222 &change);
143 223
224 mutex_lock(&dapm->card->dapm_mutex);
225
226 ret = snd_soc_dapm_disable_pin(dapm, widget);
227 if (ret != 0)
228 dev_warn(arizona->dev,
229 "Failed to disable %s: %d\n",
230 widget, ret);
231
232 mutex_unlock(&dapm->card->dapm_mutex);
233
234 snd_soc_dapm_sync(dapm);
235
144 if (info->micd_reva) { 236 if (info->micd_reva) {
145 regmap_write(arizona->regmap, 0x80, 0x3); 237 regmap_write(arizona->regmap, 0x80, 0x3);
146 regmap_write(arizona->regmap, 0x294, 2); 238 regmap_write(arizona->regmap, 0x294, 2);
147 regmap_write(arizona->regmap, 0x80, 0x0); 239 regmap_write(arizona->regmap, 0x80, 0x0);
148 } 240 }
149 241
242 ret = regulator_allow_bypass(info->micvdd, true);
243 if (ret != 0) {
244 dev_err(arizona->dev, "Failed to bypass MICVDD: %d\n",
245 ret);
246 }
247
150 if (change) { 248 if (change) {
151 regulator_disable(info->micvdd); 249 regulator_disable(info->micvdd);
152 pm_runtime_mark_last_busy(info->dev); 250 pm_runtime_mark_last_busy(info->dev);
@@ -154,6 +252,478 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
154 } 252 }
155} 253}
156 254
255static struct {
256 unsigned int factor_a;
257 unsigned int factor_b;
258} arizona_hpdet_b_ranges[] = {
259 { 5528, 362464 },
260 { 11084, 6186851 },
261 { 11065, 65460395 },
262};
263
264static struct {
265 int min;
266 int max;
267} arizona_hpdet_c_ranges[] = {
268 { 0, 30 },
269 { 8, 100 },
270 { 100, 1000 },
271 { 1000, 10000 },
272};
273
274static int arizona_hpdet_read(struct arizona_extcon_info *info)
275{
276 struct arizona *arizona = info->arizona;
277 unsigned int val, range;
278 int ret;
279
280 ret = regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_2, &val);
281 if (ret != 0) {
282 dev_err(arizona->dev, "Failed to read HPDET status: %d\n",
283 ret);
284 return ret;
285 }
286
287 switch (info->hpdet_ip) {
288 case 0:
289 if (!(val & ARIZONA_HP_DONE)) {
290 dev_err(arizona->dev, "HPDET did not complete: %x\n",
291 val);
292 return -EAGAIN;
293 }
294
295 val &= ARIZONA_HP_LVL_MASK;
296 break;
297
298 case 1:
299 if (!(val & ARIZONA_HP_DONE_B)) {
300 dev_err(arizona->dev, "HPDET did not complete: %x\n",
301 val);
302 return -EAGAIN;
303 }
304
305 ret = regmap_read(arizona->regmap, ARIZONA_HP_DACVAL, &val);
306 if (ret != 0) {
307 dev_err(arizona->dev, "Failed to read HP value: %d\n",
308 ret);
309 return -EAGAIN;
310 }
311
312 regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
313 &range);
314 range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
315 >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
316
317 if (range < ARRAY_SIZE(arizona_hpdet_b_ranges) - 1 &&
318 (val < 100 || val > 0x3fb)) {
319 range++;
320 dev_dbg(arizona->dev, "Moving to HPDET range %d\n",
321 range);
322 regmap_update_bits(arizona->regmap,
323 ARIZONA_HEADPHONE_DETECT_1,
324 ARIZONA_HP_IMPEDANCE_RANGE_MASK,
325 range <<
326 ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
327 return -EAGAIN;
328 }
329
330 /* If we go out of range report top of range */
331 if (val < 100 || val > 0x3fb) {
332 dev_dbg(arizona->dev, "Measurement out of range\n");
333 return 10000;
334 }
335
336 dev_dbg(arizona->dev, "HPDET read %d in range %d\n",
337 val, range);
338
339 val = arizona_hpdet_b_ranges[range].factor_b
340 / ((val * 100) -
341 arizona_hpdet_b_ranges[range].factor_a);
342 break;
343
344 default:
345 dev_warn(arizona->dev, "Unknown HPDET IP revision %d\n",
346 info->hpdet_ip);
347 case 2:
348 if (!(val & ARIZONA_HP_DONE_B)) {
349 dev_err(arizona->dev, "HPDET did not complete: %x\n",
350 val);
351 return -EAGAIN;
352 }
353
354 val &= ARIZONA_HP_LVL_B_MASK;
355
356 regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
357 &range);
358 range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
359 >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
360
361 /* Skip up or down a range? */
362 if (range && (val < arizona_hpdet_c_ranges[range].min)) {
363 range--;
364 dev_dbg(arizona->dev, "Moving to HPDET range %d-%d\n",
365 arizona_hpdet_c_ranges[range].min,
366 arizona_hpdet_c_ranges[range].max);
367 regmap_update_bits(arizona->regmap,
368 ARIZONA_HEADPHONE_DETECT_1,
369 ARIZONA_HP_IMPEDANCE_RANGE_MASK,
370 range <<
371 ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
372 return -EAGAIN;
373 }
374
375 if (range < ARRAY_SIZE(arizona_hpdet_c_ranges) - 1 &&
376 (val >= arizona_hpdet_c_ranges[range].max)) {
377 range++;
378 dev_dbg(arizona->dev, "Moving to HPDET range %d-%d\n",
379 arizona_hpdet_c_ranges[range].min,
380 arizona_hpdet_c_ranges[range].max);
381 regmap_update_bits(arizona->regmap,
382 ARIZONA_HEADPHONE_DETECT_1,
383 ARIZONA_HP_IMPEDANCE_RANGE_MASK,
384 range <<
385 ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
386 return -EAGAIN;
387 }
388 }
389
390 dev_dbg(arizona->dev, "HP impedance %d ohms\n", val);
391 return val;
392}
393
394static int arizona_hpdet_do_id(struct arizona_extcon_info *info, int *reading)
395{
396 struct arizona *arizona = info->arizona;
397 int id_gpio = arizona->pdata.hpdet_id_gpio;
398
399 /*
400 * If we're using HPDET for accessory identification we need
401 * to take multiple measurements, step through them in sequence.
402 */
403 if (arizona->pdata.hpdet_acc_id) {
404 info->hpdet_res[info->num_hpdet_res++] = *reading;
405
406 /*
407	 * If the impedance is too high don't measure the
408 * second ground.
409 */
410 if (info->num_hpdet_res == 1 && *reading >= 45) {
411 dev_dbg(arizona->dev, "Skipping ground flip\n");
412 info->hpdet_res[info->num_hpdet_res++] = *reading;
413 }
414
415 if (info->num_hpdet_res == 1) {
416 dev_dbg(arizona->dev, "Flipping ground\n");
417
418 regmap_update_bits(arizona->regmap,
419 ARIZONA_ACCESSORY_DETECT_MODE_1,
420 ARIZONA_ACCDET_SRC,
421 ~info->micd_modes[0].src);
422
423 regmap_update_bits(arizona->regmap,
424 ARIZONA_HEADPHONE_DETECT_1,
425 ARIZONA_HP_POLL, ARIZONA_HP_POLL);
426 return -EAGAIN;
427 }
428
429 /* Only check the mic directly if we didn't already ID it */
430 if (id_gpio && info->num_hpdet_res == 2 &&
431 !((info->hpdet_res[0] > info->hpdet_res[1] * 2))) {
432 dev_dbg(arizona->dev, "Measuring mic\n");
433
434 regmap_update_bits(arizona->regmap,
435 ARIZONA_ACCESSORY_DETECT_MODE_1,
436 ARIZONA_ACCDET_MODE_MASK |
437 ARIZONA_ACCDET_SRC,
438 ARIZONA_ACCDET_MODE_HPR |
439 info->micd_modes[0].src);
440
441 gpio_set_value_cansleep(id_gpio, 1);
442
443 regmap_update_bits(arizona->regmap,
444 ARIZONA_HEADPHONE_DETECT_1,
445 ARIZONA_HP_POLL, ARIZONA_HP_POLL);
446 return -EAGAIN;
447 }
448
449 /* OK, got both. Now, compare... */
450 dev_dbg(arizona->dev, "HPDET measured %d %d %d\n",
451 info->hpdet_res[0], info->hpdet_res[1],
452 info->hpdet_res[2]);
453
454
455 /* Take the headphone impedance for the main report */
456 *reading = info->hpdet_res[0];
457
458 /*
459 * Either the two grounds measure differently or we
460 * measure the mic as high impedance.
461 */
462 if ((info->hpdet_res[0] > info->hpdet_res[1] * 2) ||
463 (id_gpio && info->hpdet_res[2] > 10)) {
464 dev_dbg(arizona->dev, "Detected mic\n");
465 info->mic = true;
466 info->detecting = true;
467 } else {
468 dev_dbg(arizona->dev, "Detected headphone\n");
469 }
470
471 /* Make sure everything is reset back to the real polarity */
472 regmap_update_bits(arizona->regmap,
473 ARIZONA_ACCESSORY_DETECT_MODE_1,
474 ARIZONA_ACCDET_SRC,
475 info->micd_modes[0].src);
476 }
477
478 return 0;
479}
480
481static irqreturn_t arizona_hpdet_irq(int irq, void *data)
482{
483 struct arizona_extcon_info *info = data;
484 struct arizona *arizona = info->arizona;
485 int id_gpio = arizona->pdata.hpdet_id_gpio;
486 int report = ARIZONA_CABLE_HEADPHONE;
487 unsigned int val;
488 int ret, reading;
489
490 mutex_lock(&info->lock);
491
492 /* If we got a spurious IRQ for some reason then ignore it */
493 if (!info->hpdet_active) {
494 dev_warn(arizona->dev, "Spurious HPDET IRQ\n");
495 mutex_unlock(&info->lock);
496 return IRQ_NONE;
497 }
498
499 /* If the cable was removed while measuring ignore the result */
500 ret = extcon_get_cable_state_(&info->edev, ARIZONA_CABLE_MECHANICAL);
501 if (ret < 0) {
502 dev_err(arizona->dev, "Failed to check cable state: %d\n",
503 ret);
504 goto out;
505 } else if (!ret) {
506 dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n");
507 goto done;
508 }
509
510 ret = arizona_hpdet_read(info);
511 if (ret == -EAGAIN) {
512 goto out;
513 } else if (ret < 0) {
514 goto done;
515 }
516 reading = ret;
517
518 /* Reset back to starting range */
519 regmap_update_bits(arizona->regmap,
520 ARIZONA_HEADPHONE_DETECT_1,
521 ARIZONA_HP_IMPEDANCE_RANGE_MASK | ARIZONA_HP_POLL,
522 0);
523
524 ret = arizona_hpdet_do_id(info, &reading);
525 if (ret == -EAGAIN) {
526 goto out;
527 } else if (ret < 0) {
528 goto done;
529 }
530
531	 /* Report high impedance cables as line outputs */
532 if (reading >= 5000)
533 report = ARIZONA_CABLE_LINEOUT;
534 else
535 report = ARIZONA_CABLE_HEADPHONE;
536
537 ret = extcon_set_cable_state_(&info->edev, report, true);
538 if (ret != 0)
539 dev_err(arizona->dev, "Failed to report HP/line: %d\n",
540 ret);
541
542 mutex_lock(&arizona->dapm->card->dapm_mutex);
543
544 ret = regmap_read(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, &val);
545 if (ret != 0) {
546 dev_err(arizona->dev, "Failed to read output enables: %d\n",
547 ret);
548 val = 0;
549 }
550
551 if (!(val & (ARIZONA_OUT1L_ENA | ARIZONA_OUT1R_ENA))) {
552 ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000, 0);
553 if (ret != 0)
554 dev_warn(arizona->dev, "Failed to undo magic: %d\n",
555 ret);
556
557 ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000, 0);
558 if (ret != 0)
559 dev_warn(arizona->dev, "Failed to undo magic: %d\n",
560 ret);
561 }
562
563 mutex_unlock(&arizona->dapm->card->dapm_mutex);
564
565done:
566 if (id_gpio)
567 gpio_set_value_cansleep(id_gpio, 0);
568
569 /* Revert back to MICDET mode */
570 regmap_update_bits(arizona->regmap,
571 ARIZONA_ACCESSORY_DETECT_MODE_1,
572 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
573
574 /* If we have a mic then reenable MICDET */
575 if (info->mic)
576 arizona_start_mic(info);
577
578 if (info->hpdet_active) {
579 pm_runtime_put_autosuspend(info->dev);
580 info->hpdet_active = false;
581 }
582
583 info->hpdet_done = true;
584
585out:
586 mutex_unlock(&info->lock);
587
588 return IRQ_HANDLED;
589}
590
591static void arizona_identify_headphone(struct arizona_extcon_info *info)
592{
593 struct arizona *arizona = info->arizona;
594 int ret;
595
596 if (info->hpdet_done)
597 return;
598
599 dev_dbg(arizona->dev, "Starting HPDET\n");
600
601 /* Make sure we keep the device enabled during the measurement */
602 pm_runtime_get(info->dev);
603
604 info->hpdet_active = true;
605
606 if (info->mic)
607 arizona_stop_mic(info);
608
609 ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000, 0x4000);
610 if (ret != 0)
611 dev_warn(arizona->dev, "Failed to do magic: %d\n", ret);
612
613 ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000, 0x4000);
614 if (ret != 0)
615 dev_warn(arizona->dev, "Failed to do magic: %d\n", ret);
616
617 ret = regmap_update_bits(arizona->regmap,
618 ARIZONA_ACCESSORY_DETECT_MODE_1,
619 ARIZONA_ACCDET_MODE_MASK,
620 ARIZONA_ACCDET_MODE_HPL);
621 if (ret != 0) {
622 dev_err(arizona->dev, "Failed to set HPDETL mode: %d\n", ret);
623 goto err;
624 }
625
626 ret = regmap_update_bits(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
627 ARIZONA_HP_POLL, ARIZONA_HP_POLL);
628 if (ret != 0) {
629 dev_err(arizona->dev, "Can't start HPDETL measurement: %d\n",
630 ret);
631 goto err;
632 }
633
634 return;
635
636err:
637 regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
638 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
639
640 /* Just report headphone */
641 ret = extcon_update_state(&info->edev,
642 1 << ARIZONA_CABLE_HEADPHONE,
643 1 << ARIZONA_CABLE_HEADPHONE);
644 if (ret != 0)
645 dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
646
647 if (info->mic)
648 arizona_start_mic(info);
649
650 info->hpdet_active = false;
651}
652
653static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info)
654{
655 struct arizona *arizona = info->arizona;
656 unsigned int val;
657 int ret;
658
659 dev_dbg(arizona->dev, "Starting identification via HPDET\n");
660
661 /* Make sure we keep the device enabled during the measurement */
662 pm_runtime_get_sync(info->dev);
663
664 info->hpdet_active = true;
665
666 arizona_extcon_pulse_micbias(info);
667
668 mutex_lock(&arizona->dapm->card->dapm_mutex);
669
670 ret = regmap_read(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, &val);
671 if (ret != 0) {
672 dev_err(arizona->dev, "Failed to read output enables: %d\n",
673 ret);
674 val = 0;
675 }
676
677 if (!(val & (ARIZONA_OUT1L_ENA | ARIZONA_OUT1R_ENA))) {
678 ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000,
679 0x4000);
680 if (ret != 0)
681 dev_warn(arizona->dev, "Failed to do magic: %d\n",
682 ret);
683
684 ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000,
685 0x4000);
686 if (ret != 0)
687 dev_warn(arizona->dev, "Failed to do magic: %d\n",
688 ret);
689 }
690
691 mutex_unlock(&arizona->dapm->card->dapm_mutex);
692
693 ret = regmap_update_bits(arizona->regmap,
694 ARIZONA_ACCESSORY_DETECT_MODE_1,
695 ARIZONA_ACCDET_SRC | ARIZONA_ACCDET_MODE_MASK,
696 info->micd_modes[0].src |
697 ARIZONA_ACCDET_MODE_HPL);
698 if (ret != 0) {
699 dev_err(arizona->dev, "Failed to set HPDETL mode: %d\n", ret);
700 goto err;
701 }
702
703 ret = regmap_update_bits(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
704 ARIZONA_HP_POLL, ARIZONA_HP_POLL);
705 if (ret != 0) {
706 dev_err(arizona->dev, "Can't start HPDETL measurement: %d\n",
707 ret);
708 goto err;
709 }
710
711 return;
712
713err:
714 regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
715 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
716
717 /* Just report headphone */
718 ret = extcon_update_state(&info->edev,
719 1 << ARIZONA_CABLE_HEADPHONE,
720 1 << ARIZONA_CABLE_HEADPHONE);
721 if (ret != 0)
722 dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
723
724 info->hpdet_active = false;
725}
726
157static irqreturn_t arizona_micdet(int irq, void *data) 727static irqreturn_t arizona_micdet(int irq, void *data)
158{ 728{
159 struct arizona_extcon_info *info = data; 729 struct arizona_extcon_info *info = data;
@@ -187,16 +757,23 @@ static irqreturn_t arizona_micdet(int irq, void *data)
187 757
188 /* If we got a high impedance we should have a headset, report it. */ 758 /* If we got a high impedance we should have a headset, report it. */
189 if (info->detecting && (val & 0x400)) { 759 if (info->detecting && (val & 0x400)) {
760 arizona_identify_headphone(info);
761
190 ret = extcon_update_state(&info->edev, 762 ret = extcon_update_state(&info->edev,
191 1 << ARIZONA_CABLE_MICROPHONE | 763 1 << ARIZONA_CABLE_MICROPHONE,
192 1 << ARIZONA_CABLE_HEADPHONE, 764 1 << ARIZONA_CABLE_MICROPHONE);
193 1 << ARIZONA_CABLE_MICROPHONE |
194 1 << ARIZONA_CABLE_HEADPHONE);
195 765
196 if (ret != 0) 766 if (ret != 0)
197 dev_err(arizona->dev, "Headset report failed: %d\n", 767 dev_err(arizona->dev, "Headset report failed: %d\n",
198 ret); 768 ret);
199 769
770 /* Don't need to regulate for button detection */
771 ret = regulator_allow_bypass(info->micvdd, false);
772 if (ret != 0) {
773 dev_err(arizona->dev, "Failed to bypass MICVDD: %d\n",
774 ret);
775 }
776
200 info->mic = true; 777 info->mic = true;
201 info->detecting = false; 778 info->detecting = false;
202 goto handled; 779 goto handled;
@@ -209,20 +786,13 @@ static irqreturn_t arizona_micdet(int irq, void *data)
209 * impedance then give up and report headphones. 786 * impedance then give up and report headphones.
210 */ 787 */
211 if (info->detecting && (val & 0x3f8)) { 788 if (info->detecting && (val & 0x3f8)) {
212 info->jack_flips++;
213
214 if (info->jack_flips >= info->micd_num_modes) { 789 if (info->jack_flips >= info->micd_num_modes) {
215 dev_dbg(arizona->dev, "Detected headphone\n"); 790 dev_dbg(arizona->dev, "Detected HP/line\n");
791 arizona_identify_headphone(info);
792
216 info->detecting = false; 793 info->detecting = false;
217 arizona_stop_mic(info);
218 794
219 ret = extcon_set_cable_state_(&info->edev, 795 arizona_stop_mic(info);
220 ARIZONA_CABLE_HEADPHONE,
221 true);
222 if (ret != 0)
223 dev_err(arizona->dev,
224 "Headphone report failed: %d\n",
225 ret);
226 } else { 796 } else {
227 info->micd_mode++; 797 info->micd_mode++;
228 if (info->micd_mode == info->micd_num_modes) 798 if (info->micd_mode == info->micd_num_modes)
@@ -258,13 +828,7 @@ static irqreturn_t arizona_micdet(int irq, void *data)
258 info->detecting = false; 828 info->detecting = false;
259 arizona_stop_mic(info); 829 arizona_stop_mic(info);
260 830
261 ret = extcon_set_cable_state_(&info->edev, 831 arizona_identify_headphone(info);
262 ARIZONA_CABLE_HEADPHONE,
263 true);
264 if (ret != 0)
265 dev_err(arizona->dev,
266 "Headphone report failed: %d\n",
267 ret);
268 } else { 832 } else {
269 dev_warn(arizona->dev, "Button with no mic: %x\n", 833 dev_warn(arizona->dev, "Button with no mic: %x\n",
270 val); 834 val);
@@ -275,6 +839,7 @@ static irqreturn_t arizona_micdet(int irq, void *data)
275 input_report_key(info->input, 839 input_report_key(info->input,
276 arizona_lvl_to_key[i].report, 0); 840 arizona_lvl_to_key[i].report, 0);
277 input_sync(info->input); 841 input_sync(info->input);
842 arizona_extcon_pulse_micbias(info);
278 } 843 }
279 844
280handled: 845handled:
@@ -284,17 +849,38 @@ handled:
284 return IRQ_HANDLED; 849 return IRQ_HANDLED;
285} 850}
286 851
852static void arizona_hpdet_work(struct work_struct *work)
853{
854 struct arizona_extcon_info *info = container_of(work,
855 struct arizona_extcon_info,
856 hpdet_work.work);
857
858 mutex_lock(&info->lock);
859 arizona_start_hpdet_acc_id(info);
860 mutex_unlock(&info->lock);
861}
862
287static irqreturn_t arizona_jackdet(int irq, void *data) 863static irqreturn_t arizona_jackdet(int irq, void *data)
288{ 864{
289 struct arizona_extcon_info *info = data; 865 struct arizona_extcon_info *info = data;
290 struct arizona *arizona = info->arizona; 866 struct arizona *arizona = info->arizona;
291 unsigned int val; 867 unsigned int val, present, mask;
292 int ret, i; 868 int ret, i;
293 869
294 pm_runtime_get_sync(info->dev); 870 pm_runtime_get_sync(info->dev);
295 871
872 cancel_delayed_work_sync(&info->hpdet_work);
873
296 mutex_lock(&info->lock); 874 mutex_lock(&info->lock);
297 875
876 if (arizona->pdata.jd_gpio5) {
877 mask = ARIZONA_MICD_CLAMP_STS;
878 present = 0;
879 } else {
880 mask = ARIZONA_JD1_STS;
881 present = ARIZONA_JD1_STS;
882 }
883
298 ret = regmap_read(arizona->regmap, ARIZONA_AOD_IRQ_RAW_STATUS, &val); 884 ret = regmap_read(arizona->regmap, ARIZONA_AOD_IRQ_RAW_STATUS, &val);
299 if (ret != 0) { 885 if (ret != 0) {
300 dev_err(arizona->dev, "Failed to read jackdet status: %d\n", 886 dev_err(arizona->dev, "Failed to read jackdet status: %d\n",
@@ -304,7 +890,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
304 return IRQ_NONE; 890 return IRQ_NONE;
305 } 891 }
306 892
307 if (val & ARIZONA_JD1_STS) { 893 if ((val & mask) == present) {
308 dev_dbg(arizona->dev, "Detected jack\n"); 894 dev_dbg(arizona->dev, "Detected jack\n");
309 ret = extcon_set_cable_state_(&info->edev, 895 ret = extcon_set_cable_state_(&info->edev,
310 ARIZONA_CABLE_MECHANICAL, true); 896 ARIZONA_CABLE_MECHANICAL, true);
@@ -313,12 +899,31 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
313 dev_err(arizona->dev, "Mechanical report failed: %d\n", 899 dev_err(arizona->dev, "Mechanical report failed: %d\n",
314 ret); 900 ret);
315 901
316 arizona_start_mic(info); 902 if (!arizona->pdata.hpdet_acc_id) {
903 info->detecting = true;
904 info->mic = false;
905 info->jack_flips = 0;
906
907 arizona_start_mic(info);
908 } else {
909 schedule_delayed_work(&info->hpdet_work,
910 msecs_to_jiffies(250));
911 }
912
913 regmap_update_bits(arizona->regmap,
914 ARIZONA_JACK_DETECT_DEBOUNCE,
915 ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB, 0);
317 } else { 916 } else {
318 dev_dbg(arizona->dev, "Detected jack removal\n"); 917 dev_dbg(arizona->dev, "Detected jack removal\n");
319 918
320 arizona_stop_mic(info); 919 arizona_stop_mic(info);
321 920
921 info->num_hpdet_res = 0;
922 for (i = 0; i < ARRAY_SIZE(info->hpdet_res); i++)
923 info->hpdet_res[i] = 0;
924 info->mic = false;
925 info->hpdet_done = false;
926
322 for (i = 0; i < ARIZONA_NUM_BUTTONS; i++) 927 for (i = 0; i < ARIZONA_NUM_BUTTONS; i++)
323 input_report_key(info->input, 928 input_report_key(info->input,
324 arizona_lvl_to_key[i].report, 0); 929 arizona_lvl_to_key[i].report, 0);
@@ -328,8 +933,20 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
328 if (ret != 0) 933 if (ret != 0)
329 dev_err(arizona->dev, "Removal report failed: %d\n", 934 dev_err(arizona->dev, "Removal report failed: %d\n",
330 ret); 935 ret);
936
937 regmap_update_bits(arizona->regmap,
938 ARIZONA_JACK_DETECT_DEBOUNCE,
939 ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB,
940 ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB);
331 } 941 }
332 942
943 /* Clear trig_sts to make sure DCVDD is not forced up */
944 regmap_write(arizona->regmap, ARIZONA_AOD_WKUP_AND_TRIG,
945 ARIZONA_MICD_CLAMP_FALL_TRIG_STS |
946 ARIZONA_MICD_CLAMP_RISE_TRIG_STS |
947 ARIZONA_JD1_FALL_TRIG_STS |
948 ARIZONA_JD1_RISE_TRIG_STS);
949
333 mutex_unlock(&info->lock); 950 mutex_unlock(&info->lock);
334 951
335 pm_runtime_mark_last_busy(info->dev); 952 pm_runtime_mark_last_busy(info->dev);
@@ -343,8 +960,12 @@ static int arizona_extcon_probe(struct platform_device *pdev)
343 struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); 960 struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
344 struct arizona_pdata *pdata; 961 struct arizona_pdata *pdata;
345 struct arizona_extcon_info *info; 962 struct arizona_extcon_info *info;
963 int jack_irq_fall, jack_irq_rise;
346 int ret, mode, i; 964 int ret, mode, i;
347 965
966 if (!arizona->dapm || !arizona->dapm->card)
967 return -EPROBE_DEFER;
968
348 pdata = dev_get_platdata(arizona->dev); 969 pdata = dev_get_platdata(arizona->dev);
349 970
350 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 971 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -364,7 +985,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
364 mutex_init(&info->lock); 985 mutex_init(&info->lock);
365 info->arizona = arizona; 986 info->arizona = arizona;
366 info->dev = &pdev->dev; 987 info->dev = &pdev->dev;
367 info->detecting = true; 988 INIT_DELAYED_WORK(&info->hpdet_work, arizona_hpdet_work);
368 platform_set_drvdata(pdev, info); 989 platform_set_drvdata(pdev, info);
369 990
370 switch (arizona->type) { 991 switch (arizona->type) {
@@ -374,6 +995,8 @@ static int arizona_extcon_probe(struct platform_device *pdev)
374 info->micd_reva = true; 995 info->micd_reva = true;
375 break; 996 break;
376 default: 997 default:
998 info->micd_clamp = true;
999 info->hpdet_ip = 1;
377 break; 1000 break;
378 } 1001 }
379 break; 1002 break;
@@ -416,9 +1039,64 @@ static int arizona_extcon_probe(struct platform_device *pdev)
416 } 1039 }
417 } 1040 }
418 1041
1042 if (arizona->pdata.hpdet_id_gpio > 0) {
1043 ret = devm_gpio_request_one(&pdev->dev,
1044 arizona->pdata.hpdet_id_gpio,
1045 GPIOF_OUT_INIT_LOW,
1046 "HPDET");
1047 if (ret != 0) {
1048 dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
1049 arizona->pdata.hpdet_id_gpio, ret);
1050 goto err_register;
1051 }
1052 }
1053
1054 if (arizona->pdata.micd_bias_start_time)
1055 regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
1056 ARIZONA_MICD_BIAS_STARTTIME_MASK,
1057 arizona->pdata.micd_bias_start_time
1058 << ARIZONA_MICD_BIAS_STARTTIME_SHIFT);
1059
1060 if (arizona->pdata.micd_rate)
1061 regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
1062 ARIZONA_MICD_RATE_MASK,
1063 arizona->pdata.micd_rate
1064 << ARIZONA_MICD_RATE_SHIFT);
1065
1066 if (arizona->pdata.micd_dbtime)
1067 regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
1068 ARIZONA_MICD_DBTIME_MASK,
1069 arizona->pdata.micd_dbtime
1070 << ARIZONA_MICD_DBTIME_SHIFT);
1071
1072 /*
1073 * If we have a clamp, use it, activating it in conjunction with
1074 * GPIO5 if that is connected for jack detect operation.
1075 */
1076 if (info->micd_clamp) {
1077 if (arizona->pdata.jd_gpio5) {
1078 /* Put the GPIO into input mode */
1079 regmap_write(arizona->regmap, ARIZONA_GPIO5_CTRL,
1080 0xc101);
1081
1082 regmap_update_bits(arizona->regmap,
1083 ARIZONA_MICD_CLAMP_CONTROL,
1084 ARIZONA_MICD_CLAMP_MODE_MASK, 0x9);
1085 } else {
1086 regmap_update_bits(arizona->regmap,
1087 ARIZONA_MICD_CLAMP_CONTROL,
1088 ARIZONA_MICD_CLAMP_MODE_MASK, 0x4);
1089 }
1090
1091 regmap_update_bits(arizona->regmap,
1092 ARIZONA_JACK_DETECT_DEBOUNCE,
1093 ARIZONA_MICD_CLAMP_DB,
1094 ARIZONA_MICD_CLAMP_DB);
1095 }
1096
419 arizona_extcon_set_mode(info, 0); 1097 arizona_extcon_set_mode(info, 0);
420 1098
421 info->input = input_allocate_device(); 1099 info->input = devm_input_allocate_device(&pdev->dev);
422 if (!info->input) { 1100 if (!info->input) {
423 dev_err(arizona->dev, "Can't allocate input dev\n"); 1101 dev_err(arizona->dev, "Can't allocate input dev\n");
424 ret = -ENOMEM; 1102 ret = -ENOMEM;
@@ -436,7 +1114,15 @@ static int arizona_extcon_probe(struct platform_device *pdev)
436 pm_runtime_idle(&pdev->dev); 1114 pm_runtime_idle(&pdev->dev);
437 pm_runtime_get_sync(&pdev->dev); 1115 pm_runtime_get_sync(&pdev->dev);
438 1116
439 ret = arizona_request_irq(arizona, ARIZONA_IRQ_JD_RISE, 1117 if (arizona->pdata.jd_gpio5) {
1118 jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
1119 jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
1120 } else {
1121 jack_irq_rise = ARIZONA_IRQ_JD_RISE;
1122 jack_irq_fall = ARIZONA_IRQ_JD_FALL;
1123 }
1124
1125 ret = arizona_request_irq(arizona, jack_irq_rise,
440 "JACKDET rise", arizona_jackdet, info); 1126 "JACKDET rise", arizona_jackdet, info);
441 if (ret != 0) { 1127 if (ret != 0) {
442 dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n", 1128 dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
@@ -444,21 +1130,21 @@ static int arizona_extcon_probe(struct platform_device *pdev)
444 goto err_input; 1130 goto err_input;
445 } 1131 }
446 1132
447 ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 1); 1133 ret = arizona_set_irq_wake(arizona, jack_irq_rise, 1);
448 if (ret != 0) { 1134 if (ret != 0) {
449 dev_err(&pdev->dev, "Failed to set JD rise IRQ wake: %d\n", 1135 dev_err(&pdev->dev, "Failed to set JD rise IRQ wake: %d\n",
450 ret); 1136 ret);
451 goto err_rise; 1137 goto err_rise;
452 } 1138 }
453 1139
454 ret = arizona_request_irq(arizona, ARIZONA_IRQ_JD_FALL, 1140 ret = arizona_request_irq(arizona, jack_irq_fall,
455 "JACKDET fall", arizona_jackdet, info); 1141 "JACKDET fall", arizona_jackdet, info);
456 if (ret != 0) { 1142 if (ret != 0) {
457 dev_err(&pdev->dev, "Failed to get JD fall IRQ: %d\n", ret); 1143 dev_err(&pdev->dev, "Failed to get JD fall IRQ: %d\n", ret);
458 goto err_rise_wake; 1144 goto err_rise_wake;
459 } 1145 }
460 1146
461 ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 1); 1147 ret = arizona_set_irq_wake(arizona, jack_irq_fall, 1);
462 if (ret != 0) { 1148 if (ret != 0) {
463 dev_err(&pdev->dev, "Failed to set JD fall IRQ wake: %d\n", 1149 dev_err(&pdev->dev, "Failed to set JD fall IRQ wake: %d\n",
464 ret); 1150 ret);
@@ -472,11 +1158,12 @@ static int arizona_extcon_probe(struct platform_device *pdev)
472 goto err_fall_wake; 1158 goto err_fall_wake;
473 } 1159 }
474 1160
475 regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1, 1161 ret = arizona_request_irq(arizona, ARIZONA_IRQ_HPDET,
476 ARIZONA_MICD_BIAS_STARTTIME_MASK | 1162 "HPDET", arizona_hpdet_irq, info);
477 ARIZONA_MICD_RATE_MASK, 1163 if (ret != 0) {
478 7 << ARIZONA_MICD_BIAS_STARTTIME_SHIFT | 1164 dev_err(&pdev->dev, "Failed to get HPDET IRQ: %d\n", ret);
479 8 << ARIZONA_MICD_RATE_SHIFT); 1165 goto err_micdet;
1166 }
480 1167
481 arizona_clk32k_enable(arizona); 1168 arizona_clk32k_enable(arizona);
482 regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_DEBOUNCE, 1169 regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_DEBOUNCE,
@@ -494,23 +1181,24 @@ static int arizona_extcon_probe(struct platform_device *pdev)
494 ret = input_register_device(info->input); 1181 ret = input_register_device(info->input);
495 if (ret) { 1182 if (ret) {
496 dev_err(&pdev->dev, "Can't register input device: %d\n", ret); 1183 dev_err(&pdev->dev, "Can't register input device: %d\n", ret);
497 goto err_micdet; 1184 goto err_hpdet;
498 } 1185 }
499 1186
500 return 0; 1187 return 0;
501 1188
1189err_hpdet:
1190 arizona_free_irq(arizona, ARIZONA_IRQ_HPDET, info);
502err_micdet: 1191err_micdet:
503 arizona_free_irq(arizona, ARIZONA_IRQ_MICDET, info); 1192 arizona_free_irq(arizona, ARIZONA_IRQ_MICDET, info);
504err_fall_wake: 1193err_fall_wake:
505 arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 0); 1194 arizona_set_irq_wake(arizona, jack_irq_fall, 0);
506err_fall: 1195err_fall:
507 arizona_free_irq(arizona, ARIZONA_IRQ_JD_FALL, info); 1196 arizona_free_irq(arizona, jack_irq_fall, info);
508err_rise_wake: 1197err_rise_wake:
509 arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 0); 1198 arizona_set_irq_wake(arizona, jack_irq_rise, 0);
510err_rise: 1199err_rise:
511 arizona_free_irq(arizona, ARIZONA_IRQ_JD_RISE, info); 1200 arizona_free_irq(arizona, jack_irq_rise, info);
512err_input: 1201err_input:
513 input_free_device(info->input);
514err_register: 1202err_register:
515 pm_runtime_disable(&pdev->dev); 1203 pm_runtime_disable(&pdev->dev);
516 extcon_dev_unregister(&info->edev); 1204 extcon_dev_unregister(&info->edev);
@@ -522,18 +1210,32 @@ static int arizona_extcon_remove(struct platform_device *pdev)
522{ 1210{
523 struct arizona_extcon_info *info = platform_get_drvdata(pdev); 1211 struct arizona_extcon_info *info = platform_get_drvdata(pdev);
524 struct arizona *arizona = info->arizona; 1212 struct arizona *arizona = info->arizona;
1213 int jack_irq_rise, jack_irq_fall;
525 1214
526 pm_runtime_disable(&pdev->dev); 1215 pm_runtime_disable(&pdev->dev);
527 1216
528 arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 0); 1217 regmap_update_bits(arizona->regmap,
529 arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 0); 1218 ARIZONA_MICD_CLAMP_CONTROL,
1219 ARIZONA_MICD_CLAMP_MODE_MASK, 0);
1220
1221 if (arizona->pdata.jd_gpio5) {
1222 jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
1223 jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
1224 } else {
1225 jack_irq_rise = ARIZONA_IRQ_JD_RISE;
1226 jack_irq_fall = ARIZONA_IRQ_JD_FALL;
1227 }
1228
1229 arizona_set_irq_wake(arizona, jack_irq_rise, 0);
1230 arizona_set_irq_wake(arizona, jack_irq_fall, 0);
1231 arizona_free_irq(arizona, ARIZONA_IRQ_HPDET, info);
530 arizona_free_irq(arizona, ARIZONA_IRQ_MICDET, info); 1232 arizona_free_irq(arizona, ARIZONA_IRQ_MICDET, info);
531 arizona_free_irq(arizona, ARIZONA_IRQ_JD_RISE, info); 1233 arizona_free_irq(arizona, jack_irq_rise, info);
532 arizona_free_irq(arizona, ARIZONA_IRQ_JD_FALL, info); 1234 arizona_free_irq(arizona, jack_irq_fall, info);
1235 cancel_delayed_work_sync(&info->hpdet_work);
533 regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE, 1236 regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
534 ARIZONA_JD1_ENA, 0); 1237 ARIZONA_JD1_ENA, 0);
535 arizona_clk32k_disable(arizona); 1238 arizona_clk32k_disable(arizona);
536 input_unregister_device(info->input);
537 extcon_dev_unregister(&info->edev); 1239 extcon_dev_unregister(&info->edev);
538 1240
539 return 0; 1241 return 0;
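For illustration only (not part of the patch above): a minimal consumer-side sketch of how another kernel driver of this era might query the cable states that extcon-arizona now reports. The extcon device name "Headset Jack" and the cable name "Headphone" are assumptions made for the example; the real names come from the arizona driver's extcon registration, and error handling is trimmed to the essentials.

#include <linux/extcon.h>
#include <linux/printk.h>

/* Illustrative sketch only: the device and cable names are assumptions. */
static void example_check_headphone(void)
{
	struct extcon_dev *edev;

	/* Look up the extcon device registered by the arizona extcon driver */
	edev = extcon_get_extcon_dev("Headset Jack");	/* assumed name */
	if (!edev)
		return;

	/* A positive return value means the named cable is currently attached */
	if (extcon_get_cable_state(edev, "Headphone") > 0)
		pr_info("example: headphones attached\n");
}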
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 1b14bfcdc176..02bec32adde4 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -29,7 +29,7 @@
29#include <linux/workqueue.h> 29#include <linux/workqueue.h>
30#include <linux/gpio.h> 30#include <linux/gpio.h>
31#include <linux/extcon.h> 31#include <linux/extcon.h>
32#include <linux/extcon/extcon_gpio.h> 32#include <linux/extcon/extcon-gpio.h>
33 33
34struct gpio_extcon_data { 34struct gpio_extcon_data {
35 struct extcon_dev edev; 35 struct extcon_dev edev;
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 8c17b65eb74d..b70e3815c459 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/input.h>
22#include <linux/interrupt.h> 23#include <linux/interrupt.h>
23#include <linux/err.h> 24#include <linux/err.h>
24#include <linux/platform_device.h> 25#include <linux/platform_device.h>
@@ -29,92 +30,7 @@
29#include <linux/irqdomain.h> 30#include <linux/irqdomain.h>
30 31
31#define DEV_NAME "max77693-muic" 32#define DEV_NAME "max77693-muic"
32 33#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
33/* MAX77693 MUIC - STATUS1~3 Register */
34#define STATUS1_ADC_SHIFT (0)
35#define STATUS1_ADCLOW_SHIFT (5)
36#define STATUS1_ADCERR_SHIFT (6)
37#define STATUS1_ADC1K_SHIFT (7)
38#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
39#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
40#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
41#define STATUS1_ADC1K_MASK (0x1 << STATUS1_ADC1K_SHIFT)
42
43#define STATUS2_CHGTYP_SHIFT (0)
44#define STATUS2_CHGDETRUN_SHIFT (3)
45#define STATUS2_DCDTMR_SHIFT (4)
46#define STATUS2_DXOVP_SHIFT (5)
47#define STATUS2_VBVOLT_SHIFT (6)
48#define STATUS2_VIDRM_SHIFT (7)
49#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
50#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
51#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
52#define STATUS2_DXOVP_MASK (0x1 << STATUS2_DXOVP_SHIFT)
53#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
54#define STATUS2_VIDRM_MASK (0x1 << STATUS2_VIDRM_SHIFT)
55
56#define STATUS3_OVP_SHIFT (2)
57#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
58
59/* MAX77693 CDETCTRL1~2 register */
60#define CDETCTRL1_CHGDETEN_SHIFT (0)
61#define CDETCTRL1_CHGTYPMAN_SHIFT (1)
62#define CDETCTRL1_DCDEN_SHIFT (2)
63#define CDETCTRL1_DCD2SCT_SHIFT (3)
64#define CDETCTRL1_CDDELAY_SHIFT (4)
65#define CDETCTRL1_DCDCPL_SHIFT (5)
66#define CDETCTRL1_CDPDET_SHIFT (7)
67#define CDETCTRL1_CHGDETEN_MASK (0x1 << CDETCTRL1_CHGDETEN_SHIFT)
68#define CDETCTRL1_CHGTYPMAN_MASK (0x1 << CDETCTRL1_CHGTYPMAN_SHIFT)
69#define CDETCTRL1_DCDEN_MASK (0x1 << CDETCTRL1_DCDEN_SHIFT)
70#define CDETCTRL1_DCD2SCT_MASK (0x1 << CDETCTRL1_DCD2SCT_SHIFT)
71#define CDETCTRL1_CDDELAY_MASK (0x1 << CDETCTRL1_CDDELAY_SHIFT)
72#define CDETCTRL1_DCDCPL_MASK (0x1 << CDETCTRL1_DCDCPL_SHIFT)
73#define CDETCTRL1_CDPDET_MASK (0x1 << CDETCTRL1_CDPDET_SHIFT)
74
75#define CDETCTRL2_VIDRMEN_SHIFT (1)
76#define CDETCTRL2_DXOVPEN_SHIFT (3)
77#define CDETCTRL2_VIDRMEN_MASK (0x1 << CDETCTRL2_VIDRMEN_SHIFT)
78#define CDETCTRL2_DXOVPEN_MASK (0x1 << CDETCTRL2_DXOVPEN_SHIFT)
79
80/* MAX77693 MUIC - CONTROL1~3 register */
81#define COMN1SW_SHIFT (0)
82#define COMP2SW_SHIFT (3)
83#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
84#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
85#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
86#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
87 | (1 << COMN1SW_SHIFT))
88#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
89 | (2 << COMN1SW_SHIFT))
90#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
91 | (3 << COMN1SW_SHIFT))
92#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
93 | (0 << COMN1SW_SHIFT))
94
95#define CONTROL2_LOWPWR_SHIFT (0)
96#define CONTROL2_ADCEN_SHIFT (1)
97#define CONTROL2_CPEN_SHIFT (2)
98#define CONTROL2_SFOUTASRT_SHIFT (3)
99#define CONTROL2_SFOUTORD_SHIFT (4)
100#define CONTROL2_ACCDET_SHIFT (5)
101#define CONTROL2_USBCPINT_SHIFT (6)
102#define CONTROL2_RCPS_SHIFT (7)
103#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT)
104#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT)
105#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT)
106#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT)
107#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT)
108#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT)
109#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT)
110#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT)
111
112#define CONTROL3_JIGSET_SHIFT (0)
113#define CONTROL3_BTLDSET_SHIFT (2)
114#define CONTROL3_ADCDBSET_SHIFT (4)
115#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
116#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT)
117#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT)
118 34
119enum max77693_muic_adc_debounce_time { 35enum max77693_muic_adc_debounce_time {
120 ADC_DEBOUNCE_TIME_5MS = 0, 36 ADC_DEBOUNCE_TIME_5MS = 0,
@@ -127,14 +43,40 @@ struct max77693_muic_info {
127 struct device *dev; 43 struct device *dev;
128 struct max77693_dev *max77693; 44 struct max77693_dev *max77693;
129 struct extcon_dev *edev; 45 struct extcon_dev *edev;
130 int prev_adc; 46 int prev_cable_type;
131 int prev_adc_gnd; 47 int prev_cable_type_gnd;
132 int prev_chg_type; 48 int prev_chg_type;
49 int prev_button_type;
133 u8 status[2]; 50 u8 status[2];
134 51
135 int irq; 52 int irq;
136 struct work_struct irq_work; 53 struct work_struct irq_work;
137 struct mutex mutex; 54 struct mutex mutex;
55
56 /*
57 * Use a delayed workqueue to detect the cable state and then
58 * notify the cable state to the notifiee/platform through a uevent.
59 * After the platform finishes booting, the extcon provider
60 * driver should notify the cable state to the upper layer.
61 */
62 struct delayed_work wq_detcable;
63
64 /* Button of dock device */
65 struct input_dev *dock;
66
67 /*
68 * Default USB/UART path, either UART/USB or AUX_UART/AUX_USB:
69 * the h/w path of COMP2/COMN1 in the CONTROL1 register.
70 */
71 int path_usb;
72 int path_uart;
73};
74
75enum max77693_muic_cable_group {
76 MAX77693_CABLE_GROUP_ADC = 0,
77 MAX77693_CABLE_GROUP_ADC_GND,
78 MAX77693_CABLE_GROUP_CHG,
79 MAX77693_CABLE_GROUP_VBVOLT,
138}; 80};
139 81
140enum max77693_muic_charger_type { 82enum max77693_muic_charger_type {
@@ -215,27 +157,59 @@ enum max77693_muic_acc_type {
215 157
216 /* The below accessories have same ADC value so ADCLow and 158 /* The below accessories have same ADC value so ADCLow and
217 ADC1K bit is used to separate specific accessory */ 159 ADC1K bit is used to separate specific accessory */
218 MAX77693_MUIC_GND_USB_OTG = 0x100, /* ADC:0x0, ADCLow:0, ADC1K:0 */ 160 MAX77693_MUIC_GND_USB_OTG = 0x100, /* ADC:0x0, VBVolt:0, ADCLow:0, ADC1K:0 */
219 MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* ADC:0x0, ADCLow:1, ADC1K:0 */ 161 MAX77693_MUIC_GND_USB_OTG_VB = 0x104, /* ADC:0x0, VBVolt:1, ADCLow:0, ADC1K:0 */
220 MAX77693_MUIC_GND_MHL_CABLE = 0x103, /* ADC:0x0, ADCLow:1, ADC1K:1 */ 162 MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* ADC:0x0, VBVolt:0, ADCLow:1, ADC1K:0 */
 163 MAX77693_MUIC_GND_MHL = 0x103, /* ADC:0x0, VBVolt:0, ADCLow:1, ADC1K:1 */
 164 MAX77693_MUIC_GND_MHL_VB = 0x107, /* ADC:0x0, VBVolt:1, ADCLow:1, ADC1K:1 */
221}; 165};
222 166
223/* MAX77693 MUIC device support below list of accessories(external connector) */ 167/* MAX77693 MUIC device support below list of accessories(external connector) */
224const char *max77693_extcon_cable[] = { 168enum {
225 [0] = "USB", 169 EXTCON_CABLE_USB = 0,
226 [1] = "USB-Host", 170 EXTCON_CABLE_USB_HOST,
227 [2] = "TA", 171 EXTCON_CABLE_TA,
228 [3] = "Fast-charger", 172 EXTCON_CABLE_FAST_CHARGER,
229 [4] = "Slow-charger", 173 EXTCON_CABLE_SLOW_CHARGER,
230 [5] = "Charge-downstream", 174 EXTCON_CABLE_CHARGE_DOWNSTREAM,
231 [6] = "MHL", 175 EXTCON_CABLE_MHL,
232 [7] = "Audio-video-load", 176 EXTCON_CABLE_MHL_TA,
233 [8] = "Audio-video-noload", 177 EXTCON_CABLE_JIG_USB_ON,
234 [9] = "JIG", 178 EXTCON_CABLE_JIG_USB_OFF,
179 EXTCON_CABLE_JIG_UART_OFF,
180 EXTCON_CABLE_JIG_UART_ON,
181 EXTCON_CABLE_DOCK_SMART,
182 EXTCON_CABLE_DOCK_DESK,
183 EXTCON_CABLE_DOCK_AUDIO,
184
185 _EXTCON_CABLE_NUM,
186};
187
188static const char *max77693_extcon_cable[] = {
189 [EXTCON_CABLE_USB] = "USB",
190 [EXTCON_CABLE_USB_HOST] = "USB-Host",
191 [EXTCON_CABLE_TA] = "TA",
192 [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
193 [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
194 [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
195 [EXTCON_CABLE_MHL] = "MHL",
196 [EXTCON_CABLE_MHL_TA] = "MHL_TA",
197 [EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON",
198 [EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
199 [EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
200 [EXTCON_CABLE_JIG_UART_ON] = "Dock-Car",
201 [EXTCON_CABLE_DOCK_SMART] = "Dock-Smart",
202 [EXTCON_CABLE_DOCK_DESK] = "Dock-Desk",
203 [EXTCON_CABLE_DOCK_AUDIO] = "Dock-Audio",
235 204
236 NULL, 205 NULL,
237}; 206};
238 207
208/*
209 * max77693_muic_set_debounce_time - Set the debounce time of ADC
210 * @info: the instance including private data of max77693 MUIC
211 * @time: the debounce time of ADC
212 */
239static int max77693_muic_set_debounce_time(struct max77693_muic_info *info, 213static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
240 enum max77693_muic_adc_debounce_time time) 214 enum max77693_muic_adc_debounce_time time)
241{ 215{
@@ -250,18 +224,29 @@ static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
250 MAX77693_MUIC_REG_CTRL3, 224 MAX77693_MUIC_REG_CTRL3,
251 time << CONTROL3_ADCDBSET_SHIFT, 225 time << CONTROL3_ADCDBSET_SHIFT,
252 CONTROL3_ADCDBSET_MASK); 226 CONTROL3_ADCDBSET_MASK);
253 if (ret) 227 if (ret) {
254 dev_err(info->dev, "failed to set ADC debounce time\n"); 228 dev_err(info->dev, "failed to set ADC debounce time\n");
229 return -EAGAIN;
230 }
255 break; 231 break;
256 default: 232 default:
257 dev_err(info->dev, "invalid ADC debounce time\n"); 233 dev_err(info->dev, "invalid ADC debounce time\n");
258 ret = -EINVAL; 234 return -EINVAL;
259 break;
260 } 235 }
261 236
262 return ret; 237 return 0;
263}; 238};
264 239
240/*
241 * max77693_muic_set_path - Set hardware line according to attached cable
242 * @info: the instance including private data of max77693 MUIC
243 * @val: the path according to the attached cable
244 * @attached: the state of cable (true:attached, false:detached)
245 *
246 * The max77693 MUIC device shares its external H/W lines among a variety
247 * of cables, so this function sets the internal H/W line path according
248 * to the type of attached cable.
249 */
265static int max77693_muic_set_path(struct max77693_muic_info *info, 250static int max77693_muic_set_path(struct max77693_muic_info *info,
266 u8 val, bool attached) 251 u8 val, bool attached)
267{ 252{
@@ -277,7 +262,7 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
277 MAX77693_MUIC_REG_CTRL1, ctrl1, COMP_SW_MASK); 262 MAX77693_MUIC_REG_CTRL1, ctrl1, COMP_SW_MASK);
278 if (ret < 0) { 263 if (ret < 0) {
279 dev_err(info->dev, "failed to update MUIC register\n"); 264 dev_err(info->dev, "failed to update MUIC register\n");
280 goto out; 265 return -EAGAIN;
281 } 266 }
282 267
283 if (attached) 268 if (attached)
@@ -290,141 +275,457 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
290 CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK); 275 CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK);
291 if (ret < 0) { 276 if (ret < 0) {
292 dev_err(info->dev, "failed to update MUIC register\n"); 277 dev_err(info->dev, "failed to update MUIC register\n");
293 goto out; 278 return -EAGAIN;
294 } 279 }
295 280
296 dev_info(info->dev, 281 dev_info(info->dev,
297 "CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n", 282 "CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
298 ctrl1, ctrl2, attached ? "attached" : "detached"); 283 ctrl1, ctrl2, attached ? "attached" : "detached");
299out: 284
300 return ret; 285 return 0;
301} 286}
302 287
303static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info, 288/*
304 bool attached) 289 * max77693_muic_get_cable_type - Return cable type and check cable state
290 * @info: the instance including private data of max77693 MUIC
291 * @group: the cable group to check
292 * @attached: used to return whether the cable is attached
293 *
294 * This function checks whether the cable is attached or detached,
295 * and then determines the precise cable type according to the cable group:
296 * - MAX77693_CABLE_GROUP_ADC
297 * - MAX77693_CABLE_GROUP_ADC_GND
298 * - MAX77693_CABLE_GROUP_CHG
299 * - MAX77693_CABLE_GROUP_VBVOLT
300 */
301static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
302 enum max77693_muic_cable_group group, bool *attached)
305{ 303{
306 int ret = 0; 304 int cable_type = 0;
307 int type; 305 int adc;
308 int adc, adc1k, adclow; 306 int adc1k;
307 int adclow;
308 int vbvolt;
309 int chg_type;
310
311 switch (group) {
312 case MAX77693_CABLE_GROUP_ADC:
313 /*
314 * Read ADC value to check cable type and decide cable state
315 * according to cable type
316 */
317 adc = info->status[0] & STATUS1_ADC_MASK;
318 adc >>= STATUS1_ADC_SHIFT;
319
320 /*
321 * Check current cable state/cable type and store cable type
322 * (info->prev_cable_type) for handling cable when cable is
323 * detached.
324 */
325 if (adc == MAX77693_MUIC_ADC_OPEN) {
326 *attached = false;
327
328 cable_type = info->prev_cable_type;
329 info->prev_cable_type = MAX77693_MUIC_ADC_OPEN;
330 } else {
331 *attached = true;
332
333 cable_type = info->prev_cable_type = adc;
334 }
335 break;
336 case MAX77693_CABLE_GROUP_ADC_GND:
337 /*
338 * Read ADC value to check cable type and decide cable state
339 * according to cable type
340 */
341 adc = info->status[0] & STATUS1_ADC_MASK;
342 adc >>= STATUS1_ADC_SHIFT;
343
344 /*
345 * Check current cable state/cable type and store cable type
346 * (info->prev_cable_type/_gnd) for handling cable when cable
347 * is detached.
348 */
349 if (adc == MAX77693_MUIC_ADC_OPEN) {
350 *attached = false;
351
352 cable_type = info->prev_cable_type_gnd;
353 info->prev_cable_type_gnd = MAX77693_MUIC_ADC_OPEN;
354 } else {
355 *attached = true;
356
357 adclow = info->status[0] & STATUS1_ADCLOW_MASK;
358 adclow >>= STATUS1_ADCLOW_SHIFT;
359 adc1k = info->status[0] & STATUS1_ADC1K_MASK;
360 adc1k >>= STATUS1_ADC1K_SHIFT;
361
362 vbvolt = info->status[1] & STATUS2_VBVOLT_MASK;
363 vbvolt >>= STATUS2_VBVOLT_SHIFT;
364
365 /**
366 * [0x1][VBVolt][ADCLow][ADC1K]
367 * [0x1 0 0 0 ] : USB_OTG
368 * [0x1 1 0 0 ] : USB_OTG_VB
369 * [0x1 0 1 0 ] : Audio Video Cable with load
370 * [0x1 0 1 1 ] : MHL without charging connector
371 * [0x1 1 1 1 ] : MHL with charging connector
372 */
373 cable_type = ((0x1 << 8)
374 | (vbvolt << 2)
375 | (adclow << 1)
376 | adc1k);
377
378 info->prev_cable_type = adc;
379 info->prev_cable_type_gnd = cable_type;
380 }
309 381
310 if (attached) { 382 break;
383 case MAX77693_CABLE_GROUP_CHG:
384 /*
385 * Read charger type to check cable type and decide cable state
386 * according to type of charger cable.
387 */
388 chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
389 chg_type >>= STATUS2_CHGTYP_SHIFT;
390
391 if (chg_type == MAX77693_CHARGER_TYPE_NONE) {
392 *attached = false;
393
394 cable_type = info->prev_chg_type;
395 info->prev_chg_type = MAX77693_CHARGER_TYPE_NONE;
396 } else {
397 *attached = true;
398
399 /*
400 * Check current cable state/cable type and store cable
401 * type(info->prev_chg_type) for handling cable when
402 * charger cable is detached.
403 */
404 cable_type = info->prev_chg_type = chg_type;
405 }
406
407 break;
408 case MAX77693_CABLE_GROUP_VBVOLT:
409 /*
410 * Read ADC value to check cable type and decide cable state
411 * according to cable type
412 */
311 adc = info->status[0] & STATUS1_ADC_MASK; 413 adc = info->status[0] & STATUS1_ADC_MASK;
312 adclow = info->status[0] & STATUS1_ADCLOW_MASK; 414 adc >>= STATUS1_ADC_SHIFT;
313 adclow >>= STATUS1_ADCLOW_SHIFT; 415 chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
314 adc1k = info->status[0] & STATUS1_ADC1K_MASK; 416 chg_type >>= STATUS2_CHGTYP_SHIFT;
315 adc1k >>= STATUS1_ADC1K_SHIFT; 417
316 418 if (adc == MAX77693_MUIC_ADC_OPEN
317 /** 419 && chg_type == MAX77693_CHARGER_TYPE_NONE)
318 * [0x1][ADCLow][ADC1K] 420 *attached = false;
319 * [0x1 0 0 ] : USB_OTG 421 else
320 * [0x1 1 0 ] : Audio Video Cable with load 422 *attached = true;
321 * [0x1 1 1 ] : MHL 423
424 /*
425 * Read vbvolt field, if vbvolt is 1,
426 * this cable is used for charging.
427 */
428 vbvolt = info->status[1] & STATUS2_VBVOLT_MASK;
429 vbvolt >>= STATUS2_VBVOLT_SHIFT;
430
431 cable_type = vbvolt;
432 break;
433 default:
434 dev_err(info->dev, "Unknown cable group (%d)\n", group);
435 cable_type = -EINVAL;
436 break;
437 }
438
439 return cable_type;
440}
441
442static int max77693_muic_dock_handler(struct max77693_muic_info *info,
443 int cable_type, bool attached)
444{
445 int ret = 0;
446 int vbvolt;
447 bool cable_attached;
448 char dock_name[CABLE_NAME_MAX];
449
450 dev_info(info->dev,
451 "external connector is %s (adc:0x%02x)\n",
452 attached ? "attached" : "detached", cable_type);
453
454 switch (cable_type) {
455 case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
456 /*
457 * Check whether the power cable is attached or detached.
458 * The Dock-Smart device always needs an external power supply;
459 * if a power cable (USB/TA) isn't connected to the dock device,
460 * the user can't use Dock-Smart in desktop mode.
461 */
462 vbvolt = max77693_muic_get_cable_type(info,
463 MAX77693_CABLE_GROUP_VBVOLT, &cable_attached);
464 if (attached && !vbvolt) {
465 dev_warn(info->dev,
466 "Cannot detect external power supply\n");
467 return 0;
468 }
469
470 /*
471 * Notify Dock-Smart/MHL state.
472 * - The Dock-Smart device includes three types of cable: HDMI,
473 * USB for mouse/keyboard, and a micro-USB port for the USB/TA
474 * cable. The Dock-Smart device always needs an external power
475 * supply (a USB/TA cable through the micro-USB port). The Dock-
476 * Smart device supports screen output from the target to a
477 * separate monitor and a mouse/keyboard for desktop mode.
478 *
479 * Features of 'USB/TA cable with Dock-Smart device'
480 * - Support MHL
481 * - Support external output feature of audio
482 * - Support charging through micro-usb port without data
483 * connection if TA cable is connected to target.
484 * - Support charging and data connection through micro-usb port
485 * if USB cable is connected between target and host
486 * device.
487 * - Support OTG device (Mouse/Keyboard)
322 */ 488 */
323 type = ((0x1 << 8) | (adclow << 1) | adc1k); 489 ret = max77693_muic_set_path(info, info->path_usb, attached);
490 if (ret < 0)
491 return ret;
492
493 extcon_set_cable_state(info->edev, "Dock-Smart", attached);
494 extcon_set_cable_state(info->edev, "MHL", attached);
495 goto out;
496 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* Dock-Car */
497 strcpy(dock_name, "Dock-Car");
498 break;
499 case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
500 strcpy(dock_name, "Dock-Desk");
501 break;
502 case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
503 strcpy(dock_name, "Dock-Audio");
504 if (!attached)
505 extcon_set_cable_state(info->edev, "USB", false);
506 break;
507 default:
508 dev_err(info->dev, "failed to detect %s dock device\n",
509 attached ? "attached" : "detached");
510 return -EINVAL;
511 }
512
513 /* Dock-Car/Desk/Audio, PATH:AUDIO */
514 ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
515 if (ret < 0)
516 return ret;
517 extcon_set_cable_state(info->edev, dock_name, attached);
518
519out:
520 return 0;
521}
522
523static int max77693_muic_dock_button_handler(struct max77693_muic_info *info,
524 int button_type, bool attached)
525{
526 struct input_dev *dock = info->dock;
527 unsigned int code;
528
529 switch (button_type) {
530 case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON-1
531 ... MAX77693_MUIC_ADC_REMOTE_S3_BUTTON+1:
532 /* DOCK_KEY_PREV */
533 code = KEY_PREVIOUSSONG;
534 break;
535 case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON-1
536 ... MAX77693_MUIC_ADC_REMOTE_S7_BUTTON+1:
537 /* DOCK_KEY_NEXT */
538 code = KEY_NEXTSONG;
539 break;
540 case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON:
541 /* DOCK_VOL_DOWN */
542 code = KEY_VOLUMEDOWN;
543 break;
544 case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON:
545 /* DOCK_VOL_UP */
546 code = KEY_VOLUMEUP;
547 break;
548 case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON-1
549 ... MAX77693_MUIC_ADC_REMOTE_S12_BUTTON+1:
550 /* DOCK_KEY_PLAY_PAUSE */
551 code = KEY_PLAYPAUSE;
552 break;
553 default:
554 dev_err(info->dev,
555 "failed to detect %s key (adc:0x%x)\n",
556 attached ? "pressed" : "released", button_type);
557 return -EINVAL;
558 }
559
560 input_event(dock, EV_KEY, code, attached);
561 input_sync(dock);
562
563 return 0;
564}
324 565
325 /* Store previous ADC value to handle accessory 566static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
326 when accessory will be detached */ 567{
327 info->prev_adc = adc; 568 int cable_type_gnd;
328 info->prev_adc_gnd = type; 569 int ret = 0;
329 } else 570 bool attached;
330 type = info->prev_adc_gnd;
331 571
332 switch (type) { 572 cable_type_gnd = max77693_muic_get_cable_type(info,
573 MAX77693_CABLE_GROUP_ADC_GND, &attached);
574
575 switch (cable_type_gnd) {
333 case MAX77693_MUIC_GND_USB_OTG: 576 case MAX77693_MUIC_GND_USB_OTG:
334 /* USB_OTG */ 577 case MAX77693_MUIC_GND_USB_OTG_VB:
578 /* USB_OTG, PATH: AP_USB */
335 ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached); 579 ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
336 if (ret < 0) 580 if (ret < 0)
337 goto out; 581 return ret;
338 extcon_set_cable_state(info->edev, "USB-Host", attached); 582 extcon_set_cable_state(info->edev, "USB-Host", attached);
339 break; 583 break;
340 case MAX77693_MUIC_GND_AV_CABLE_LOAD: 584 case MAX77693_MUIC_GND_AV_CABLE_LOAD:
341 /* Audio Video Cable with load */ 585 /* Audio Video Cable with load, PATH:AUDIO */
342 ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached); 586 ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
343 if (ret < 0) 587 if (ret < 0)
344 goto out; 588 return ret;
345 extcon_set_cable_state(info->edev, 589 extcon_set_cable_state(info->edev,
346 "Audio-video-load", attached); 590 "Audio-video-load", attached);
347 break; 591 break;
348 case MAX77693_MUIC_GND_MHL_CABLE: 592 case MAX77693_MUIC_GND_MHL:
349 /* MHL */ 593 case MAX77693_MUIC_GND_MHL_VB:
594 /* MHL or MHL with USB/TA cable */
350 extcon_set_cable_state(info->edev, "MHL", attached); 595 extcon_set_cable_state(info->edev, "MHL", attached);
351 break; 596 break;
352 default: 597 default:
353 dev_err(info->dev, "failed to detect %s accessory\n", 598 dev_err(info->dev, "failed to detect %s cable of gnd type\n",
354 attached ? "attached" : "detached"); 599 attached ? "attached" : "detached");
355 dev_err(info->dev, "- adc:0x%x, adclow:0x%x, adc1k:0x%x\n", 600 return -EINVAL;
356 adc, adclow, adc1k); 601 }
357 ret = -EINVAL; 602
603 return 0;
604}
605
606static int max77693_muic_jig_handler(struct max77693_muic_info *info,
607 int cable_type, bool attached)
608{
609 char cable_name[32];
610 int ret = 0;
611 u8 path = CONTROL1_SW_OPEN;
612
613 dev_info(info->dev,
614 "external connector is %s (adc:0x%02x)\n",
615 attached ? "attached" : "detached", cable_type);
616
617 switch (cable_type) {
618 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
619 /* PATH:AP_USB */
620 strcpy(cable_name, "JIG-USB-OFF");
621 path = CONTROL1_SW_USB;
622 break;
623 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
624 /* PATH:AP_USB */
625 strcpy(cable_name, "JIG-USB-ON");
626 path = CONTROL1_SW_USB;
358 break; 627 break;
628 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
629 /* PATH:AP_UART */
630 strcpy(cable_name, "JIG-UART-OFF");
631 path = CONTROL1_SW_UART;
632 break;
633 default:
634 dev_err(info->dev, "failed to detect %s jig cable\n",
635 attached ? "attached" : "detached");
636 return -EINVAL;
359 } 637 }
360 638
361out: 639 ret = max77693_muic_set_path(info, path, attached);
362 return ret; 640 if (ret < 0)
641 return ret;
642
643 extcon_set_cable_state(info->edev, cable_name, attached);
644
645 return 0;
363} 646}
364 647
365static int max77693_muic_adc_handler(struct max77693_muic_info *info, 648static int max77693_muic_adc_handler(struct max77693_muic_info *info)
366 int curr_adc, bool attached)
367{ 649{
650 int cable_type;
651 int button_type;
652 bool attached;
368 int ret = 0; 653 int ret = 0;
369 int adc;
370 654
371 if (attached) { 655 /* Check accessory state which is either detached or attached */
372 /* Store ADC value to handle accessory 656 cable_type = max77693_muic_get_cable_type(info,
373 when accessory will be detached */ 657 MAX77693_CABLE_GROUP_ADC, &attached);
374 info->prev_adc = curr_adc;
375 adc = curr_adc;
376 } else
377 adc = info->prev_adc;
378 658
379 dev_info(info->dev, 659 dev_info(info->dev,
380 "external connector is %s (adc:0x%02x, prev_adc:0x%x)\n", 660 "external connector is %s (adc:0x%02x, prev_adc:0x%x)\n",
381 attached ? "attached" : "detached", curr_adc, info->prev_adc); 661 attached ? "attached" : "detached", cable_type,
662 info->prev_cable_type);
382 663
383 switch (adc) { 664 switch (cable_type) {
384 case MAX77693_MUIC_ADC_GROUND: 665 case MAX77693_MUIC_ADC_GROUND:
385 /* USB_OTG/MHL/Audio */ 666 /* USB_OTG/MHL/Audio */
386 max77693_muic_adc_ground_handler(info, attached); 667 max77693_muic_adc_ground_handler(info);
387 break; 668 break;
388 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: 669 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF:
389 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON: 670 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON:
390 /* USB */
391 ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
392 if (ret < 0)
393 goto out;
394 extcon_set_cable_state(info->edev, "USB", attached);
395 break;
396 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF: 671 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF:
397 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON:
398 /* JIG */ 672 /* JIG */
399 ret = max77693_muic_set_path(info, CONTROL1_SW_UART, attached); 673 ret = max77693_muic_jig_handler(info, cable_type, attached);
400 if (ret < 0) 674 if (ret < 0)
401 goto out; 675 return ret;
402 extcon_set_cable_state(info->edev, "JIG", attached);
403 break; 676 break;
404 case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: 677 case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
405 /* Audio Video cable with no-load */ 678 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* Dock-Car */
406 ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached); 679 case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
680 case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
681 /*
682 * DOCK device
683 *
684 * The MAX77693 MUIC device can detect total 34 cable type
685 * except of charger cable and MUIC device didn't define
686 * specfic role of cable in the range of from 0x01 to 0x12
687 * of ADC value. So, can use/define cable with no role according
688 * to schema of hardware board.
689 */
690 ret = max77693_muic_dock_handler(info, cable_type, attached);
407 if (ret < 0) 691 if (ret < 0)
408 goto out; 692 return ret;
409 extcon_set_cable_state(info->edev, 693 break;
410 "Audio-video-noload", attached); 694 case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON: /* DOCK_KEY_PREV */
695 case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON: /* DOCK_KEY_NEXT */
696 case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON: /* DOCK_VOL_DOWN */
697 case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON: /* DOCK_VOL_UP */
698 case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON: /* DOCK_KEY_PLAY_PAUSE */
699 /*
700 * Button of DOCK device
701 * - the Prev/Next/Volume Up/Volume Down/Play-Pause button
702 *
703 * The MAX77693 MUIC device can detect total 34 cable type
704 * except of charger cable and MUIC device didn't define
705 * specfic role of cable in the range of from 0x01 to 0x12
706 * of ADC value. So, can use/define cable with no role according
707 * to schema of hardware board.
708 */
709 if (attached)
710 button_type = info->prev_button_type = cable_type;
711 else
712 button_type = info->prev_button_type;
713
714 ret = max77693_muic_dock_button_handler(info, button_type,
715 attached);
716 if (ret < 0)
717 return ret;
411 break; 718 break;
412 case MAX77693_MUIC_ADC_SEND_END_BUTTON: 719 case MAX77693_MUIC_ADC_SEND_END_BUTTON:
413 case MAX77693_MUIC_ADC_REMOTE_S1_BUTTON: 720 case MAX77693_MUIC_ADC_REMOTE_S1_BUTTON:
414 case MAX77693_MUIC_ADC_REMOTE_S2_BUTTON: 721 case MAX77693_MUIC_ADC_REMOTE_S2_BUTTON:
415 case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON:
416 case MAX77693_MUIC_ADC_REMOTE_S4_BUTTON: 722 case MAX77693_MUIC_ADC_REMOTE_S4_BUTTON:
417 case MAX77693_MUIC_ADC_REMOTE_S5_BUTTON: 723 case MAX77693_MUIC_ADC_REMOTE_S5_BUTTON:
418 case MAX77693_MUIC_ADC_REMOTE_S6_BUTTON: 724 case MAX77693_MUIC_ADC_REMOTE_S6_BUTTON:
419 case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON:
420 case MAX77693_MUIC_ADC_REMOTE_S8_BUTTON: 725 case MAX77693_MUIC_ADC_REMOTE_S8_BUTTON:
421 case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON:
422 case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON:
423 case MAX77693_MUIC_ADC_REMOTE_S11_BUTTON: 726 case MAX77693_MUIC_ADC_REMOTE_S11_BUTTON:
424 case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON:
425 case MAX77693_MUIC_ADC_RESERVED_ACC_1: 727 case MAX77693_MUIC_ADC_RESERVED_ACC_1:
426 case MAX77693_MUIC_ADC_RESERVED_ACC_2: 728 case MAX77693_MUIC_ADC_RESERVED_ACC_2:
427 case MAX77693_MUIC_ADC_RESERVED_ACC_3:
428 case MAX77693_MUIC_ADC_RESERVED_ACC_4: 729 case MAX77693_MUIC_ADC_RESERVED_ACC_4:
429 case MAX77693_MUIC_ADC_RESERVED_ACC_5: 730 case MAX77693_MUIC_ADC_RESERVED_ACC_5:
430 case MAX77693_MUIC_ADC_CEA936_AUDIO: 731 case MAX77693_MUIC_ADC_CEA936_AUDIO:
@@ -432,60 +733,164 @@ static int max77693_muic_adc_handler(struct max77693_muic_info *info,
432 case MAX77693_MUIC_ADC_TTY_CONVERTER: 733 case MAX77693_MUIC_ADC_TTY_CONVERTER:
433 case MAX77693_MUIC_ADC_UART_CABLE: 734 case MAX77693_MUIC_ADC_UART_CABLE:
434 case MAX77693_MUIC_ADC_CEA936A_TYPE1_CHG: 735 case MAX77693_MUIC_ADC_CEA936A_TYPE1_CHG:
435 case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD:
436 case MAX77693_MUIC_ADC_CEA936A_TYPE2_CHG: 736 case MAX77693_MUIC_ADC_CEA936A_TYPE2_CHG:
437 /* This accessory isn't used in general case if it is specially 737 /*
438 needed to detect additional accessory, should implement 738 * This accessory isn't used in the general case; if it is
439 proper operation when this accessory is attached/detached. */ 739 * specifically needed to detect an additional accessory, proper
740 * handling should be implemented for when it is attached/detached.
741 */
440 dev_info(info->dev, 742 dev_info(info->dev,
441 "accessory is %s but it isn't used (adc:0x%x)\n", 743 "accessory is %s but it isn't used (adc:0x%x)\n",
442 attached ? "attached" : "detached", adc); 744 attached ? "attached" : "detached", cable_type);
443 goto out; 745 return -EAGAIN;
444 default: 746 default:
445 dev_err(info->dev, 747 dev_err(info->dev,
446 "failed to detect %s accessory (adc:0x%x)\n", 748 "failed to detect %s accessory (adc:0x%x)\n",
447 attached ? "attached" : "detached", adc); 749 attached ? "attached" : "detached", cable_type);
448 ret = -EINVAL; 750 return -EINVAL;
449 goto out;
450 } 751 }
451 752
452out: 753 return 0;
453 return ret;
454} 754}
455 755
456static int max77693_muic_chg_handler(struct max77693_muic_info *info, 756static int max77693_muic_chg_handler(struct max77693_muic_info *info)
457 int curr_chg_type, bool attached)
458{ 757{
459 int ret = 0;
460 int chg_type; 758 int chg_type;
759 int cable_type_gnd;
760 int cable_type;
761 bool attached;
762 bool cable_attached;
763 int ret = 0;
461 764
462 if (attached) { 765 chg_type = max77693_muic_get_cable_type(info,
463 /* Store previous charger type to control 766 MAX77693_CABLE_GROUP_CHG, &attached);
464 when charger accessory will be detached */
465 info->prev_chg_type = curr_chg_type;
466 chg_type = curr_chg_type;
467 } else
468 chg_type = info->prev_chg_type;
469 767
470 dev_info(info->dev, 768 dev_info(info->dev,
471 "external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n", 769 "external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n",
472 attached ? "attached" : "detached", 770 attached ? "attached" : "detached",
473 curr_chg_type, info->prev_chg_type); 771 chg_type, info->prev_chg_type);
474 772
475 switch (chg_type) { 773 switch (chg_type) {
476 case MAX77693_CHARGER_TYPE_USB: 774 case MAX77693_CHARGER_TYPE_USB:
477 ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached); 775 case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
478 if (ret < 0) 776 case MAX77693_CHARGER_TYPE_NONE:
479 goto out; 777 /* Check MAX77693_CABLE_GROUP_ADC_GND type */
480 extcon_set_cable_state(info->edev, "USB", attached); 778 cable_type_gnd = max77693_muic_get_cable_type(info,
779 MAX77693_CABLE_GROUP_ADC_GND,
780 &cable_attached);
781 switch (cable_type_gnd) {
782 case MAX77693_MUIC_GND_MHL:
783 case MAX77693_MUIC_GND_MHL_VB:
784 /*
785 * MHL cable with MHL_TA(USB/TA) cable
786 * - The MHL cable includes two ports (an HDMI line and a separate
787 * micro-USB port). When the target connects an MHL cable, the extcon
788 * driver checks whether an MHL_TA(USB/TA) cable is connected. If it
789 * is, the extcon driver notifies the state to the notifiee
790 * for charging the battery.
791 *
792 * Features of 'MHL_TA(USB/TA) with MHL cable'
793 * - Support MHL
794 * - Support charging through micro-usb port without data connection
795 */
796 extcon_set_cable_state(info->edev, "MHL_TA", attached);
797 if (!cable_attached)
798 extcon_set_cable_state(info->edev, "MHL", cable_attached);
799 break;
800 }
801
802 /* Check MAX77693_CABLE_GROUP_ADC type */
803 cable_type = max77693_muic_get_cable_type(info,
804 MAX77693_CABLE_GROUP_ADC,
805 &cable_attached);
806 switch (cable_type) {
807 case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
808 /*
809 * Dock-Audio device with USB/TA cable
810 * - The dock device includes two ports (Dock-Audio and a micro-USB
811 * port). When the target connects a Dock-Audio device, the extcon
812 * driver checks whether a USB/TA cable is connected. If it is,
813 * the extcon driver notifies the state to the notifiee
814 * for charging the battery.
815 *
816 * Features of 'USB/TA cable with Dock-Audio device'
817 * - Support external output feature of audio.
818 * - Support charging through micro-usb port without data
819 * connection.
820 */
821 extcon_set_cable_state(info->edev, "USB", attached);
822
823 if (!cable_attached)
824 extcon_set_cable_state(info->edev, "Dock-Audio", cable_attached);
825 break;
826 case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
827 /*
828 * Dock-Smart device with USB/TA cable
829 * - The Dock-Smart device includes three types of cable: HDMI,
830 * USB for mouse/keyboard, and a micro-USB port for the USB/TA
831 * cable. The Dock-Smart device always needs an external power
832 * supply (a USB/TA cable through the micro-USB port). The Dock-
833 * Smart device supports screen output from the target to a
834 * separate monitor and a mouse/keyboard for desktop mode.
835 *
836 * Features of 'USB/TA cable with Dock-Smart device'
837 * - Support MHL
838 * - Support external output feature of audio
839 * - Support charging through micro-usb port without data
840 * connection if TA cable is connected to target.
841 * - Support charging and data connection through micro-usb port
842 * if USB cable is connected between target and host
843 * device.
844 * - Support OTG device (Mouse/Keyboard)
845 */
846 ret = max77693_muic_set_path(info, info->path_usb, attached);
847 if (ret < 0)
848 return ret;
849
850 extcon_set_cable_state(info->edev, "Dock-Smart", attached);
851 extcon_set_cable_state(info->edev, "MHL", attached);
852
853 break;
854 }
855
856 /* Check MAX77693_CABLE_GROUP_CHG type */
857 switch (chg_type) {
858 case MAX77693_CHARGER_TYPE_NONE:
859 /*
860 * When MHL (with a USB/TA cable) or Dock-Audio with a USB/TA cable
861 * is attached, the MUIC device raises the two interrupts below:
862 * - 'MAX77693_MUIC_IRQ_INT1_ADC' for detecting MHL/Dock-Audio.
863 * - 'MAX77693_MUIC_IRQ_INT2_CHGTYP' for detecting a USB/TA cable
864 * connected to MHL or Dock-Audio.
865 * The MAX77693_MUIC_IRQ_INT1_ADC interrupt always happens earlier
866 * than the MAX77693_MUIC_IRQ_INT2_CHGTYP interrupt.
867 *
868 * If the user attaches MHL (with a USB/TA cable) and immediately
869 * detaches it before the MAX77693_MUIC_IRQ_INT2_CHGTYP interrupt
870 * happens, the USB/TA cable remains reported as connected on the
871 * target even though it isn't actually connected, and the user
872 * sees unexpected behaviour. So the driver should check this
873 * situation even though the previous charger type is N/A.
874 */
875 break;
876 case MAX77693_CHARGER_TYPE_USB:
877 /* Only USB cable, PATH:AP_USB */
878 ret = max77693_muic_set_path(info, info->path_usb, attached);
879 if (ret < 0)
880 return ret;
881
882 extcon_set_cable_state(info->edev, "USB", attached);
883 break;
884 case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
885 /* Only TA cable */
886 extcon_set_cable_state(info->edev, "TA", attached);
887 break;
888 }
481 break; 889 break;
482 case MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT: 890 case MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT:
483 extcon_set_cable_state(info->edev, 891 extcon_set_cable_state(info->edev,
484 "Charge-downstream", attached); 892 "Charge-downstream", attached);
485 break; 893 break;
486 case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
487 extcon_set_cable_state(info->edev, "TA", attached);
488 break;
489 case MAX77693_CHARGER_TYPE_APPLE_500MA: 894 case MAX77693_CHARGER_TYPE_APPLE_500MA:
490 extcon_set_cable_state(info->edev, "Slow-charger", attached); 895 extcon_set_cable_state(info->edev, "Slow-charger", attached);
491 break; 896 break;
@@ -498,22 +903,18 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info,
498 dev_err(info->dev, 903 dev_err(info->dev,
499 "failed to detect %s accessory (chg_type:0x%x)\n", 904 "failed to detect %s accessory (chg_type:0x%x)\n",
500 attached ? "attached" : "detached", chg_type); 905 attached ? "attached" : "detached", chg_type);
501 ret = -EINVAL; 906 return -EINVAL;
502 goto out;
503 } 907 }
504 908
505out: 909 return 0;
506 return ret;
507} 910}
508 911
509static void max77693_muic_irq_work(struct work_struct *work) 912static void max77693_muic_irq_work(struct work_struct *work)
510{ 913{
511 struct max77693_muic_info *info = container_of(work, 914 struct max77693_muic_info *info = container_of(work,
512 struct max77693_muic_info, irq_work); 915 struct max77693_muic_info, irq_work);
513 int curr_adc, curr_chg_type;
514 int irq_type = -1; 916 int irq_type = -1;
515 int i, ret = 0; 917 int i, ret = 0;
516 bool attached = true;
517 918
518 if (!info->edev) 919 if (!info->edev)
519 return; 920 return;
@@ -539,14 +940,7 @@ static void max77693_muic_irq_work(struct work_struct *work)
539 case MAX77693_MUIC_IRQ_INT1_ADC1K: 940 case MAX77693_MUIC_IRQ_INT1_ADC1K:
540 /* Handle all of accessory except for 941 /* Handle all of accessory except for
541 type of charger accessory */ 942 type of charger accessory */
542 curr_adc = info->status[0] & STATUS1_ADC_MASK; 943 ret = max77693_muic_adc_handler(info);
543 curr_adc >>= STATUS1_ADC_SHIFT;
544
545 /* Check accessory state which is either detached or attached */
546 if (curr_adc == MAX77693_MUIC_ADC_OPEN)
547 attached = false;
548
549 ret = max77693_muic_adc_handler(info, curr_adc, attached);
550 break; 944 break;
551 case MAX77693_MUIC_IRQ_INT2_CHGTYP: 945 case MAX77693_MUIC_IRQ_INT2_CHGTYP:
552 case MAX77693_MUIC_IRQ_INT2_CHGDETREUN: 946 case MAX77693_MUIC_IRQ_INT2_CHGDETREUN:
@@ -555,15 +949,7 @@ static void max77693_muic_irq_work(struct work_struct *work)
555 case MAX77693_MUIC_IRQ_INT2_VBVOLT: 949 case MAX77693_MUIC_IRQ_INT2_VBVOLT:
556 case MAX77693_MUIC_IRQ_INT2_VIDRM: 950 case MAX77693_MUIC_IRQ_INT2_VIDRM:
557 /* Handle charger accessory */ 951 /* Handle charger accessory */
558 curr_chg_type = info->status[1] & STATUS2_CHGTYP_MASK; 952 ret = max77693_muic_chg_handler(info);
559 curr_chg_type >>= STATUS2_CHGTYP_SHIFT;
560
561 /* Check charger accessory state which
562 is either detached or attached */
563 if (curr_chg_type == MAX77693_CHARGER_TYPE_NONE)
564 attached = false;
565
566 ret = max77693_muic_chg_handler(info, curr_chg_type, attached);
567 break; 953 break;
568 case MAX77693_MUIC_IRQ_INT3_EOC: 954 case MAX77693_MUIC_IRQ_INT3_EOC:
569 case MAX77693_MUIC_IRQ_INT3_CGMBC: 955 case MAX77693_MUIC_IRQ_INT3_CGMBC:
@@ -575,7 +961,8 @@ static void max77693_muic_irq_work(struct work_struct *work)
575 default: 961 default:
576 dev_err(info->dev, "muic interrupt: irq %d occurred\n", 962 dev_err(info->dev, "muic interrupt: irq %d occurred\n",
577 irq_type); 963 irq_type);
578 break; 964 mutex_unlock(&info->mutex);
965 return;
579 } 966 }
580 967
581 if (ret < 0) 968 if (ret < 0)
@@ -604,7 +991,9 @@ static struct regmap_config max77693_muic_regmap_config = {
604static int max77693_muic_detect_accessory(struct max77693_muic_info *info) 991static int max77693_muic_detect_accessory(struct max77693_muic_info *info)
605{ 992{
606 int ret = 0; 993 int ret = 0;
607 int adc, chg_type; 994 int adc;
995 int chg_type;
996 bool attached;
608 997
609 mutex_lock(&info->mutex); 998 mutex_lock(&info->mutex);
610 999
@@ -617,35 +1006,39 @@ static int max77693_muic_detect_accessory(struct max77693_muic_info *info)
617 return -EINVAL; 1006 return -EINVAL;
618 } 1007 }
619 1008
620 adc = info->status[0] & STATUS1_ADC_MASK; 1009 adc = max77693_muic_get_cable_type(info, MAX77693_CABLE_GROUP_ADC,
621 adc >>= STATUS1_ADC_SHIFT; 1010 &attached);
622 1011 if (attached && adc != MAX77693_MUIC_ADC_OPEN) {
623 if (adc != MAX77693_MUIC_ADC_OPEN) { 1012 ret = max77693_muic_adc_handler(info);
624 dev_info(info->dev, 1013 if (ret < 0) {
625 "external connector is attached (adc:0x%02x)\n", adc); 1014 dev_err(info->dev, "Cannot detect accessory\n");
1015 mutex_unlock(&info->mutex);
1016 return ret;
1017 }
1018 }
626 1019
627 ret = max77693_muic_adc_handler(info, adc, true); 1020 chg_type = max77693_muic_get_cable_type(info, MAX77693_CABLE_GROUP_CHG,
628 if (ret < 0) 1021 &attached);
629 dev_err(info->dev, "failed to detect accessory\n"); 1022 if (attached && chg_type != MAX77693_CHARGER_TYPE_NONE) {
630 goto out; 1023 ret = max77693_muic_chg_handler(info);
1024 if (ret < 0) {
1025 dev_err(info->dev, "Cannot detect charger accessory\n");
1026 mutex_unlock(&info->mutex);
1027 return ret;
1028 }
631 } 1029 }
632 1030
633 chg_type = info->status[1] & STATUS2_CHGTYP_MASK; 1031 mutex_unlock(&info->mutex);
634 chg_type >>= STATUS2_CHGTYP_SHIFT;
635 1032
636 if (chg_type != MAX77693_CHARGER_TYPE_NONE) { 1033 return 0;
637 dev_info(info->dev, 1034}
638 "external connector is attached (chg_type:0x%x)\n",
639 chg_type);
640 1035
641 max77693_muic_chg_handler(info, chg_type, true); 1036static void max77693_muic_detect_cable_wq(struct work_struct *work)
642 if (ret < 0) 1037{
643 dev_err(info->dev, "failed to detect charger accessory\n"); 1038 struct max77693_muic_info *info = container_of(to_delayed_work(work),
644 } 1039 struct max77693_muic_info, wq_detcable);
645 1040
646out: 1041 max77693_muic_detect_accessory(info);
647 mutex_unlock(&info->mutex);
648 return ret;
649} 1042}
650 1043
651static int max77693_muic_probe(struct platform_device *pdev) 1044static int max77693_muic_probe(struct platform_device *pdev)
@@ -654,7 +1047,9 @@ static int max77693_muic_probe(struct platform_device *pdev)
654 struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev); 1047 struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev);
655 struct max77693_muic_platform_data *muic_pdata = pdata->muic_data; 1048 struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
656 struct max77693_muic_info *info; 1049 struct max77693_muic_info *info;
657 int ret, i; 1050 int delay_jiffies;
1051 int ret;
1052 int i;
658 u8 id; 1053 u8 id;
659 1054
660 info = devm_kzalloc(&pdev->dev, sizeof(struct max77693_muic_info), 1055 info = devm_kzalloc(&pdev->dev, sizeof(struct max77693_muic_info),
@@ -678,6 +1073,32 @@ static int max77693_muic_probe(struct platform_device *pdev)
678 return ret; 1073 return ret;
679 } 1074 }
680 } 1075 }
1076
1077 /* Register input device for button of dock device */
1078 info->dock = devm_input_allocate_device(&pdev->dev);
1079 if (!info->dock) {
1080 dev_err(&pdev->dev, "%s: failed to allocate input\n", __func__);
1081 return -ENOMEM;
1082 }
1083 info->dock->name = "max77693-muic/dock";
1084 info->dock->phys = "max77693-muic/extcon";
1085 info->dock->dev.parent = &pdev->dev;
1086
1087 __set_bit(EV_REP, info->dock->evbit);
1088
1089 input_set_capability(info->dock, EV_KEY, KEY_VOLUMEUP);
1090 input_set_capability(info->dock, EV_KEY, KEY_VOLUMEDOWN);
1091 input_set_capability(info->dock, EV_KEY, KEY_PLAYPAUSE);
1092 input_set_capability(info->dock, EV_KEY, KEY_PREVIOUSSONG);
1093 input_set_capability(info->dock, EV_KEY, KEY_NEXTSONG);
1094
1095 ret = input_register_device(info->dock);
1096 if (ret < 0) {
1097 dev_err(&pdev->dev, "Cannot register input device error(%d)\n",
1098 ret);
1099 return ret;
1100 }
1101
681 platform_set_drvdata(pdev, info); 1102 platform_set_drvdata(pdev, info);
682 mutex_init(&info->mutex); 1103 mutex_init(&info->mutex);
683 1104
@@ -697,13 +1118,13 @@ static int max77693_muic_probe(struct platform_device *pdev)
697 1118
698 ret = request_threaded_irq(virq, NULL, 1119 ret = request_threaded_irq(virq, NULL,
699 max77693_muic_irq_handler, 1120 max77693_muic_irq_handler,
700 IRQF_ONESHOT, muic_irq->name, info); 1121 IRQF_NO_SUSPEND,
1122 muic_irq->name, info);
701 if (ret) { 1123 if (ret) {
702 dev_err(&pdev->dev, 1124 dev_err(&pdev->dev,
703 "failed: irq request (IRQ: %d," 1125 "failed: irq request (IRQ: %d,"
704 " error :%d)\n", 1126 " error :%d)\n",
705 muic_irq->irq, ret); 1127 muic_irq->irq, ret);
706
707 goto err_irq; 1128 goto err_irq;
708 } 1129 }
709 } 1130 }
@@ -749,23 +1170,54 @@ static int max77693_muic_probe(struct platform_device *pdev)
749 = muic_pdata->init_data[i].data; 1170 = muic_pdata->init_data[i].data;
750 } 1171 }
751 1172
1173 /*
 1174	 * Default USB/UART path: selects either the UART/USB or AUX_UART/AUX_USB
 1175	 * H/W path of COMP2/COMN1 on the CONTROL1 register.
1176 */
1177 if (muic_pdata->path_uart)
1178 info->path_uart = muic_pdata->path_uart;
1179 else
1180 info->path_uart = CONTROL1_SW_UART;
1181
1182 if (muic_pdata->path_usb)
1183 info->path_usb = muic_pdata->path_usb;
1184 else
1185 info->path_usb = CONTROL1_SW_USB;
1186
1187 /* Set initial path for UART */
1188 max77693_muic_set_path(info, info->path_uart, true);
1189
752 /* Check revision number of MUIC device*/ 1190 /* Check revision number of MUIC device*/
753 ret = max77693_read_reg(info->max77693->regmap_muic, 1191 ret = max77693_read_reg(info->max77693->regmap_muic,
754 MAX77693_MUIC_REG_ID, &id); 1192 MAX77693_MUIC_REG_ID, &id);
755 if (ret < 0) { 1193 if (ret < 0) {
756 dev_err(&pdev->dev, "failed to read revision number\n"); 1194 dev_err(&pdev->dev, "failed to read revision number\n");
757 goto err_irq; 1195 goto err_extcon;
758 } 1196 }
759 dev_info(info->dev, "device ID : 0x%x\n", id); 1197 dev_info(info->dev, "device ID : 0x%x\n", id);
760 1198
761 /* Set ADC debounce time */ 1199 /* Set ADC debounce time */
762 max77693_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS); 1200 max77693_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
763 1201
764 /* Detect accessory on boot */ 1202 /*
 765 max77693_muic_detect_accessory(info); 1203	 * Detect the accessory after platform initialization has completed.
 1204	 *
 1205	 * - Use a delayed workqueue to detect the cable state and then
 1206	 *   notify it to the notifiee/platform through a uevent, so that
 1207	 *   the extcon provider driver reports the cable state to the
 1208	 *   upper layer only after the platform has finished booting.
1209 */
1210 INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq);
1211 if (muic_pdata->detcable_delay_ms)
1212 delay_jiffies = msecs_to_jiffies(muic_pdata->detcable_delay_ms);
1213 else
1214 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
1215 schedule_delayed_work(&info->wq_detcable, delay_jiffies);
766 1216
767 return ret; 1217 return ret;
768 1218
1219err_extcon:
1220 extcon_dev_unregister(info->edev);
769err_irq: 1221err_irq:
770 while (--i >= 0) 1222 while (--i >= 0)
771 free_irq(muic_irqs[i].virq, info); 1223 free_irq(muic_irqs[i].virq, info);
@@ -780,6 +1232,7 @@ static int max77693_muic_remove(struct platform_device *pdev)
780 for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) 1232 for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
781 free_irq(muic_irqs[i].virq, info); 1233 free_irq(muic_irqs[i].virq, info);
782 cancel_work_sync(&info->irq_work); 1234 cancel_work_sync(&info->irq_work);
1235 input_unregister_device(info->dock);
783 extcon_dev_unregister(info->edev); 1236 extcon_dev_unregister(info->edev);
784 1237
785 return 0; 1238 return 0;
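
Taken together, the max77693 probe() changes above defer the first cable scan to a delayed workqueue, so the initial uevent is only sent once the platform has finished booting. The following is a minimal, self-contained sketch of that pattern; the my_muic_* names, the stub detection routine and the 20-second fallback are illustrative stand-ins, not the driver's actual code.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define MY_DETCABLE_DELAY_MS_DEFAULT	20000	/* illustrative 20 s fallback */

struct my_muic_info {
	struct delayed_work wq_detcable;
	/* regmap, extcon device, ... */
};

/* Placeholder for the real detection routine that reads the STATUS
 * registers and reports cable state through extcon. */
static void my_muic_detect_accessory(struct my_muic_info *info)
{
}

static void my_muic_detect_cable_wq(struct work_struct *work)
{
	struct my_muic_info *info = container_of(to_delayed_work(work),
					struct my_muic_info, wq_detcable);

	my_muic_detect_accessory(info);
}

/* Called at the end of probe(): queue the first detection instead of
 * running it synchronously. */
static void my_muic_schedule_first_detect(struct my_muic_info *info,
					  unsigned int delay_ms)
{
	INIT_DELAYED_WORK(&info->wq_detcable, my_muic_detect_cable_wq);

	/* Use the platform-supplied delay if given, otherwise the default. */
	if (!delay_ms)
		delay_ms = MY_DETCABLE_DELAY_MS_DEFAULT;

	schedule_delayed_work(&info->wq_detcable, msecs_to_jiffies(delay_ms));
}

The max8997 driver below is reworked with the same shape.
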
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 93009fe6ef05..e636d950ad6c 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -29,51 +29,14 @@
29#include <linux/irqdomain.h> 29#include <linux/irqdomain.h>
30 30
31#define DEV_NAME "max8997-muic" 31#define DEV_NAME "max8997-muic"
32#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
32 33
33/* MAX8997-MUIC STATUS1 register */ 34enum max8997_muic_adc_debounce_time {
34#define STATUS1_ADC_SHIFT 0 35 ADC_DEBOUNCE_TIME_0_5MS = 0, /* 0.5ms */
35#define STATUS1_ADCLOW_SHIFT 5 36 ADC_DEBOUNCE_TIME_10MS, /* 10ms */
36#define STATUS1_ADCERR_SHIFT 6 37 ADC_DEBOUNCE_TIME_25MS, /* 25ms */
37#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT) 38 ADC_DEBOUNCE_TIME_38_62MS, /* 38.62ms */
38#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT) 39};
39#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
40
41/* MAX8997-MUIC STATUS2 register */
42#define STATUS2_CHGTYP_SHIFT 0
43#define STATUS2_CHGDETRUN_SHIFT 3
44#define STATUS2_DCDTMR_SHIFT 4
45#define STATUS2_DBCHG_SHIFT 5
46#define STATUS2_VBVOLT_SHIFT 6
47#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
48#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
49#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
50#define STATUS2_DBCHG_MASK (0x1 << STATUS2_DBCHG_SHIFT)
51#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
52
53/* MAX8997-MUIC STATUS3 register */
54#define STATUS3_OVP_SHIFT 2
55#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
56
57/* MAX8997-MUIC CONTROL1 register */
58#define COMN1SW_SHIFT 0
59#define COMP2SW_SHIFT 3
60#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
61#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
62#define SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
63
64#define MAX8997_SW_USB ((1 << COMP2SW_SHIFT) | (1 << COMN1SW_SHIFT))
65#define MAX8997_SW_AUDIO ((2 << COMP2SW_SHIFT) | (2 << COMN1SW_SHIFT))
66#define MAX8997_SW_UART ((3 << COMP2SW_SHIFT) | (3 << COMN1SW_SHIFT))
67#define MAX8997_SW_OPEN ((0 << COMP2SW_SHIFT) | (0 << COMN1SW_SHIFT))
68
69#define MAX8997_ADC_GROUND 0x00
70#define MAX8997_ADC_MHL 0x01
71#define MAX8997_ADC_JIG_USB_1 0x18
72#define MAX8997_ADC_JIG_USB_2 0x19
73#define MAX8997_ADC_DESKDOCK 0x1a
74#define MAX8997_ADC_JIG_UART 0x1c
75#define MAX8997_ADC_CARDOCK 0x1d
76#define MAX8997_ADC_OPEN 0x1f
77 40
78struct max8997_muic_irq { 41struct max8997_muic_irq {
79 unsigned int irq; 42 unsigned int irq;
@@ -82,61 +45,303 @@ struct max8997_muic_irq {
82}; 45};
83 46
84static struct max8997_muic_irq muic_irqs[] = { 47static struct max8997_muic_irq muic_irqs[] = {
85 { MAX8997_MUICIRQ_ADCError, "muic-ADC_error" }, 48 { MAX8997_MUICIRQ_ADCError, "muic-ADCERROR" },
86 { MAX8997_MUICIRQ_ADCLow, "muic-ADC_low" }, 49 { MAX8997_MUICIRQ_ADCLow, "muic-ADCLOW" },
87 { MAX8997_MUICIRQ_ADC, "muic-ADC" }, 50 { MAX8997_MUICIRQ_ADC, "muic-ADC" },
88 { MAX8997_MUICIRQ_VBVolt, "muic-VB_voltage" }, 51 { MAX8997_MUICIRQ_VBVolt, "muic-VBVOLT" },
89 { MAX8997_MUICIRQ_DBChg, "muic-DB_charger" }, 52 { MAX8997_MUICIRQ_DBChg, "muic-DBCHG" },
90 { MAX8997_MUICIRQ_DCDTmr, "muic-DCD_timer" }, 53 { MAX8997_MUICIRQ_DCDTmr, "muic-DCDTMR" },
91 { MAX8997_MUICIRQ_ChgDetRun, "muic-CDR_status" }, 54 { MAX8997_MUICIRQ_ChgDetRun, "muic-CHGDETRUN" },
92 { MAX8997_MUICIRQ_ChgTyp, "muic-charger_type" }, 55 { MAX8997_MUICIRQ_ChgTyp, "muic-CHGTYP" },
93 { MAX8997_MUICIRQ_OVP, "muic-over_voltage" }, 56 { MAX8997_MUICIRQ_OVP, "muic-OVP" },
57};
58
59/* Define supported cable type */
60enum max8997_muic_acc_type {
61 MAX8997_MUIC_ADC_GROUND = 0x0,
62 MAX8997_MUIC_ADC_MHL, /* MHL*/
63 MAX8997_MUIC_ADC_REMOTE_S1_BUTTON,
64 MAX8997_MUIC_ADC_REMOTE_S2_BUTTON,
65 MAX8997_MUIC_ADC_REMOTE_S3_BUTTON,
66 MAX8997_MUIC_ADC_REMOTE_S4_BUTTON,
67 MAX8997_MUIC_ADC_REMOTE_S5_BUTTON,
68 MAX8997_MUIC_ADC_REMOTE_S6_BUTTON,
69 MAX8997_MUIC_ADC_REMOTE_S7_BUTTON,
70 MAX8997_MUIC_ADC_REMOTE_S8_BUTTON,
71 MAX8997_MUIC_ADC_REMOTE_S9_BUTTON,
72 MAX8997_MUIC_ADC_REMOTE_S10_BUTTON,
73 MAX8997_MUIC_ADC_REMOTE_S11_BUTTON,
74 MAX8997_MUIC_ADC_REMOTE_S12_BUTTON,
75 MAX8997_MUIC_ADC_RESERVED_ACC_1,
76 MAX8997_MUIC_ADC_RESERVED_ACC_2,
77 MAX8997_MUIC_ADC_RESERVED_ACC_3,
78 MAX8997_MUIC_ADC_RESERVED_ACC_4,
79 MAX8997_MUIC_ADC_RESERVED_ACC_5,
80 MAX8997_MUIC_ADC_CEA936_AUDIO,
81 MAX8997_MUIC_ADC_PHONE_POWERED_DEV,
82 MAX8997_MUIC_ADC_TTY_CONVERTER,
83 MAX8997_MUIC_ADC_UART_CABLE,
84 MAX8997_MUIC_ADC_CEA936A_TYPE1_CHG,
85 MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF, /* JIG-USB-OFF */
86 MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON, /* JIG-USB-ON */
87 MAX8997_MUIC_ADC_AV_CABLE_NOLOAD, /* DESKDOCK */
88 MAX8997_MUIC_ADC_CEA936A_TYPE2_CHG,
89 MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF, /* JIG-UART */
90 MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON, /* CARDOCK */
91 MAX8997_MUIC_ADC_AUDIO_MODE_REMOTE,
92 MAX8997_MUIC_ADC_OPEN, /* OPEN */
93};
94
95enum max8997_muic_cable_group {
96 MAX8997_CABLE_GROUP_ADC = 0,
97 MAX8997_CABLE_GROUP_ADC_GND,
98 MAX8997_CABLE_GROUP_CHG,
99 MAX8997_CABLE_GROUP_VBVOLT,
100};
101
102enum max8997_muic_usb_type {
103 MAX8997_USB_HOST,
104 MAX8997_USB_DEVICE,
105};
106
107enum max8997_muic_charger_type {
108 MAX8997_CHARGER_TYPE_NONE = 0,
109 MAX8997_CHARGER_TYPE_USB,
110 MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT,
111 MAX8997_CHARGER_TYPE_DEDICATED_CHG,
112 MAX8997_CHARGER_TYPE_500MA,
113 MAX8997_CHARGER_TYPE_1A,
114 MAX8997_CHARGER_TYPE_DEAD_BATTERY = 7,
94}; 115};
95 116
96struct max8997_muic_info { 117struct max8997_muic_info {
97 struct device *dev; 118 struct device *dev;
98 struct i2c_client *muic; 119 struct i2c_client *muic;
99 struct max8997_muic_platform_data *muic_pdata; 120 struct extcon_dev *edev;
121 int prev_cable_type;
122 int prev_chg_type;
123 u8 status[2];
100 124
101 int irq; 125 int irq;
102 struct work_struct irq_work; 126 struct work_struct irq_work;
127 struct mutex mutex;
103 128
129 struct max8997_muic_platform_data *muic_pdata;
104 enum max8997_muic_charger_type pre_charger_type; 130 enum max8997_muic_charger_type pre_charger_type;
105 int pre_adc;
106 131
107 struct mutex mutex; 132 /*
 133	 * Use a delayed workqueue to detect the cable state and then
 134	 * notify it to the notifiee/platform through a uevent, so that
 135	 * the extcon provider driver reports the cable state to the
 136	 * upper layer only after the platform has finished booting.
137 */
138 struct delayed_work wq_detcable;
139
140 /*
141 * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
142 * h/w path of COMP2/COMN1 on CONTROL1 register.
143 */
144 int path_usb;
145 int path_uart;
146};
108 147
109 struct extcon_dev *edev; 148enum {
149 EXTCON_CABLE_USB = 0,
150 EXTCON_CABLE_USB_HOST,
151 EXTCON_CABLE_TA,
152 EXTCON_CABLE_FAST_CHARGER,
153 EXTCON_CABLE_SLOW_CHARGER,
154 EXTCON_CABLE_CHARGE_DOWNSTREAM,
155 EXTCON_CABLE_MHL,
156 EXTCON_CABLE_DOCK_DESK,
157 EXTCON_CABLE_DOCK_CARD,
158 EXTCON_CABLE_JIG,
159
160 _EXTCON_CABLE_NUM,
110}; 161};
111 162
112const char *max8997_extcon_cable[] = { 163static const char *max8997_extcon_cable[] = {
113 [0] = "USB", 164 [EXTCON_CABLE_USB] = "USB",
114 [1] = "USB-Host", 165 [EXTCON_CABLE_USB_HOST] = "USB-Host",
115 [2] = "TA", 166 [EXTCON_CABLE_TA] = "TA",
116 [3] = "Fast-charger", 167 [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
117 [4] = "Slow-charger", 168 [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
118 [5] = "Charge-downstream", 169 [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
119 [6] = "MHL", 170 [EXTCON_CABLE_MHL] = "MHL",
120 [7] = "Dock-desk", 171 [EXTCON_CABLE_DOCK_DESK] = "Dock-Desk",
121 [8] = "Dock-card", 172 [EXTCON_CABLE_DOCK_CARD] = "Dock-Card",
122 [9] = "JIG", 173 [EXTCON_CABLE_JIG] = "JIG",
123 174
124 NULL, 175 NULL,
125}; 176};
126 177
178/*
179 * max8997_muic_set_debounce_time - Set the debounce time of ADC
180 * @info: the instance including private data of max8997 MUIC
181 * @time: the debounce time of ADC
182 */
183static int max8997_muic_set_debounce_time(struct max8997_muic_info *info,
184 enum max8997_muic_adc_debounce_time time)
185{
186 int ret;
187
188 switch (time) {
189 case ADC_DEBOUNCE_TIME_0_5MS:
190 case ADC_DEBOUNCE_TIME_10MS:
191 case ADC_DEBOUNCE_TIME_25MS:
192 case ADC_DEBOUNCE_TIME_38_62MS:
193 ret = max8997_update_reg(info->muic,
194 MAX8997_MUIC_REG_CONTROL3,
195 time << CONTROL3_ADCDBSET_SHIFT,
196 CONTROL3_ADCDBSET_MASK);
197 if (ret) {
198 dev_err(info->dev, "failed to set ADC debounce time\n");
199 return -EAGAIN;
200 }
201 break;
202 default:
203 dev_err(info->dev, "invalid ADC debounce time\n");
204 return -EINVAL;
205 }
206
207 return 0;
208};
209
210/*
211 * max8997_muic_set_path - Set hardware line according to attached cable
212 * @info: the instance including private data of max8997 MUIC
213 * @value: the path according to attached cable
214 * @attached: the state of cable (true:attached, false:detached)
215 *
 216 * The max8997 MUIC device shares its external H/W lines among a variety of
 217 * cables, so this function sets the internal H/W line path according to the
 218 * type of the attached cable.
219 */
220static int max8997_muic_set_path(struct max8997_muic_info *info,
221 u8 val, bool attached)
222{
223 int ret = 0;
224 u8 ctrl1, ctrl2 = 0;
225
226 if (attached)
227 ctrl1 = val;
228 else
229 ctrl1 = CONTROL1_SW_OPEN;
230
231 ret = max8997_update_reg(info->muic,
232 MAX8997_MUIC_REG_CONTROL1, ctrl1, COMP_SW_MASK);
233 if (ret < 0) {
234 dev_err(info->dev, "failed to update MUIC register\n");
235 return -EAGAIN;
236 }
237
238 if (attached)
239 ctrl2 |= CONTROL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
240 else
241 ctrl2 |= CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
242
243 ret = max8997_update_reg(info->muic,
244 MAX8997_MUIC_REG_CONTROL2, ctrl2,
245 CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK);
246 if (ret < 0) {
247 dev_err(info->dev, "failed to update MUIC register\n");
248 return -EAGAIN;
249 }
250
251 dev_info(info->dev,
252 "CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
253 ctrl1, ctrl2, attached ? "attached" : "detached");
254
255 return 0;
256}
257
258/*
259 * max8997_muic_get_cable_type - Return cable type and check cable state
260 * @info: the instance including private data of max8997 MUIC
 261 * @group: the cable group (ADC or CHG) of the attached cable
 262 * @attached: set to the cable state (attached or detached) on return
 263 *
 264 * This function checks whether the cable is attached or detached, and
 265 * then determines the precise cable type according to the cable group:
266 * - MAX8997_CABLE_GROUP_ADC
267 * - MAX8997_CABLE_GROUP_CHG
268 */
269static int max8997_muic_get_cable_type(struct max8997_muic_info *info,
270 enum max8997_muic_cable_group group, bool *attached)
271{
272 int cable_type = 0;
273 int adc;
274 int chg_type;
275
276 switch (group) {
277 case MAX8997_CABLE_GROUP_ADC:
278 /*
279 * Read ADC value to check cable type and decide cable state
280 * according to cable type
281 */
282 adc = info->status[0] & STATUS1_ADC_MASK;
283 adc >>= STATUS1_ADC_SHIFT;
284
285 /*
286 * Check current cable state/cable type and store cable type
287 * (info->prev_cable_type) for handling cable when cable is
288 * detached.
289 */
290 if (adc == MAX8997_MUIC_ADC_OPEN) {
291 *attached = false;
292
293 cable_type = info->prev_cable_type;
294 info->prev_cable_type = MAX8997_MUIC_ADC_OPEN;
295 } else {
296 *attached = true;
297
298 cable_type = info->prev_cable_type = adc;
299 }
300 break;
301 case MAX8997_CABLE_GROUP_CHG:
302 /*
303 * Read charger type to check cable type and decide cable state
304 * according to type of charger cable.
305 */
306 chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
307 chg_type >>= STATUS2_CHGTYP_SHIFT;
308
309 if (chg_type == MAX8997_CHARGER_TYPE_NONE) {
310 *attached = false;
311
312 cable_type = info->prev_chg_type;
313 info->prev_chg_type = MAX8997_CHARGER_TYPE_NONE;
314 } else {
315 *attached = true;
316
317 /*
318 * Check current cable state/cable type and store cable
319 * type(info->prev_chg_type) for handling cable when
320 * charger cable is detached.
321 */
322 cable_type = info->prev_chg_type = chg_type;
323 }
324
325 break;
326 default:
327 dev_err(info->dev, "Unknown cable group (%d)\n", group);
328 cable_type = -EINVAL;
329 break;
330 }
331
332 return cable_type;
333}
334
127static int max8997_muic_handle_usb(struct max8997_muic_info *info, 335static int max8997_muic_handle_usb(struct max8997_muic_info *info,
128 enum max8997_muic_usb_type usb_type, bool attached) 336 enum max8997_muic_usb_type usb_type, bool attached)
129{ 337{
130 int ret = 0; 338 int ret = 0;
131 339
132 if (usb_type == MAX8997_USB_HOST) { 340 if (usb_type == MAX8997_USB_HOST) {
133 /* switch to USB */ 341 ret = max8997_muic_set_path(info, info->path_usb, attached);
134 ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1, 342 if (ret < 0) {
135 attached ? MAX8997_SW_USB : MAX8997_SW_OPEN,
136 SW_MASK);
137 if (ret) {
138 dev_err(info->dev, "failed to update muic register\n"); 343 dev_err(info->dev, "failed to update muic register\n");
139 goto out; 344 return ret;
140 } 345 }
141 } 346 }
142 347
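
The new max8997_muic_set_path() above programs the connector routing in two register writes: the COMN1/COMP2 switch field of CONTROL1 selects the path (or opens it on detach), and the CPEn/LowPwr bits of CONTROL2 are flipped depending on attach state. Below is a compressed sketch of that two-step update; demo_update_reg() and all DEMO_* values are hypothetical placeholders for the real max8997_update_reg() helper and register definitions.

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_REG_CONTROL1	0x01	/* illustrative register addresses */
#define DEMO_REG_CONTROL2	0x02
#define DEMO_SW_OPEN		0x00
#define DEMO_SW_MASK		0x3f
#define DEMO_CTRL2_LOWPWR	BIT(0)	/* illustrative bit positions */
#define DEMO_CTRL2_CPEN		BIT(1)

/* Stand-in for max8997_update_reg(): an I2C read-modify-write. */
static int demo_update_reg(u8 reg, u8 val, u8 mask)
{
	return 0;
}

static int demo_set_path(u8 path, bool attached)
{
	int ret;

	/* Route COMN1/COMP2 to the requested path, or open the switch. */
	ret = demo_update_reg(DEMO_REG_CONTROL1,
			      attached ? path : DEMO_SW_OPEN, DEMO_SW_MASK);
	if (ret < 0)
		return ret;

	/* Attached: CPEn set, LowPwr clear; detached: the reverse. */
	return demo_update_reg(DEMO_REG_CONTROL2,
			       attached ? DEMO_CTRL2_CPEN : DEMO_CTRL2_LOWPWR,
			       DEMO_CTRL2_LOWPWR | DEMO_CTRL2_CPEN);
}
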
@@ -148,41 +353,39 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info,
148 extcon_set_cable_state(info->edev, "USB", attached); 353 extcon_set_cable_state(info->edev, "USB", attached);
149 break; 354 break;
150 default: 355 default:
151 ret = -EINVAL; 356 dev_err(info->dev, "failed to detect %s usb cable\n",
152 break; 357 attached ? "attached" : "detached");
358 return -EINVAL;
153 } 359 }
154 360
155out: 361 return 0;
156 return ret;
157} 362}
158 363
159static int max8997_muic_handle_dock(struct max8997_muic_info *info, 364static int max8997_muic_handle_dock(struct max8997_muic_info *info,
160 int adc, bool attached) 365 int cable_type, bool attached)
161{ 366{
162 int ret = 0; 367 int ret = 0;
163 368
164 /* switch to AUDIO */ 369 ret = max8997_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
165 ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
166 attached ? MAX8997_SW_AUDIO : MAX8997_SW_OPEN,
167 SW_MASK);
168 if (ret) { 370 if (ret) {
169 dev_err(info->dev, "failed to update muic register\n"); 371 dev_err(info->dev, "failed to update muic register\n");
170 goto out; 372 return ret;
171 } 373 }
172 374
173 switch (adc) { 375 switch (cable_type) {
174 case MAX8997_ADC_DESKDOCK: 376 case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
175 extcon_set_cable_state(info->edev, "Dock-desk", attached); 377 extcon_set_cable_state(info->edev, "Dock-desk", attached);
176 break; 378 break;
177 case MAX8997_ADC_CARDOCK: 379 case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
178 extcon_set_cable_state(info->edev, "Dock-card", attached); 380 extcon_set_cable_state(info->edev, "Dock-card", attached);
179 break; 381 break;
180 default: 382 default:
181 ret = -EINVAL; 383 dev_err(info->dev, "failed to detect %s dock device\n",
182 break; 384 attached ? "attached" : "detached");
385 return -EINVAL;
183 } 386 }
184out: 387
185 return ret; 388 return 0;
186} 389}
187 390
188static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info, 391static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
@@ -191,199 +394,188 @@ static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
191 int ret = 0; 394 int ret = 0;
192 395
193 /* switch to UART */ 396 /* switch to UART */
194 ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1, 397 ret = max8997_muic_set_path(info, info->path_uart, attached);
195 attached ? MAX8997_SW_UART : MAX8997_SW_OPEN,
196 SW_MASK);
197 if (ret) { 398 if (ret) {
198 dev_err(info->dev, "failed to update muic register\n"); 399 dev_err(info->dev, "failed to update muic register\n");
199 goto out; 400 return -EINVAL;
200 } 401 }
201 402
202 extcon_set_cable_state(info->edev, "JIG", attached); 403 extcon_set_cable_state(info->edev, "JIG", attached);
203out:
204 return ret;
205}
206
207static int max8997_muic_handle_adc_detach(struct max8997_muic_info *info)
208{
209 int ret = 0;
210 404
211 switch (info->pre_adc) { 405 return 0;
212 case MAX8997_ADC_GROUND:
213 ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, false);
214 break;
215 case MAX8997_ADC_MHL:
216 extcon_set_cable_state(info->edev, "MHL", false);
217 break;
218 case MAX8997_ADC_JIG_USB_1:
219 case MAX8997_ADC_JIG_USB_2:
220 ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, false);
221 break;
222 case MAX8997_ADC_DESKDOCK:
223 case MAX8997_ADC_CARDOCK:
224 ret = max8997_muic_handle_dock(info, info->pre_adc, false);
225 break;
226 case MAX8997_ADC_JIG_UART:
227 ret = max8997_muic_handle_jig_uart(info, false);
228 break;
229 default:
230 break;
231 }
232
233 return ret;
234} 406}
235 407
236static int max8997_muic_handle_adc(struct max8997_muic_info *info, int adc) 408static int max8997_muic_adc_handler(struct max8997_muic_info *info)
237{ 409{
410 int cable_type;
411 bool attached;
238 int ret = 0; 412 int ret = 0;
239 413
240 switch (adc) { 414 /* Check cable state which is either detached or attached */
241 case MAX8997_ADC_GROUND: 415 cable_type = max8997_muic_get_cable_type(info,
242 ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, true); 416 MAX8997_CABLE_GROUP_ADC, &attached);
243 break; 417
244 case MAX8997_ADC_MHL: 418 switch (cable_type) {
245 extcon_set_cable_state(info->edev, "MHL", true); 419 case MAX8997_MUIC_ADC_GROUND:
246 break; 420 ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, attached);
247 case MAX8997_ADC_JIG_USB_1: 421 if (ret < 0)
248 case MAX8997_ADC_JIG_USB_2: 422 return ret;
249 ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, true); 423 break;
250 break; 424 case MAX8997_MUIC_ADC_MHL:
251 case MAX8997_ADC_DESKDOCK: 425 extcon_set_cable_state(info->edev, "MHL", attached);
252 case MAX8997_ADC_CARDOCK: 426 break;
253 ret = max8997_muic_handle_dock(info, adc, true); 427 case MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF:
254 break; 428 case MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON:
255 case MAX8997_ADC_JIG_UART: 429 ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, attached);
256 ret = max8997_muic_handle_jig_uart(info, true); 430 if (ret < 0)
257 break; 431 return ret;
258 case MAX8997_ADC_OPEN: 432 break;
259 ret = max8997_muic_handle_adc_detach(info); 433 case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
260 break; 434 case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
261 default: 435 ret = max8997_muic_handle_dock(info, cable_type, attached);
262 ret = -EINVAL; 436 if (ret < 0)
263 goto out; 437 return ret;
264 } 438 break;
265 439 case MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF:
266 info->pre_adc = adc; 440 ret = max8997_muic_handle_jig_uart(info, attached);
267out: 441 break;
268 return ret; 442 case MAX8997_MUIC_ADC_REMOTE_S1_BUTTON:
269} 443 case MAX8997_MUIC_ADC_REMOTE_S2_BUTTON:
270 444 case MAX8997_MUIC_ADC_REMOTE_S3_BUTTON:
271static int max8997_muic_handle_charger_type_detach( 445 case MAX8997_MUIC_ADC_REMOTE_S4_BUTTON:
272 struct max8997_muic_info *info) 446 case MAX8997_MUIC_ADC_REMOTE_S5_BUTTON:
273{ 447 case MAX8997_MUIC_ADC_REMOTE_S6_BUTTON:
274 switch (info->pre_charger_type) { 448 case MAX8997_MUIC_ADC_REMOTE_S7_BUTTON:
275 case MAX8997_CHARGER_TYPE_USB: 449 case MAX8997_MUIC_ADC_REMOTE_S8_BUTTON:
276 extcon_set_cable_state(info->edev, "USB", false); 450 case MAX8997_MUIC_ADC_REMOTE_S9_BUTTON:
277 break; 451 case MAX8997_MUIC_ADC_REMOTE_S10_BUTTON:
278 case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT: 452 case MAX8997_MUIC_ADC_REMOTE_S11_BUTTON:
279 extcon_set_cable_state(info->edev, "Charge-downstream", false); 453 case MAX8997_MUIC_ADC_REMOTE_S12_BUTTON:
280 break; 454 case MAX8997_MUIC_ADC_RESERVED_ACC_1:
281 case MAX8997_CHARGER_TYPE_DEDICATED_CHG: 455 case MAX8997_MUIC_ADC_RESERVED_ACC_2:
282 extcon_set_cable_state(info->edev, "TA", false); 456 case MAX8997_MUIC_ADC_RESERVED_ACC_3:
283 break; 457 case MAX8997_MUIC_ADC_RESERVED_ACC_4:
284 case MAX8997_CHARGER_TYPE_500MA: 458 case MAX8997_MUIC_ADC_RESERVED_ACC_5:
285 extcon_set_cable_state(info->edev, "Slow-charger", false); 459 case MAX8997_MUIC_ADC_CEA936_AUDIO:
286 break; 460 case MAX8997_MUIC_ADC_PHONE_POWERED_DEV:
287 case MAX8997_CHARGER_TYPE_1A: 461 case MAX8997_MUIC_ADC_TTY_CONVERTER:
288 extcon_set_cable_state(info->edev, "Fast-charger", false); 462 case MAX8997_MUIC_ADC_UART_CABLE:
289 break; 463 case MAX8997_MUIC_ADC_CEA936A_TYPE1_CHG:
464 case MAX8997_MUIC_ADC_CEA936A_TYPE2_CHG:
465 case MAX8997_MUIC_ADC_AUDIO_MODE_REMOTE:
466 /*
 467 * This cable isn't used in the general case. If it is needed to
 468 * detect an additional cable, proper handling should be implemented
 469 * for when this cable is attached/detached.
470 */
471 dev_info(info->dev,
472 "cable is %s but it isn't used (type:0x%x)\n",
473 attached ? "attached" : "detached", cable_type);
474 return -EAGAIN;
290 default: 475 default:
476 dev_err(info->dev,
477 "failed to detect %s unknown cable (type:0x%x)\n",
478 attached ? "attached" : "detached", cable_type);
291 return -EINVAL; 479 return -EINVAL;
292 break;
293 } 480 }
294 481
295 return 0; 482 return 0;
296} 483}
297 484
298static int max8997_muic_handle_charger_type(struct max8997_muic_info *info, 485static int max8997_muic_chg_handler(struct max8997_muic_info *info)
299 enum max8997_muic_charger_type charger_type)
300{ 486{
301 u8 adc; 487 int chg_type;
302 int ret; 488 bool attached;
489 int adc;
303 490
304 ret = max8997_read_reg(info->muic, MAX8997_MUIC_REG_STATUS1, &adc); 491 chg_type = max8997_muic_get_cable_type(info,
305 if (ret) { 492 MAX8997_CABLE_GROUP_CHG, &attached);
306 dev_err(info->dev, "failed to read muic register\n");
307 goto out;
308 }
309 493
310 switch (charger_type) { 494 switch (chg_type) {
311 case MAX8997_CHARGER_TYPE_NONE: 495 case MAX8997_CHARGER_TYPE_NONE:
312 ret = max8997_muic_handle_charger_type_detach(info);
313 break; 496 break;
314 case MAX8997_CHARGER_TYPE_USB: 497 case MAX8997_CHARGER_TYPE_USB:
315 if ((adc & STATUS1_ADC_MASK) == MAX8997_ADC_OPEN) { 498 adc = info->status[0] & STATUS1_ADC_MASK;
499 adc >>= STATUS1_ADC_SHIFT;
500
501 if ((adc & STATUS1_ADC_MASK) == MAX8997_MUIC_ADC_OPEN) {
316 max8997_muic_handle_usb(info, 502 max8997_muic_handle_usb(info,
317 MAX8997_USB_DEVICE, true); 503 MAX8997_USB_DEVICE, attached);
318 } 504 }
319 break; 505 break;
320 case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT: 506 case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
321 extcon_set_cable_state(info->edev, "Charge-downstream", true); 507 extcon_set_cable_state(info->edev, "Charge-downstream", attached);
322 break; 508 break;
323 case MAX8997_CHARGER_TYPE_DEDICATED_CHG: 509 case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
324 extcon_set_cable_state(info->edev, "TA", true); 510 extcon_set_cable_state(info->edev, "TA", attached);
325 break; 511 break;
326 case MAX8997_CHARGER_TYPE_500MA: 512 case MAX8997_CHARGER_TYPE_500MA:
327 extcon_set_cable_state(info->edev, "Slow-charger", true); 513 extcon_set_cable_state(info->edev, "Slow-charger", attached);
328 break; 514 break;
329 case MAX8997_CHARGER_TYPE_1A: 515 case MAX8997_CHARGER_TYPE_1A:
330 extcon_set_cable_state(info->edev, "Fast-charger", true); 516 extcon_set_cable_state(info->edev, "Fast-charger", attached);
331 break; 517 break;
332 default: 518 default:
333 ret = -EINVAL; 519 dev_err(info->dev,
334 goto out; 520 "failed to detect %s unknown chg cable (type:0x%x)\n",
521 attached ? "attached" : "detached", chg_type);
522 return -EINVAL;
335 } 523 }
336 524
337 info->pre_charger_type = charger_type; 525 return 0;
338out:
339 return ret;
340} 526}
341 527
342static void max8997_muic_irq_work(struct work_struct *work) 528static void max8997_muic_irq_work(struct work_struct *work)
343{ 529{
344 struct max8997_muic_info *info = container_of(work, 530 struct max8997_muic_info *info = container_of(work,
345 struct max8997_muic_info, irq_work); 531 struct max8997_muic_info, irq_work);
346 u8 status[2];
347 u8 adc, chg_type;
348 int irq_type = 0; 532 int irq_type = 0;
349 int i, ret; 533 int i, ret;
350 534
535 if (!info->edev)
536 return;
537
351 mutex_lock(&info->mutex); 538 mutex_lock(&info->mutex);
352 539
540 for (i = 0 ; i < ARRAY_SIZE(muic_irqs) ; i++)
541 if (info->irq == muic_irqs[i].virq)
542 irq_type = muic_irqs[i].irq;
543
353 ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1, 544 ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
354 2, status); 545 2, info->status);
355 if (ret) { 546 if (ret) {
356 dev_err(info->dev, "failed to read muic register\n"); 547 dev_err(info->dev, "failed to read muic register\n");
357 mutex_unlock(&info->mutex); 548 mutex_unlock(&info->mutex);
358 return; 549 return;
359 } 550 }
360 551
361 dev_dbg(info->dev, "%s: STATUS1:0x%x, 2:0x%x\n", __func__,
362 status[0], status[1]);
363
364 for (i = 0 ; i < ARRAY_SIZE(muic_irqs) ; i++)
365 if (info->irq == muic_irqs[i].virq)
366 irq_type = muic_irqs[i].irq;
367
368 switch (irq_type) { 552 switch (irq_type) {
553 case MAX8997_MUICIRQ_ADCError:
554 case MAX8997_MUICIRQ_ADCLow:
369 case MAX8997_MUICIRQ_ADC: 555 case MAX8997_MUICIRQ_ADC:
370 adc = status[0] & STATUS1_ADC_MASK; 556 /* Handle all of cable except for charger cable */
371 adc >>= STATUS1_ADC_SHIFT; 557 ret = max8997_muic_adc_handler(info);
372
373 max8997_muic_handle_adc(info, adc);
374 break; 558 break;
559 case MAX8997_MUICIRQ_VBVolt:
560 case MAX8997_MUICIRQ_DBChg:
561 case MAX8997_MUICIRQ_DCDTmr:
562 case MAX8997_MUICIRQ_ChgDetRun:
375 case MAX8997_MUICIRQ_ChgTyp: 563 case MAX8997_MUICIRQ_ChgTyp:
376 chg_type = status[1] & STATUS2_CHGTYP_MASK; 564 /* Handle charger cable */
377 chg_type >>= STATUS2_CHGTYP_SHIFT; 565 ret = max8997_muic_chg_handler(info);
378 566 break;
379 max8997_muic_handle_charger_type(info, chg_type); 567 case MAX8997_MUICIRQ_OVP:
380 break; 568 break;
381 default: 569 default:
382 dev_info(info->dev, "misc interrupt: irq %d occurred\n", 570 dev_info(info->dev, "misc interrupt: irq %d occurred\n",
383 irq_type); 571 irq_type);
384 break; 572 mutex_unlock(&info->mutex);
573 return;
385 } 574 }
386 575
576 if (ret < 0)
577 dev_err(info->dev, "failed to handle MUIC interrupt\n");
578
387 mutex_unlock(&info->mutex); 579 mutex_unlock(&info->mutex);
388 580
389 return; 581 return;
@@ -401,29 +593,60 @@ static irqreturn_t max8997_muic_irq_handler(int irq, void *data)
401 return IRQ_HANDLED; 593 return IRQ_HANDLED;
402} 594}
403 595
404static void max8997_muic_detect_dev(struct max8997_muic_info *info) 596static int max8997_muic_detect_dev(struct max8997_muic_info *info)
405{ 597{
406 int ret; 598 int ret = 0;
407 u8 status[2], adc, chg_type; 599 int adc;
600 int chg_type;
601 bool attached;
408 602
409 ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1, 603 mutex_lock(&info->mutex);
410 2, status); 604
605 /* Read STATUSx register to detect accessory */
606 ret = max8997_bulk_read(info->muic,
607 MAX8997_MUIC_REG_STATUS1, 2, info->status);
411 if (ret) { 608 if (ret) {
412 dev_err(info->dev, "failed to read muic register\n"); 609 dev_err(info->dev, "failed to read MUIC register\n");
413 return; 610 mutex_unlock(&info->mutex);
611 return -EINVAL;
414 } 612 }
415 613
416 dev_info(info->dev, "STATUS1:0x%x, STATUS2:0x%x\n", 614 adc = max8997_muic_get_cable_type(info, MAX8997_CABLE_GROUP_ADC,
417 status[0], status[1]); 615 &attached);
616 if (attached && adc != MAX8997_MUIC_ADC_OPEN) {
617 ret = max8997_muic_adc_handler(info);
618 if (ret < 0) {
619 dev_err(info->dev, "Cannot detect ADC cable\n");
620 mutex_unlock(&info->mutex);
621 return ret;
622 }
623 }
418 624
419 adc = status[0] & STATUS1_ADC_MASK; 625 chg_type = max8997_muic_get_cable_type(info, MAX8997_CABLE_GROUP_CHG,
420 adc >>= STATUS1_ADC_SHIFT; 626 &attached);
627 if (attached && chg_type != MAX8997_CHARGER_TYPE_NONE) {
628 ret = max8997_muic_chg_handler(info);
629 if (ret < 0) {
630 dev_err(info->dev, "Cannot detect charger cable\n");
631 mutex_unlock(&info->mutex);
632 return ret;
633 }
634 }
635
636 mutex_unlock(&info->mutex);
637
638 return 0;
639}
421 640
422 chg_type = status[1] & STATUS2_CHGTYP_MASK; 641static void max8997_muic_detect_cable_wq(struct work_struct *work)
423 chg_type >>= STATUS2_CHGTYP_SHIFT; 642{
643 struct max8997_muic_info *info = container_of(to_delayed_work(work),
644 struct max8997_muic_info, wq_detcable);
645 int ret;
424 646
425 max8997_muic_handle_adc(info, adc); 647 ret = max8997_muic_detect_dev(info);
426 max8997_muic_handle_charger_type(info, chg_type); 648 if (ret < 0)
649 pr_err("failed to detect cable type\n");
427} 650}
428 651
429static int max8997_muic_probe(struct platform_device *pdev) 652static int max8997_muic_probe(struct platform_device *pdev)
@@ -431,6 +654,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
431 struct max8997_dev *max8997 = dev_get_drvdata(pdev->dev.parent); 654 struct max8997_dev *max8997 = dev_get_drvdata(pdev->dev.parent);
432 struct max8997_platform_data *pdata = dev_get_platdata(max8997->dev); 655 struct max8997_platform_data *pdata = dev_get_platdata(max8997->dev);
433 struct max8997_muic_info *info; 656 struct max8997_muic_info *info;
657 int delay_jiffies;
434 int ret, i; 658 int ret, i;
435 659
436 info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info), 660 info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
@@ -459,8 +683,10 @@ static int max8997_muic_probe(struct platform_device *pdev)
459 } 683 }
460 muic_irq->virq = virq; 684 muic_irq->virq = virq;
461 685
462 ret = request_threaded_irq(virq, NULL, max8997_muic_irq_handler, 686 ret = request_threaded_irq(virq, NULL,
463 0, muic_irq->name, info); 687 max8997_muic_irq_handler,
688 IRQF_NO_SUSPEND,
689 muic_irq->name, info);
464 if (ret) { 690 if (ret) {
465 dev_err(&pdev->dev, 691 dev_err(&pdev->dev,
466 "failed: irq request (IRQ: %d," 692 "failed: irq request (IRQ: %d,"
@@ -496,10 +722,42 @@ static int max8997_muic_probe(struct platform_device *pdev)
496 } 722 }
497 } 723 }
498 724
499 /* Initial device detection */ 725 /*
 500 max8997_muic_detect_dev(info); 726	 * Default USB/UART path: selects either the UART/USB or AUX_UART/AUX_USB
 727	 * H/W path of COMP2/COMN1 on the CONTROL1 register.
728 */
729 if (pdata->muic_pdata->path_uart)
730 info->path_uart = pdata->muic_pdata->path_uart;
731 else
732 info->path_uart = CONTROL1_SW_UART;
733
734 if (pdata->muic_pdata->path_usb)
735 info->path_usb = pdata->muic_pdata->path_usb;
736 else
737 info->path_usb = CONTROL1_SW_USB;
738
739 /* Set initial path for UART */
740 max8997_muic_set_path(info, info->path_uart, true);
741
742 /* Set ADC debounce time */
743 max8997_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
744
745 /*
 746	 * Detect the accessory after platform initialization has completed.
 747	 *
 748	 * - Use a delayed workqueue to detect the cable state and then
 749	 *   notify it to the notifiee/platform through a uevent, so that
 750	 *   the extcon provider driver reports the cable state to the
 751	 *   upper layer only after the platform has finished booting.
752 */
753 INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq);
754 if (pdata->muic_pdata->detcable_delay_ms)
755 delay_jiffies = msecs_to_jiffies(pdata->muic_pdata->detcable_delay_ms);
756 else
757 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
758 schedule_delayed_work(&info->wq_detcable, delay_jiffies);
501 759
502 return ret; 760 return 0;
503 761
504err_irq: 762err_irq:
505 while (--i >= 0) 763 while (--i >= 0)
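
Both MUIC drivers now funnel classification through a get_cable_type() helper that extracts the ADC or CHGTYP field from the cached STATUS registers and, on detach, hands back the previously stored type so the handler knows which cable to report as removed. A rough sketch of that field extraction and previous-type bookkeeping, with made-up DEMO_* mask/shift values standing in for the real register layout:

#include <linux/types.h>

#define DEMO_STATUS1_ADC_SHIFT	0	/* illustrative field layout only */
#define DEMO_STATUS1_ADC_MASK	(0x1f << DEMO_STATUS1_ADC_SHIFT)
#define DEMO_ADC_OPEN		0x1f

struct demo_muic_info {
	u8 status[2];		/* cached STATUS1/STATUS2 */
	int prev_cable_type;
};

/*
 * Classify the current ADC cable. On detach (ADC reads "open"), return
 * the previously seen type so the caller can clear the right extcon
 * state, and remember "open" for the next pass.
 */
static int demo_get_adc_cable_type(struct demo_muic_info *info, bool *attached)
{
	int adc = (info->status[0] & DEMO_STATUS1_ADC_MASK) >>
		  DEMO_STATUS1_ADC_SHIFT;

	if (adc == DEMO_ADC_OPEN) {
		*attached = false;
		adc = info->prev_cable_type;
		info->prev_cable_type = DEMO_ADC_OPEN;
	} else {
		*attached = true;
		info->prev_cable_type = adc;
	}

	return adc;
}
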
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 3d62781b8993..aa3fec0d9dc6 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -568,8 +568,7 @@ static int mousevsc_remove(struct hv_device *dev)
568 568
569static const struct hv_vmbus_device_id id_table[] = { 569static const struct hv_vmbus_device_id id_table[] = {
570 /* Mouse guid */ 570 /* Mouse guid */
571 { VMBUS_DEVICE(0x9E, 0xB6, 0xA8, 0xCF, 0x4A, 0x5B, 0xc0, 0x4c, 571 { HV_MOUSE_GUID, },
572 0xB9, 0x8B, 0x8B, 0xA1, 0xA1, 0xF3, 0xF9, 0x5A) },
573 { }, 572 { },
574}; 573};
575 574
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 773a2f25a8f0..0b122f8c7005 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -55,7 +55,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
55 [channel->monitor_grp].pending); 55 [channel->monitor_grp].pending);
56 56
57 } else { 57 } else {
58 vmbus_set_event(channel->offermsg.child_relid); 58 vmbus_set_event(channel);
59 } 59 }
60} 60}
61 61
@@ -181,7 +181,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
181 open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle; 181 open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
182 open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >> 182 open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
183 PAGE_SHIFT; 183 PAGE_SHIFT;
184 open_msg->server_contextarea_gpadlhandle = 0; 184 open_msg->target_vp = newchannel->target_vp;
185 185
186 if (userdatalen > MAX_USER_DEFINED_BYTES) { 186 if (userdatalen > MAX_USER_DEFINED_BYTES) {
187 err = -EINVAL; 187 err = -EINVAL;
@@ -564,6 +564,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
564 struct scatterlist bufferlist[3]; 564 struct scatterlist bufferlist[3];
565 u64 aligned_data = 0; 565 u64 aligned_data = 0;
566 int ret; 566 int ret;
567 bool signal = false;
567 568
568 569
569 /* Setup the descriptor */ 570 /* Setup the descriptor */
@@ -580,9 +581,9 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
580 sg_set_buf(&bufferlist[2], &aligned_data, 581 sg_set_buf(&bufferlist[2], &aligned_data,
581 packetlen_aligned - packetlen); 582 packetlen_aligned - packetlen);
582 583
583 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3); 584 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
584 585
585 if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound)) 586 if (ret == 0 && signal)
586 vmbus_setevent(channel); 587 vmbus_setevent(channel);
587 588
588 return ret; 589 return ret;
@@ -606,6 +607,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
606 u32 packetlen_aligned; 607 u32 packetlen_aligned;
607 struct scatterlist bufferlist[3]; 608 struct scatterlist bufferlist[3];
608 u64 aligned_data = 0; 609 u64 aligned_data = 0;
610 bool signal = false;
609 611
610 if (pagecount > MAX_PAGE_BUFFER_COUNT) 612 if (pagecount > MAX_PAGE_BUFFER_COUNT)
611 return -EINVAL; 613 return -EINVAL;
@@ -641,9 +643,9 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
641 sg_set_buf(&bufferlist[2], &aligned_data, 643 sg_set_buf(&bufferlist[2], &aligned_data,
642 packetlen_aligned - packetlen); 644 packetlen_aligned - packetlen);
643 645
644 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3); 646 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
645 647
646 if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound)) 648 if (ret == 0 && signal)
647 vmbus_setevent(channel); 649 vmbus_setevent(channel);
648 650
649 return ret; 651 return ret;
@@ -665,6 +667,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
665 u32 packetlen_aligned; 667 u32 packetlen_aligned;
666 struct scatterlist bufferlist[3]; 668 struct scatterlist bufferlist[3];
667 u64 aligned_data = 0; 669 u64 aligned_data = 0;
670 bool signal = false;
668 u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, 671 u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
669 multi_pagebuffer->len); 672 multi_pagebuffer->len);
670 673
@@ -703,9 +706,9 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
703 sg_set_buf(&bufferlist[2], &aligned_data, 706 sg_set_buf(&bufferlist[2], &aligned_data,
704 packetlen_aligned - packetlen); 707 packetlen_aligned - packetlen);
705 708
706 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3); 709 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
707 710
708 if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound)) 711 if (ret == 0 && signal)
709 vmbus_setevent(channel); 712 vmbus_setevent(channel);
710 713
711 return ret; 714 return ret;
@@ -732,6 +735,7 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
732 u32 packetlen; 735 u32 packetlen;
733 u32 userlen; 736 u32 userlen;
734 int ret; 737 int ret;
738 bool signal = false;
735 739
736 *buffer_actual_len = 0; 740 *buffer_actual_len = 0;
737 *requestid = 0; 741 *requestid = 0;
@@ -758,8 +762,10 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
758 762
759 /* Copy over the packet to the user buffer */ 763 /* Copy over the packet to the user buffer */
760 ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen, 764 ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
761 (desc.offset8 << 3)); 765 (desc.offset8 << 3), &signal);
762 766
767 if (signal)
768 vmbus_setevent(channel);
763 769
764 return 0; 770 return 0;
765} 771}
@@ -774,8 +780,8 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
774{ 780{
775 struct vmpacket_descriptor desc; 781 struct vmpacket_descriptor desc;
776 u32 packetlen; 782 u32 packetlen;
777 u32 userlen;
778 int ret; 783 int ret;
784 bool signal = false;
779 785
780 *buffer_actual_len = 0; 786 *buffer_actual_len = 0;
781 *requestid = 0; 787 *requestid = 0;
@@ -788,7 +794,6 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
788 794
789 795
790 packetlen = desc.len8 << 3; 796 packetlen = desc.len8 << 3;
791 userlen = packetlen - (desc.offset8 << 3);
792 797
793 *buffer_actual_len = packetlen; 798 *buffer_actual_len = packetlen;
794 799
@@ -802,7 +807,11 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
802 *requestid = desc.trans_id; 807 *requestid = desc.trans_id;
803 808
804 /* Copy over the entire packet to the user buffer */ 809 /* Copy over the entire packet to the user buffer */
805 ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0); 810 ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0,
811 &signal);
812
813 if (signal)
814 vmbus_setevent(channel);
806 815
807 return 0; 816 return 0;
808} 817}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2f84c5cff8d4..53a8600162a5 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -257,6 +257,70 @@ static void vmbus_process_offer(struct work_struct *work)
257 } 257 }
258} 258}
259 259
260enum {
261 IDE = 0,
262 SCSI,
263 NIC,
264 MAX_PERF_CHN,
265};
266
267/*
268 * This is an array of device_ids (device types) that are performance critical.
269 * We attempt to distribute the interrupt load for these devices across
270 * all available CPUs.
271 */
272static const struct hv_vmbus_device_id hp_devs[] = {
273 /* IDE */
274 { HV_IDE_GUID, },
275 /* Storage - SCSI */
276 { HV_SCSI_GUID, },
277 /* Network */
278 { HV_NIC_GUID, },
279};
280
281
282/*
283 * We use this state to statically distribute the channel interrupt load.
284 */
285static u32 next_vp;
286
287/*
288 * Starting with Win8, we can statically distribute the incoming
289 * channel interrupt load by binding a channel to VCPU. We
290 * implement here a simple round robin scheme for distributing
291 * the interrupt load.
292 * We will bind channels that are not performance critical to cpu 0 and
293 * performance critical channels (IDE, SCSI and Network) will be uniformly
294 * distributed across all available CPUs.
295 */
296static u32 get_vp_index(uuid_le *type_guid)
297{
298 u32 cur_cpu;
299 int i;
300 bool perf_chn = false;
301 u32 max_cpus = num_online_cpus();
302
303 for (i = IDE; i < MAX_PERF_CHN; i++) {
304 if (!memcmp(type_guid->b, hp_devs[i].guid,
305 sizeof(uuid_le))) {
306 perf_chn = true;
307 break;
308 }
309 }
310 if ((vmbus_proto_version == VERSION_WS2008) ||
311 (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
312 /*
313 * Prior to win8, all channel interrupts are
314 * delivered on cpu 0.
315 * Also if the channel is not a performance critical
316 * channel, bind it to cpu 0.
317 */
318 return 0;
319 }
320 cur_cpu = (++next_vp % max_cpus);
321 return 0;
322}
323
260/* 324/*
261 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition. 325 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
262 * 326 *
@@ -275,6 +339,35 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
275 return; 339 return;
276 } 340 }
277 341
342 /*
343 * By default we setup state to enable batched
344 * reading. A specific service can choose to
345 * disable this prior to opening the channel.
346 */
347 newchannel->batched_reading = true;
348
349 /*
350 * Setup state for signalling the host.
351 */
352 newchannel->sig_event = (struct hv_input_signal_event *)
353 (ALIGN((unsigned long)
354 &newchannel->sig_buf,
355 HV_HYPERCALL_PARAM_ALIGN));
356
357 newchannel->sig_event->connectionid.asu32 = 0;
358 newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
359 newchannel->sig_event->flag_number = 0;
360 newchannel->sig_event->rsvdz = 0;
361
362 if (vmbus_proto_version != VERSION_WS2008) {
363 newchannel->is_dedicated_interrupt =
364 (offer->is_dedicated_interrupt != 0);
365 newchannel->sig_event->connectionid.u.id =
366 offer->connection_id;
367 }
368
369 newchannel->target_vp = get_vp_index(&offer->offer.if_type);
370
278 memcpy(&newchannel->offermsg, offer, 371 memcpy(&newchannel->offermsg, offer,
279 sizeof(struct vmbus_channel_offer_channel)); 372 sizeof(struct vmbus_channel_offer_channel));
280 newchannel->monitor_grp = (u8)offer->monitorid / 32; 373 newchannel->monitor_grp = (u8)offer->monitorid / 32;
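
get_vp_index() above matches the offer's type GUID against a small table of performance-critical device classes and, for those channels on newer hosts, is meant to spread the interrupt load round-robin across the online CPUs. A simplified, self-contained sketch of that selection logic follows; the demo_* names and the empty GUID table are placeholders, and this sketch simply returns the computed index rather than mirroring the driver verbatim.

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/uuid.h>

/* Hypothetical table of performance-critical device type GUIDs. */
static const uuid_le demo_perf_guids[3];

/* One shared counter drives the round-robin assignment. */
static u32 demo_next_vp;

static u32 demo_pick_cpu(const uuid_le *type_guid, bool modern_host)
{
	bool perf_chn = false;
	int i;

	/* Is this one of the performance-critical device classes? */
	for (i = 0; i < ARRAY_SIZE(demo_perf_guids); i++) {
		if (!memcmp(type_guid, &demo_perf_guids[i], sizeof(uuid_le))) {
			perf_chn = true;
			break;
		}
	}

	/* Pre-Win8 hosts and ordinary channels stay on CPU 0. */
	if (!modern_host || !perf_chn)
		return 0;

	return ++demo_next_vp % num_online_cpus();
}
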
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 650c9f0b6642..253a74ba245c 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -30,6 +30,7 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/vmalloc.h> 31#include <linux/vmalloc.h>
32#include <linux/hyperv.h> 32#include <linux/hyperv.h>
33#include <linux/export.h>
33#include <asm/hyperv.h> 34#include <asm/hyperv.h>
34#include "hyperv_vmbus.h" 35#include "hyperv_vmbus.h"
35 36
@@ -40,15 +41,99 @@ struct vmbus_connection vmbus_connection = {
40}; 41};
41 42
42/* 43/*
44 * Negotiated protocol version with the host.
45 */
46__u32 vmbus_proto_version;
47EXPORT_SYMBOL_GPL(vmbus_proto_version);
48
49static __u32 vmbus_get_next_version(__u32 current_version)
50{
51 switch (current_version) {
52 case (VERSION_WIN7):
53 return VERSION_WS2008;
54
55 case (VERSION_WIN8):
56 return VERSION_WIN7;
57
58 case (VERSION_WS2008):
59 default:
60 return VERSION_INVAL;
61 }
62}
63
64static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
65 __u32 version)
66{
67 int ret = 0;
68 struct vmbus_channel_initiate_contact *msg;
69 unsigned long flags;
70 int t;
71
72 init_completion(&msginfo->waitevent);
73
74 msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
75
76 msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
77 msg->vmbus_version_requested = version;
78 msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
79 msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
80 msg->monitor_page2 = virt_to_phys(
81 (void *)((unsigned long)vmbus_connection.monitor_pages +
82 PAGE_SIZE));
83
84 /*
85 * Add to list before we send the request since we may
86 * receive the response before returning from this routine
87 */
88 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
89 list_add_tail(&msginfo->msglistentry,
90 &vmbus_connection.chn_msg_list);
91
92 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
93
94 ret = vmbus_post_msg(msg,
95 sizeof(struct vmbus_channel_initiate_contact));
96 if (ret != 0) {
97 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
98 list_del(&msginfo->msglistentry);
99 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
100 flags);
101 return ret;
102 }
103
104 /* Wait for the connection response */
105 t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
106 if (t == 0) {
107 spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
108 flags);
109 list_del(&msginfo->msglistentry);
110 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
111 flags);
112 return -ETIMEDOUT;
113 }
114
115 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
116 list_del(&msginfo->msglistentry);
117 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
118
119 /* Check if successful */
120 if (msginfo->response.version_response.version_supported) {
121 vmbus_connection.conn_state = CONNECTED;
122 } else {
123 return -ECONNREFUSED;
124 }
125
126 return ret;
127}
128
129/*
43 * vmbus_connect - Sends a connect request on the partition service connection 130 * vmbus_connect - Sends a connect request on the partition service connection
44 */ 131 */
45int vmbus_connect(void) 132int vmbus_connect(void)
46{ 133{
47 int ret = 0; 134 int ret = 0;
48 int t;
49 struct vmbus_channel_msginfo *msginfo = NULL; 135 struct vmbus_channel_msginfo *msginfo = NULL;
50 struct vmbus_channel_initiate_contact *msg; 136 __u32 version;
51 unsigned long flags;
52 137
53 /* Initialize the vmbus connection */ 138 /* Initialize the vmbus connection */
54 vmbus_connection.conn_state = CONNECTING; 139 vmbus_connection.conn_state = CONNECTING;
@@ -99,69 +184,38 @@ int vmbus_connect(void)
99 goto cleanup; 184 goto cleanup;
100 } 185 }
101 186
102 init_completion(&msginfo->waitevent);
103
104 msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
105
106 msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
107 msg->vmbus_version_requested = VMBUS_REVISION_NUMBER;
108 msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
109 msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
110 msg->monitor_page2 = virt_to_phys(
111 (void *)((unsigned long)vmbus_connection.monitor_pages +
112 PAGE_SIZE));
113
114 /* 187 /*
115 * Add to list before we send the request since we may 188 * Negotiate a compatible VMBUS version number with the
116 * receive the response before returning from this routine 189 * host. We start with the highest number we can support
190 * and work our way down until we negotiate a compatible
191 * version.
117 */ 192 */
118 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
119 list_add_tail(&msginfo->msglistentry,
120 &vmbus_connection.chn_msg_list);
121 193
122 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); 194 version = VERSION_CURRENT;
123 195
124 ret = vmbus_post_msg(msg, 196 do {
125 sizeof(struct vmbus_channel_initiate_contact)); 197 ret = vmbus_negotiate_version(msginfo, version);
126 if (ret != 0) { 198 if (ret == 0)
127 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 199 break;
128 list_del(&msginfo->msglistentry);
129 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
130 flags);
131 goto cleanup;
132 }
133 200
134 /* Wait for the connection response */ 201 version = vmbus_get_next_version(version);
135 t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ); 202 } while (version != VERSION_INVAL);
136 if (t == 0) {
137 spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
138 flags);
139 list_del(&msginfo->msglistentry);
140 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
141 flags);
142 ret = -ETIMEDOUT;
143 goto cleanup;
144 }
145 203
146 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 204 if (version == VERSION_INVAL)
147 list_del(&msginfo->msglistentry);
148 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
149
150 /* Check if successful */
151 if (msginfo->response.version_response.version_supported) {
152 vmbus_connection.conn_state = CONNECTED;
153 } else {
154 pr_err("Unable to connect, "
155 "Version %d not supported by Hyper-V\n",
156 VMBUS_REVISION_NUMBER);
157 ret = -ECONNREFUSED;
158 goto cleanup; 205 goto cleanup;
159 } 206
207 vmbus_proto_version = version;
208 pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d; Vmbus version:%d.%d\n",
209 host_info_eax, host_info_ebx >> 16,
210 host_info_ebx & 0xFFFF, host_info_ecx,
211 host_info_edx >> 24, host_info_edx & 0xFFFFFF,
212 version >> 16, version & 0xFFFF);
160 213
161 kfree(msginfo); 214 kfree(msginfo);
162 return 0; 215 return 0;
163 216
164cleanup: 217cleanup:
218 pr_err("Unable to connect to host\n");
165 vmbus_connection.conn_state = DISCONNECTED; 219 vmbus_connection.conn_state = DISCONNECTED;
166 220
167 if (vmbus_connection.work_queue) 221 if (vmbus_connection.work_queue)
@@ -212,6 +266,9 @@ static void process_chn_event(u32 relid)
212{ 266{
213 struct vmbus_channel *channel; 267 struct vmbus_channel *channel;
214 unsigned long flags; 268 unsigned long flags;
269 void *arg;
270 bool read_state;
271 u32 bytes_to_read;
215 272
216 /* 273 /*
217 * Find the channel based on this relid and invokes the 274 * Find the channel based on this relid and invokes the
@@ -234,10 +291,29 @@ static void process_chn_event(u32 relid)
234 */ 291 */
235 292
236 spin_lock_irqsave(&channel->inbound_lock, flags); 293 spin_lock_irqsave(&channel->inbound_lock, flags);
237 if (channel->onchannel_callback != NULL) 294 if (channel->onchannel_callback != NULL) {
238 channel->onchannel_callback(channel->channel_callback_context); 295 arg = channel->channel_callback_context;
239 else 296 read_state = channel->batched_reading;
297 /*
298 * This callback reads the messages sent by the host.
299 * We can optimize host to guest signaling by ensuring:
300 * 1. While reading the channel, we disable interrupts from
 301 * the host.
302 * 2. Ensure that we process all posted messages from the host
303 * before returning from this callback.
304 * 3. Once we return, enable signaling from the host. Once this
305 * state is set we check to see if additional packets are
306 * available to read. In this case we repeat the process.
307 */
308
309 do {
310 hv_begin_read(&channel->inbound);
311 channel->onchannel_callback(arg);
312 bytes_to_read = hv_end_read(&channel->inbound);
313 } while (read_state && (bytes_to_read != 0));
314 } else {
240 pr_err("no channel callback for relid - %u\n", relid); 315 pr_err("no channel callback for relid - %u\n", relid);
316 }
241 317
242 spin_unlock_irqrestore(&channel->inbound_lock, flags); 318 spin_unlock_irqrestore(&channel->inbound_lock, flags);
243} 319}
@@ -248,10 +324,32 @@ static void process_chn_event(u32 relid)
248void vmbus_on_event(unsigned long data) 324void vmbus_on_event(unsigned long data)
249{ 325{
250 u32 dword; 326 u32 dword;
251 u32 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5; 327 u32 maxdword;
252 int bit; 328 int bit;
253 u32 relid; 329 u32 relid;
254 u32 *recv_int_page = vmbus_connection.recv_int_page; 330 u32 *recv_int_page = NULL;
331 void *page_addr;
332 int cpu = smp_processor_id();
333 union hv_synic_event_flags *event;
334
335 if ((vmbus_proto_version == VERSION_WS2008) ||
336 (vmbus_proto_version == VERSION_WIN7)) {
337 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
338 recv_int_page = vmbus_connection.recv_int_page;
339 } else {
340 /*
341 * When the host is win8 and beyond, the event page
342 * can be directly checked to get the id of the channel
343 * that has the interrupt pending.
344 */
345 maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
346 page_addr = hv_context.synic_event_page[cpu];
347 event = (union hv_synic_event_flags *)page_addr +
348 VMBUS_MESSAGE_SINT;
349 recv_int_page = event->flags32;
350 }
351
352
255 353
256 /* Check events */ 354 /* Check events */
257 if (!recv_int_page) 355 if (!recv_int_page)
@@ -307,12 +405,16 @@ int vmbus_post_msg(void *buffer, size_t buflen)
307/* 405/*
308 * vmbus_set_event - Send an event notification to the parent 406 * vmbus_set_event - Send an event notification to the parent
309 */ 407 */
310int vmbus_set_event(u32 child_relid) 408int vmbus_set_event(struct vmbus_channel *channel)
311{ 409{
312 /* Each u32 represents 32 channels */ 410 u32 child_relid = channel->offermsg.child_relid;
313 sync_set_bit(child_relid & 31, 411
314 (unsigned long *)vmbus_connection.send_int_page + 412 if (!channel->is_dedicated_interrupt) {
315 (child_relid >> 5)); 413 /* Each u32 represents 32 channels */
414 sync_set_bit(child_relid & 31,
415 (unsigned long *)vmbus_connection.send_int_page +
416 (child_relid >> 5));
417 }
316 418
317 return hv_signal_event(); 419 return hv_signal_event(channel->sig_event);
318} 420}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 3648f8f0f368..1c5481da6e4a 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -27,6 +27,7 @@
27#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
28#include <linux/hyperv.h> 28#include <linux/hyperv.h>
29#include <linux/version.h> 29#include <linux/version.h>
30#include <linux/interrupt.h>
30#include <asm/hyperv.h> 31#include <asm/hyperv.h>
31#include "hyperv_vmbus.h" 32#include "hyperv_vmbus.h"
32 33
@@ -34,13 +35,16 @@
34struct hv_context hv_context = { 35struct hv_context hv_context = {
35 .synic_initialized = false, 36 .synic_initialized = false,
36 .hypercall_page = NULL, 37 .hypercall_page = NULL,
37 .signal_event_param = NULL,
38 .signal_event_buffer = NULL,
39}; 38};
40 39
41/* 40/*
42 * query_hypervisor_info - Get version info of the windows hypervisor 41 * query_hypervisor_info - Get version info of the windows hypervisor
43 */ 42 */
43unsigned int host_info_eax;
44unsigned int host_info_ebx;
45unsigned int host_info_ecx;
46unsigned int host_info_edx;
47
44static int query_hypervisor_info(void) 48static int query_hypervisor_info(void)
45{ 49{
46 unsigned int eax; 50 unsigned int eax;
@@ -70,13 +74,10 @@ static int query_hypervisor_info(void)
70 edx = 0; 74 edx = 0;
71 op = HVCPUID_VERSION; 75 op = HVCPUID_VERSION;
72 cpuid(op, &eax, &ebx, &ecx, &edx); 76 cpuid(op, &eax, &ebx, &ecx, &edx);
73 pr_info("Hyper-V Host OS Build:%d-%d.%d-%d-%d.%d\n", 77 host_info_eax = eax;
74 eax, 78 host_info_ebx = ebx;
75 ebx >> 16, 79 host_info_ecx = ecx;
76 ebx & 0xFFFF, 80 host_info_edx = edx;
77 ecx,
78 edx >> 24,
79 edx & 0xFFFFFF);
80 } 81 }
81 return max_leaf; 82 return max_leaf;
82} 83}
@@ -137,6 +138,10 @@ int hv_init(void)
137 memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS); 138 memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
138 memset(hv_context.synic_message_page, 0, 139 memset(hv_context.synic_message_page, 0,
139 sizeof(void *) * NR_CPUS); 140 sizeof(void *) * NR_CPUS);
141 memset(hv_context.vp_index, 0,
142 sizeof(int) * NR_CPUS);
143 memset(hv_context.event_dpc, 0,
144 sizeof(void *) * NR_CPUS);
140 145
141 max_leaf = query_hypervisor_info(); 146 max_leaf = query_hypervisor_info();
142 147
@@ -168,24 +173,6 @@ int hv_init(void)
168 173
169 hv_context.hypercall_page = virtaddr; 174 hv_context.hypercall_page = virtaddr;
170 175
171 /* Setup the global signal event param for the signal event hypercall */
172 hv_context.signal_event_buffer =
173 kmalloc(sizeof(struct hv_input_signal_event_buffer),
174 GFP_KERNEL);
175 if (!hv_context.signal_event_buffer)
176 goto cleanup;
177
178 hv_context.signal_event_param =
179 (struct hv_input_signal_event *)
180 (ALIGN((unsigned long)
181 hv_context.signal_event_buffer,
182 HV_HYPERCALL_PARAM_ALIGN));
183 hv_context.signal_event_param->connectionid.asu32 = 0;
184 hv_context.signal_event_param->connectionid.u.id =
185 VMBUS_EVENT_CONNECTION_ID;
186 hv_context.signal_event_param->flag_number = 0;
187 hv_context.signal_event_param->rsvdz = 0;
188
189 return 0; 176 return 0;
190 177
191cleanup: 178cleanup:
@@ -213,10 +200,6 @@ void hv_cleanup(void)
213 /* Reset our OS id */ 200 /* Reset our OS id */
214 wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); 201 wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
215 202
216 kfree(hv_context.signal_event_buffer);
217 hv_context.signal_event_buffer = NULL;
218 hv_context.signal_event_param = NULL;
219
220 if (hv_context.hypercall_page) { 203 if (hv_context.hypercall_page) {
221 hypercall_msr.as_uint64 = 0; 204 hypercall_msr.as_uint64 = 0;
222 wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 205 wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -273,13 +256,12 @@ int hv_post_message(union hv_connection_id connection_id,
273 * 256 *
274 * This involves a hypercall. 257 * This involves a hypercall.
275 */ 258 */
276u16 hv_signal_event(void) 259u16 hv_signal_event(void *con_id)
277{ 260{
278 u16 status; 261 u16 status;
279 262
280 status = do_hypercall(HVCALL_SIGNAL_EVENT, 263 status = (do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL) & 0xFFFF);
281 hv_context.signal_event_param, 264
282 NULL) & 0xFFFF;
283 return status; 265 return status;
284} 266}
285 267
@@ -297,6 +279,7 @@ void hv_synic_init(void *irqarg)
297 union hv_synic_siefp siefp; 279 union hv_synic_siefp siefp;
298 union hv_synic_sint shared_sint; 280 union hv_synic_sint shared_sint;
299 union hv_synic_scontrol sctrl; 281 union hv_synic_scontrol sctrl;
282 u64 vp_index;
300 283
301 u32 irq_vector = *((u32 *)(irqarg)); 284 u32 irq_vector = *((u32 *)(irqarg));
302 int cpu = smp_processor_id(); 285 int cpu = smp_processor_id();
@@ -307,6 +290,15 @@ void hv_synic_init(void *irqarg)
307 /* Check the version */ 290 /* Check the version */
308 rdmsrl(HV_X64_MSR_SVERSION, version); 291 rdmsrl(HV_X64_MSR_SVERSION, version);
309 292
293 hv_context.event_dpc[cpu] = (struct tasklet_struct *)
294 kmalloc(sizeof(struct tasklet_struct),
295 GFP_ATOMIC);
296 if (hv_context.event_dpc[cpu] == NULL) {
297 pr_err("Unable to allocate event dpc\n");
298 goto cleanup;
299 }
300 tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
301
310 hv_context.synic_message_page[cpu] = 302 hv_context.synic_message_page[cpu] =
311 (void *)get_zeroed_page(GFP_ATOMIC); 303 (void *)get_zeroed_page(GFP_ATOMIC);
312 304
@@ -345,7 +337,7 @@ void hv_synic_init(void *irqarg)
345 shared_sint.as_uint64 = 0; 337 shared_sint.as_uint64 = 0;
346 shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */ 338 shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
347 shared_sint.masked = false; 339 shared_sint.masked = false;
348 shared_sint.auto_eoi = false; 340 shared_sint.auto_eoi = true;
349 341
350 wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64); 342 wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
351 343
@@ -356,6 +348,14 @@ void hv_synic_init(void *irqarg)
356 wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64); 348 wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
357 349
358 hv_context.synic_initialized = true; 350 hv_context.synic_initialized = true;
351
352 /*
353 * Setup the mapping between Hyper-V's notion
354 * of cpuid and Linux' notion of cpuid.
355 * This array will be indexed using Linux cpuid.
356 */
357 rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
358 hv_context.vp_index[cpu] = (u32)vp_index;
359 return; 359 return;
360 360
361cleanup: 361cleanup:
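
With this change the raw cpuid registers are cached in host_info_* and the host build string is printed later from vmbus_connect(). A standalone sketch of how those registers are split into fields, assuming the layout implied by the format string (build number in eax, major/minor in the high/low halves of ebx, service pack in ecx, service branch and number in edx) — illustration only, not driver code:

#include <stdint.h>
#include <stdio.h>

static void print_host_build(uint32_t eax, uint32_t ebx,
			     uint32_t ecx, uint32_t edx)
{
	printf("Hyper-V Host Build:%u-%u.%u-%u-%u.%u\n",
	       eax,			/* build number */
	       ebx >> 16,		/* major version */
	       ebx & 0xFFFF,		/* minor version */
	       ecx,			/* service pack */
	       edx >> 24,		/* service branch */
	       edx & 0xFFFFFF);		/* service number */
}

int main(void)
{
	/* Made-up register values, purely for illustration. */
	print_host_build(9200, (6u << 16) | 2, 0, 16384);
	return 0;
}
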
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index dd289fd179ca..37873213e24f 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -29,7 +29,6 @@
29#include <linux/memory_hotplug.h> 29#include <linux/memory_hotplug.h>
30#include <linux/memory.h> 30#include <linux/memory.h>
31#include <linux/notifier.h> 31#include <linux/notifier.h>
32#include <linux/mman.h>
33#include <linux/percpu_counter.h> 32#include <linux/percpu_counter.h>
34 33
35#include <linux/hyperv.h> 34#include <linux/hyperv.h>
@@ -415,10 +414,17 @@ struct dm_info_msg {
415 414
416static bool hot_add; 415static bool hot_add;
417static bool do_hot_add; 416static bool do_hot_add;
417/*
418 * Delay reporting memory pressure by
419 * the specified number of seconds.
420 */
421static uint pressure_report_delay = 30;
418 422
419module_param(hot_add, bool, (S_IRUGO | S_IWUSR)); 423module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
420MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add"); 424MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
421 425
426module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
427MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
422static atomic_t trans_id = ATOMIC_INIT(0); 428static atomic_t trans_id = ATOMIC_INIT(0);
423 429
424static int dm_ring_size = (5 * PAGE_SIZE); 430static int dm_ring_size = (5 * PAGE_SIZE);
@@ -517,6 +523,34 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
517 } 523 }
518} 524}
519 525
526unsigned long compute_balloon_floor(void)
527{
528 unsigned long min_pages;
529#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
 530 /* Simple continuous piecewise linear function:
531 * max MiB -> min MiB gradient
532 * 0 0
533 * 16 16
534 * 32 24
535 * 128 72 (1/2)
536 * 512 168 (1/4)
537 * 2048 360 (1/8)
538 * 8192 552 (1/32)
539 * 32768 1320
540 * 131072 4392
541 */
542 if (totalram_pages < MB2PAGES(128))
543 min_pages = MB2PAGES(8) + (totalram_pages >> 1);
544 else if (totalram_pages < MB2PAGES(512))
545 min_pages = MB2PAGES(40) + (totalram_pages >> 2);
546 else if (totalram_pages < MB2PAGES(2048))
547 min_pages = MB2PAGES(104) + (totalram_pages >> 3);
548 else
549 min_pages = MB2PAGES(296) + (totalram_pages >> 5);
550#undef MB2PAGES
551 return min_pages;
552}
553
520/* 554/*
521 * Post our status as it relates memory pressure to the 555 * Post our status as it relates memory pressure to the
522 * host. Host expects the guests to post this status 556 * host. Host expects the guests to post this status
@@ -530,15 +564,30 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
530static void post_status(struct hv_dynmem_device *dm) 564static void post_status(struct hv_dynmem_device *dm)
531{ 565{
532 struct dm_status status; 566 struct dm_status status;
567 struct sysinfo val;
533 568
534 569 if (pressure_report_delay > 0) {
570 --pressure_report_delay;
571 return;
572 }
573 si_meminfo(&val);
535 memset(&status, 0, sizeof(struct dm_status)); 574 memset(&status, 0, sizeof(struct dm_status));
536 status.hdr.type = DM_STATUS_REPORT; 575 status.hdr.type = DM_STATUS_REPORT;
537 status.hdr.size = sizeof(struct dm_status); 576 status.hdr.size = sizeof(struct dm_status);
538 status.hdr.trans_id = atomic_inc_return(&trans_id); 577 status.hdr.trans_id = atomic_inc_return(&trans_id);
539 578
540 579 /*
541 status.num_committed = vm_memory_committed(); 580 * The host expects the guest to report free memory.
581 * Further, the host expects the pressure information to
582 * include the ballooned out pages.
583 * For a given amount of memory that we are managing, we
584 * need to compute a floor below which we should not balloon.
585 * Compute this and add it to the pressure report.
586 */
587 status.num_avail = val.freeram;
588 status.num_committed = vm_memory_committed() +
589 dm->num_pages_ballooned +
590 compute_balloon_floor();
542 591
543 vmbus_sendpacket(dm->dev->channel, &status, 592 vmbus_sendpacket(dm->dev->channel, &status,
544 sizeof(struct dm_status), 593 sizeof(struct dm_status),
@@ -547,8 +596,6 @@ static void post_status(struct hv_dynmem_device *dm)
547 596
548} 597}
549 598
550
551
552static void free_balloon_pages(struct hv_dynmem_device *dm, 599static void free_balloon_pages(struct hv_dynmem_device *dm,
553 union dm_mem_page_range *range_array) 600 union dm_mem_page_range *range_array)
554{ 601{
@@ -1013,9 +1060,7 @@ static int balloon_remove(struct hv_device *dev)
1013static const struct hv_vmbus_device_id id_table[] = { 1060static const struct hv_vmbus_device_id id_table[] = {
1014 /* Dynamic Memory Class ID */ 1061 /* Dynamic Memory Class ID */
1015 /* 525074DC-8985-46e2-8057-A307DC18A502 */ 1062 /* 525074DC-8985-46e2-8057-A307DC18A502 */
1016 { VMBUS_DEVICE(0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46, 1063 { HV_DM_GUID, },
1017 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
1018 },
1019 { }, 1064 { },
1020}; 1065};
1021 1066
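
A standalone sketch of the compute_balloon_floor() piecewise function above, working in MiB rather than pages so the numbers in the comment's table are easy to verify; this is illustrative only, not driver code.

#include <stdio.h>

static unsigned long balloon_floor_mb(unsigned long total_mb)
{
	if (total_mb < 128)
		return 8 + total_mb / 2;
	else if (total_mb < 512)
		return 40 + total_mb / 4;
	else if (total_mb < 2048)
		return 104 + total_mb / 8;
	else
		return 296 + total_mb / 32;
}

int main(void)
{
	unsigned long sizes[] = { 128, 512, 2048, 8192, 32768 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%lu MiB -> floor %lu MiB\n",
		       sizes[i], balloon_floor_mb(sizes[i]));
	/* Prints 72, 168, 360, 552, 1320 - matching the table in the comment. */
	return 0;
}
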
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index a0667de7a04c..1d4cbd8e8261 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -49,6 +49,16 @@ static struct hv_util_service util_kvp = {
49 .util_deinit = hv_kvp_deinit, 49 .util_deinit = hv_kvp_deinit,
50}; 50};
51 51
52static void perform_shutdown(struct work_struct *dummy)
53{
54 orderly_poweroff(true);
55}
56
57/*
58 * Perform the shutdown operation in a thread context.
59 */
60static DECLARE_WORK(shutdown_work, perform_shutdown);
61
52static void shutdown_onchannelcallback(void *context) 62static void shutdown_onchannelcallback(void *context)
53{ 63{
54 struct vmbus_channel *channel = context; 64 struct vmbus_channel *channel = context;
@@ -106,7 +116,7 @@ static void shutdown_onchannelcallback(void *context)
106 } 116 }
107 117
108 if (execute_shutdown == true) 118 if (execute_shutdown == true)
109 orderly_poweroff(true); 119 schedule_work(&shutdown_work);
110} 120}
111 121
112/* 122/*
@@ -274,6 +284,16 @@ static int util_probe(struct hv_device *dev,
274 } 284 }
275 } 285 }
276 286
287 /*
 288 * The set of services managed by the util driver is not performance
 289 * critical and does not need batched reading. Furthermore, some services
290 * such as KVP can only handle one message from the host at a time.
291 * Turn off batched reading for all util drivers before we open the
292 * channel.
293 */
294
295 set_channel_read_state(dev->channel, false);
296
277 ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0, 297 ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
278 srv->util_cb, dev->channel); 298 srv->util_cb, dev->channel);
279 if (ret) 299 if (ret)
@@ -304,21 +324,21 @@ static int util_remove(struct hv_device *dev)
304 324
305static const struct hv_vmbus_device_id id_table[] = { 325static const struct hv_vmbus_device_id id_table[] = {
306 /* Shutdown guid */ 326 /* Shutdown guid */
307 { VMBUS_DEVICE(0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49, 327 { HV_SHUTDOWN_GUID,
308 0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB) 328 .driver_data = (unsigned long)&util_shutdown
309 .driver_data = (unsigned long)&util_shutdown }, 329 },
310 /* Time synch guid */ 330 /* Time synch guid */
311 { VMBUS_DEVICE(0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, 331 { HV_TS_GUID,
312 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) 332 .driver_data = (unsigned long)&util_timesynch
313 .driver_data = (unsigned long)&util_timesynch }, 333 },
314 /* Heartbeat guid */ 334 /* Heartbeat guid */
315 { VMBUS_DEVICE(0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, 335 { HV_HEART_BEAT_GUID,
316 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) 336 .driver_data = (unsigned long)&util_heartbeat
317 .driver_data = (unsigned long)&util_heartbeat }, 337 },
318 /* KVP guid */ 338 /* KVP guid */
319 { VMBUS_DEVICE(0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, 339 { HV_KVP_GUID,
320 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6) 340 .driver_data = (unsigned long)&util_kvp
321 .driver_data = (unsigned long)&util_kvp }, 341 },
322 { }, 342 { },
323}; 343};
324 344
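
The shutdown path above now defers orderly_poweroff() to a work item because the channel callback runs in a context where blocking is not safe. A minimal sketch of that deferral pattern, written as a hypothetical out-of-tree module (the names below are illustrative, not the hv_util symbols); nothing is actually triggered at init time:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>

static void example_shutdown_fn(struct work_struct *dummy)
{
	/* Safe to block here; the work item runs in process context. */
	orderly_poweroff(true);
}

static DECLARE_WORK(example_shutdown_work, example_shutdown_fn);

static void example_channel_callback(void *context)
{
	/* Called where blocking is not allowed; just queue the work. */
	schedule_work(&example_shutdown_work);
}

static int __init example_init(void)
{
	(void)example_channel_callback;	/* referenced only, never invoked */
	return 0;
}

static void __exit example_exit(void)
{
	cancel_work_sync(&example_shutdown_work);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
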
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index d8d1fadb398a..12f2f9e989f7 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -101,15 +101,6 @@ enum hv_message_type {
101/* Define invalid partition identifier. */ 101/* Define invalid partition identifier. */
102#define HV_PARTITION_ID_INVALID ((u64)0x0) 102#define HV_PARTITION_ID_INVALID ((u64)0x0)
103 103
104/* Define connection identifier type. */
105union hv_connection_id {
106 u32 asu32;
107 struct {
108 u32 id:24;
109 u32 reserved:8;
110 } u;
111};
112
113/* Define port identifier type. */ 104/* Define port identifier type. */
114union hv_port_id { 105union hv_port_id {
115 u32 asu32; 106 u32 asu32;
@@ -338,13 +329,6 @@ struct hv_input_post_message {
338 u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; 329 u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
339}; 330};
340 331
341/* Definition of the hv_signal_event hypercall input structure. */
342struct hv_input_signal_event {
343 union hv_connection_id connectionid;
344 u16 flag_number;
345 u16 rsvdz;
346};
347
348/* 332/*
349 * Versioning definitions used for guests reporting themselves to the 333 * Versioning definitions used for guests reporting themselves to the
 350 * hypervisor, and vice versa. 334 * hypervisor, and vice versa.
@@ -498,11 +482,6 @@ static const uuid_le VMBUS_SERVICE_ID = {
498 482
499 483
500 484
501struct hv_input_signal_event_buffer {
502 u64 align8;
503 struct hv_input_signal_event event;
504};
505
506struct hv_context { 485struct hv_context {
507 /* We only support running on top of Hyper-V 486 /* We only support running on top of Hyper-V
508 * So at this point this really can only contain the Hyper-V ID 487 * So at this point this really can only contain the Hyper-V ID
@@ -513,16 +492,24 @@ struct hv_context {
513 492
514 bool synic_initialized; 493 bool synic_initialized;
515 494
516 /*
517 * This is used as an input param to HvCallSignalEvent hypercall. The
518 * input param is immutable in our usage and must be dynamic mem (vs
519 * stack or global). */
520 struct hv_input_signal_event_buffer *signal_event_buffer;
521 /* 8-bytes aligned of the buffer above */
522 struct hv_input_signal_event *signal_event_param;
523
524 void *synic_message_page[NR_CPUS]; 495 void *synic_message_page[NR_CPUS];
525 void *synic_event_page[NR_CPUS]; 496 void *synic_event_page[NR_CPUS];
497 /*
498 * Hypervisor's notion of virtual processor ID is different from
 499 * Linux's notion of CPU ID. This information can only be retrieved
 500 * in the context of the calling CPU. Set up a map for easy access
501 * to this information:
502 *
 503 * vp_index[a] is Hyper-V's processor ID corresponding to
504 * Linux cpuid 'a'.
505 */
506 u32 vp_index[NR_CPUS];
507 /*
508 * Starting with win8, we can take channel interrupts on any CPU;
509 * we will manage the tasklet that handles events on a per CPU
510 * basis.
511 */
512 struct tasklet_struct *event_dpc[NR_CPUS];
526}; 513};
527 514
528extern struct hv_context hv_context; 515extern struct hv_context hv_context;
@@ -538,12 +525,19 @@ extern int hv_post_message(union hv_connection_id connection_id,
538 enum hv_message_type message_type, 525 enum hv_message_type message_type,
539 void *payload, size_t payload_size); 526 void *payload, size_t payload_size);
540 527
541extern u16 hv_signal_event(void); 528extern u16 hv_signal_event(void *con_id);
542 529
543extern void hv_synic_init(void *irqarg); 530extern void hv_synic_init(void *irqarg);
544 531
545extern void hv_synic_cleanup(void *arg); 532extern void hv_synic_cleanup(void *arg);
546 533
534/*
535 * Host version information.
536 */
537extern unsigned int host_info_eax;
538extern unsigned int host_info_ebx;
539extern unsigned int host_info_ecx;
540extern unsigned int host_info_edx;
547 541
548/* Interface */ 542/* Interface */
549 543
@@ -555,7 +549,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
555 549
556int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info, 550int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
557 struct scatterlist *sglist, 551 struct scatterlist *sglist,
558 u32 sgcount); 552 u32 sgcount, bool *signal);
559 553
560int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer, 554int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
561 u32 buflen); 555 u32 buflen);
@@ -563,13 +557,16 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
563int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info, 557int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info,
564 void *buffer, 558 void *buffer,
565 u32 buflen, 559 u32 buflen,
566 u32 offset); 560 u32 offset, bool *signal);
567 561
568u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *ring_info);
569 562
570void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, 563void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
571 struct hv_ring_buffer_debug_info *debug_info); 564 struct hv_ring_buffer_debug_info *debug_info);
572 565
566void hv_begin_read(struct hv_ring_buffer_info *rbi);
567
568u32 hv_end_read(struct hv_ring_buffer_info *rbi);
569
573/* 570/*
574 * Maximum channels is determined by the size of the interrupt page 571 * Maximum channels is determined by the size of the interrupt page
575 * which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt 572 * which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
@@ -657,7 +654,7 @@ int vmbus_connect(void);
657 654
658int vmbus_post_msg(void *buffer, size_t buflen); 655int vmbus_post_msg(void *buffer, size_t buflen);
659 656
660int vmbus_set_event(u32 child_relid); 657int vmbus_set_event(struct vmbus_channel *channel);
661 658
662void vmbus_on_event(unsigned long data); 659void vmbus_on_event(unsigned long data);
663 660
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 7233c88f01b8..cafa72ffdc30 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -29,6 +29,105 @@
29 29
30#include "hyperv_vmbus.h" 30#include "hyperv_vmbus.h"
31 31
32void hv_begin_read(struct hv_ring_buffer_info *rbi)
33{
34 rbi->ring_buffer->interrupt_mask = 1;
35 smp_mb();
36}
37
38u32 hv_end_read(struct hv_ring_buffer_info *rbi)
39{
40 u32 read;
41 u32 write;
42
43 rbi->ring_buffer->interrupt_mask = 0;
44 smp_mb();
45
46 /*
47 * Now check to see if the ring buffer is still empty.
48 * If it is not, we raced and we need to process new
49 * incoming messages.
50 */
51 hv_get_ringbuffer_availbytes(rbi, &read, &write);
52
53 return read;
54}
55
56/*
57 * When we write to the ring buffer, check if the host needs to
 58 * be signaled. Here are the details of this protocol:
59 *
60 * 1. The host guarantees that while it is draining the
61 * ring buffer, it will set the interrupt_mask to
62 * indicate it does not need to be interrupted when
63 * new data is placed.
64 *
65 * 2. The host guarantees that it will completely drain
66 * the ring buffer before exiting the read loop. Further,
67 * once the ring buffer is empty, it will clear the
68 * interrupt_mask and re-check to see if new data has
69 * arrived.
70 */
71
72static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
73{
74 if (rbi->ring_buffer->interrupt_mask)
75 return false;
76
77 /*
78 * This is the only case we need to signal when the
79 * ring transitions from being empty to non-empty.
80 */
81 if (old_write == rbi->ring_buffer->read_index)
82 return true;
83
84 return false;
85}
86
87/*
88 * To optimize the flow management on the send-side,
89 * when the sender is blocked because of lack of
 90 * sufficient space in the ring buffer, the consumer
 91 * of the ring buffer can potentially signal the producer.
92 * This is controlled by the following parameters:
93 *
94 * 1. pending_send_sz: This is the size in bytes that the
95 * producer is trying to send.
96 * 2. The feature bit feat_pending_send_sz set to indicate if
97 * the consumer of the ring will signal when the ring
98 * state transitions from being full to a state where
99 * there is room for the producer to send the pending packet.
100 */
101
102static bool hv_need_to_signal_on_read(u32 old_rd,
103 struct hv_ring_buffer_info *rbi)
104{
105 u32 prev_write_sz;
106 u32 cur_write_sz;
107 u32 r_size;
108 u32 write_loc = rbi->ring_buffer->write_index;
109 u32 read_loc = rbi->ring_buffer->read_index;
110 u32 pending_sz = rbi->ring_buffer->pending_send_sz;
111
112 /*
113 * If the other end is not blocked on write don't bother.
114 */
115 if (pending_sz == 0)
116 return false;
117
118 r_size = rbi->ring_datasize;
119 cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
120 read_loc - write_loc;
121
122 prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
123 old_rd - write_loc;
124
125
126 if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
127 return true;
128
129 return false;
130}
32 131
33/* 132/*
34 * hv_get_next_write_location() 133 * hv_get_next_write_location()
@@ -239,19 +338,6 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
239 } 338 }
240} 339}
241 340
242
243/*
244 *
245 * hv_get_ringbuffer_interrupt_mask()
246 *
247 * Get the interrupt mask for the specified ring buffer
248 *
249 */
250u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
251{
252 return rbi->ring_buffer->interrupt_mask;
253}
254
255/* 341/*
256 * 342 *
257 * hv_ringbuffer_init() 343 * hv_ringbuffer_init()
@@ -298,7 +384,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
298 * 384 *
299 */ 385 */
300int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, 386int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
301 struct scatterlist *sglist, u32 sgcount) 387 struct scatterlist *sglist, u32 sgcount, bool *signal)
302{ 388{
303 int i = 0; 389 int i = 0;
304 u32 bytes_avail_towrite; 390 u32 bytes_avail_towrite;
@@ -307,6 +393,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
307 393
308 struct scatterlist *sg; 394 struct scatterlist *sg;
309 u32 next_write_location; 395 u32 next_write_location;
396 u32 old_write;
310 u64 prev_indices = 0; 397 u64 prev_indices = 0;
311 unsigned long flags; 398 unsigned long flags;
312 399
@@ -335,6 +422,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
335 /* Write to the ring buffer */ 422 /* Write to the ring buffer */
336 next_write_location = hv_get_next_write_location(outring_info); 423 next_write_location = hv_get_next_write_location(outring_info);
337 424
425 old_write = next_write_location;
426
338 for_each_sg(sglist, sg, sgcount, i) 427 for_each_sg(sglist, sg, sgcount, i)
339 { 428 {
340 next_write_location = hv_copyto_ringbuffer(outring_info, 429 next_write_location = hv_copyto_ringbuffer(outring_info,
@@ -351,14 +440,16 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
351 &prev_indices, 440 &prev_indices,
352 sizeof(u64)); 441 sizeof(u64));
353 442
354 /* Make sure we flush all writes before updating the writeIndex */ 443 /* Issue a full memory barrier before updating the write index */
355 smp_wmb(); 444 smp_mb();
356 445
357 /* Now, update the write location */ 446 /* Now, update the write location */
358 hv_set_next_write_location(outring_info, next_write_location); 447 hv_set_next_write_location(outring_info, next_write_location);
359 448
360 449
361 spin_unlock_irqrestore(&outring_info->ring_lock, flags); 450 spin_unlock_irqrestore(&outring_info->ring_lock, flags);
451
452 *signal = hv_need_to_signal(old_write, outring_info);
362 return 0; 453 return 0;
363} 454}
364 455
@@ -414,13 +505,14 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
414 * 505 *
415 */ 506 */
416int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer, 507int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
417 u32 buflen, u32 offset) 508 u32 buflen, u32 offset, bool *signal)
418{ 509{
419 u32 bytes_avail_towrite; 510 u32 bytes_avail_towrite;
420 u32 bytes_avail_toread; 511 u32 bytes_avail_toread;
421 u32 next_read_location = 0; 512 u32 next_read_location = 0;
422 u64 prev_indices = 0; 513 u64 prev_indices = 0;
423 unsigned long flags; 514 unsigned long flags;
515 u32 old_read;
424 516
425 if (buflen <= 0) 517 if (buflen <= 0)
426 return -EINVAL; 518 return -EINVAL;
@@ -431,6 +523,8 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
431 &bytes_avail_toread, 523 &bytes_avail_toread,
432 &bytes_avail_towrite); 524 &bytes_avail_towrite);
433 525
526 old_read = bytes_avail_toread;
527
434 /* Make sure there is something to read */ 528 /* Make sure there is something to read */
435 if (bytes_avail_toread < buflen) { 529 if (bytes_avail_toread < buflen) {
436 spin_unlock_irqrestore(&inring_info->ring_lock, flags); 530 spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@@ -461,5 +555,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
461 555
462 spin_unlock_irqrestore(&inring_info->ring_lock, flags); 556 spin_unlock_irqrestore(&inring_info->ring_lock, flags);
463 557
558 *signal = hv_need_to_signal_on_read(old_read, inring_info);
559
464 return 0; 560 return 0;
465} 561}
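
A simplified standalone sketch of the hv_need_to_signal_on_read() math above: the reader signals the host only when the free space it just created crosses the producer's pending_send_sz threshold. The indices and sizes below are made up for illustration, and the helpers are not driver functions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t free_space(uint32_t ring_size, uint32_t write_loc,
			   uint32_t read_loc)
{
	return write_loc >= read_loc ? ring_size - (write_loc - read_loc)
				     : read_loc - write_loc;
}

static bool need_to_signal_on_read(uint32_t ring_size, uint32_t write_loc,
				   uint32_t old_read, uint32_t new_read,
				   uint32_t pending_sz)
{
	uint32_t prev, cur;

	if (pending_sz == 0)	/* producer is not blocked; never signal */
		return false;

	prev = free_space(ring_size, write_loc, old_read);
	cur = free_space(ring_size, write_loc, new_read);

	/* Signal only when this read crosses the producer's threshold. */
	return prev < pending_sz && cur >= pending_sz;
}

int main(void)
{
	/* 4096-byte ring, writer at 3000, a 512-byte packet is pending. */
	printf("signal after read: %d\n",
	       need_to_signal_on_read(4096, 3000, 3100, 3700, 512));
	return 0;
}
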
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8e1a9ec53003..cf19dfa5ead1 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -33,6 +33,7 @@
33#include <acpi/acpi_bus.h> 33#include <acpi/acpi_bus.h>
34#include <linux/completion.h> 34#include <linux/completion.h>
35#include <linux/hyperv.h> 35#include <linux/hyperv.h>
36#include <linux/kernel_stat.h>
36#include <asm/hyperv.h> 37#include <asm/hyperv.h>
37#include <asm/hypervisor.h> 38#include <asm/hypervisor.h>
38#include "hyperv_vmbus.h" 39#include "hyperv_vmbus.h"
@@ -41,7 +42,6 @@
41static struct acpi_device *hv_acpi_dev; 42static struct acpi_device *hv_acpi_dev;
42 43
43static struct tasklet_struct msg_dpc; 44static struct tasklet_struct msg_dpc;
44static struct tasklet_struct event_dpc;
45static struct completion probe_event; 45static struct completion probe_event;
46static int irq; 46static int irq;
47 47
@@ -454,21 +454,40 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
454 union hv_synic_event_flags *event; 454 union hv_synic_event_flags *event;
455 bool handled = false; 455 bool handled = false;
456 456
457 page_addr = hv_context.synic_event_page[cpu];
458 if (page_addr == NULL)
459 return IRQ_NONE;
460
461 event = (union hv_synic_event_flags *)page_addr +
462 VMBUS_MESSAGE_SINT;
457 /* 463 /*
458 * Check for events before checking for messages. This is the order 464 * Check for events before checking for messages. This is the order
459 * in which events and messages are checked in Windows guests on 465 * in which events and messages are checked in Windows guests on
460 * Hyper-V, and the Windows team suggested we do the same. 466 * Hyper-V, and the Windows team suggested we do the same.
461 */ 467 */
462 468
463 page_addr = hv_context.synic_event_page[cpu]; 469 if ((vmbus_proto_version == VERSION_WS2008) ||
464 event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT; 470 (vmbus_proto_version == VERSION_WIN7)) {
465 471
466 /* Since we are a child, we only need to check bit 0 */ 472 /* Since we are a child, we only need to check bit 0 */
467 if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) { 473 if (sync_test_and_clear_bit(0,
474 (unsigned long *) &event->flags32[0])) {
475 handled = true;
476 }
477 } else {
478 /*
479 * Our host is win8 or above. The signaling mechanism
480 * has changed and we can directly look at the event page.
 481 * If bit n is set then we have an interrupt on the channel
482 * whose id is n.
483 */
468 handled = true; 484 handled = true;
469 tasklet_schedule(&event_dpc);
470 } 485 }
471 486
487 if (handled)
488 tasklet_schedule(hv_context.event_dpc[cpu]);
489
490
472 page_addr = hv_context.synic_message_page[cpu]; 491 page_addr = hv_context.synic_message_page[cpu];
473 msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; 492 msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
474 493
@@ -485,6 +504,19 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
485} 504}
486 505
487/* 506/*
507 * vmbus interrupt flow handler:
508 * vmbus interrupts can concurrently occur on multiple CPUs and
509 * can be handled concurrently.
510 */
511
512static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
513{
514 kstat_incr_irqs_this_cpu(irq, desc);
515
516 desc->action->handler(irq, desc->action->dev_id);
517}
518
519/*
488 * vmbus_bus_init -Main vmbus driver initialization routine. 520 * vmbus_bus_init -Main vmbus driver initialization routine.
489 * 521 *
490 * Here, we 522 * Here, we
@@ -506,7 +538,6 @@ static int vmbus_bus_init(int irq)
506 } 538 }
507 539
508 tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0); 540 tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
509 tasklet_init(&event_dpc, vmbus_on_event, 0);
510 541
511 ret = bus_register(&hv_bus); 542 ret = bus_register(&hv_bus);
512 if (ret) 543 if (ret)
@@ -520,6 +551,13 @@ static int vmbus_bus_init(int irq)
520 goto err_unregister; 551 goto err_unregister;
521 } 552 }
522 553
554 /*
555 * Vmbus interrupts can be handled concurrently on
556 * different CPUs. Establish an appropriate interrupt flow
557 * handler that can support this model.
558 */
559 irq_set_handler(irq, vmbus_flow_handler);
560
523 vector = IRQ0_VECTOR + irq; 561 vector = IRQ0_VECTOR + irq;
524 562
525 /* 563 /*
@@ -575,8 +613,6 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
575 613
576 ret = driver_register(&hv_driver->driver); 614 ret = driver_register(&hv_driver->driver);
577 615
578 vmbus_request_offers();
579
580 return ret; 616 return ret;
581} 617}
582EXPORT_SYMBOL_GPL(__vmbus_driver_register); 618EXPORT_SYMBOL_GPL(__vmbus_driver_register);
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index ab20a0851dd2..141094e7c06e 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -20,7 +20,6 @@
20#include <linux/serial.h> 20#include <linux/serial.h>
21#include <linux/tty_flip.h> 21#include <linux/tty_flip.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/atomic.h>
24#include <linux/io.h> 23#include <linux/io.h>
25#include <linux/ipack.h> 24#include <linux/ipack.h>
26#include "ipoctal.h" 25#include "ipoctal.h"
@@ -38,21 +37,19 @@ struct ipoctal_channel {
38 spinlock_t lock; 37 spinlock_t lock;
39 unsigned int pointer_read; 38 unsigned int pointer_read;
40 unsigned int pointer_write; 39 unsigned int pointer_write;
41 atomic_t open;
42 struct tty_port tty_port; 40 struct tty_port tty_port;
43 union scc2698_channel __iomem *regs; 41 union scc2698_channel __iomem *regs;
44 union scc2698_block __iomem *block_regs; 42 union scc2698_block __iomem *block_regs;
45 unsigned int board_id; 43 unsigned int board_id;
46 unsigned char *board_write;
47 u8 isr_rx_rdy_mask; 44 u8 isr_rx_rdy_mask;
48 u8 isr_tx_rdy_mask; 45 u8 isr_tx_rdy_mask;
46 unsigned int rx_enable;
49}; 47};
50 48
51struct ipoctal { 49struct ipoctal {
52 struct ipack_device *dev; 50 struct ipack_device *dev;
53 unsigned int board_id; 51 unsigned int board_id;
54 struct ipoctal_channel channel[NR_CHANNELS]; 52 struct ipoctal_channel channel[NR_CHANNELS];
55 unsigned char write;
56 struct tty_driver *tty_drv; 53 struct tty_driver *tty_drv;
57 u8 __iomem *mem8_space; 54 u8 __iomem *mem8_space;
58 u8 __iomem *int_space; 55 u8 __iomem *int_space;
@@ -64,28 +61,23 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty)
64 61
65 channel = dev_get_drvdata(tty->dev); 62 channel = dev_get_drvdata(tty->dev);
66 63
64 /*
65 * Enable RX. TX will be enabled when
66 * there is something to send
67 */
67 iowrite8(CR_ENABLE_RX, &channel->regs->w.cr); 68 iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
69 channel->rx_enable = 1;
68 return 0; 70 return 0;
69} 71}
70 72
71static int ipoctal_open(struct tty_struct *tty, struct file *file) 73static int ipoctal_open(struct tty_struct *tty, struct file *file)
72{ 74{
73 int res;
74 struct ipoctal_channel *channel; 75 struct ipoctal_channel *channel;
75 76
76 channel = dev_get_drvdata(tty->dev); 77 channel = dev_get_drvdata(tty->dev);
77
78 if (atomic_read(&channel->open))
79 return -EBUSY;
80
81 tty->driver_data = channel; 78 tty->driver_data = channel;
82 79
83 res = tty_port_open(&channel->tty_port, tty, file); 80 return tty_port_open(&channel->tty_port, tty, file);
84 if (res)
85 return res;
86
87 atomic_inc(&channel->open);
88 return 0;
89} 81}
90 82
91static void ipoctal_reset_stats(struct ipoctal_stats *stats) 83static void ipoctal_reset_stats(struct ipoctal_stats *stats)
@@ -111,9 +103,7 @@ static void ipoctal_close(struct tty_struct *tty, struct file *filp)
111 struct ipoctal_channel *channel = tty->driver_data; 103 struct ipoctal_channel *channel = tty->driver_data;
112 104
113 tty_port_close(&channel->tty_port, tty, filp); 105 tty_port_close(&channel->tty_port, tty, filp);
114 106 ipoctal_free_channel(channel);
115 if (atomic_dec_and_test(&channel->open))
116 ipoctal_free_channel(channel);
117} 107}
118 108
119static int ipoctal_get_icount(struct tty_struct *tty, 109static int ipoctal_get_icount(struct tty_struct *tty,
@@ -137,11 +127,12 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel, u8 sr)
137{ 127{
138 struct tty_port *port = &channel->tty_port; 128 struct tty_port *port = &channel->tty_port;
139 unsigned char value; 129 unsigned char value;
140 unsigned char flag = TTY_NORMAL; 130 unsigned char flag;
141 u8 isr; 131 u8 isr;
142 132
143 do { 133 do {
144 value = ioread8(&channel->regs->r.rhr); 134 value = ioread8(&channel->regs->r.rhr);
135 flag = TTY_NORMAL;
145 /* Error: count statistics */ 136 /* Error: count statistics */
146 if (sr & SR_ERROR) { 137 if (sr & SR_ERROR) {
147 iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr); 138 iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
@@ -183,10 +174,8 @@ static void ipoctal_irq_tx(struct ipoctal_channel *channel)
183 unsigned char value; 174 unsigned char value;
184 unsigned int *pointer_write = &channel->pointer_write; 175 unsigned int *pointer_write = &channel->pointer_write;
185 176
186 if (channel->nb_bytes <= 0) { 177 if (channel->nb_bytes == 0)
187 channel->nb_bytes = 0;
188 return; 178 return;
189 }
190 179
191 value = channel->tty_port.xmit_buf[*pointer_write]; 180 value = channel->tty_port.xmit_buf[*pointer_write];
192 iowrite8(value, &channel->regs->w.thr); 181 iowrite8(value, &channel->regs->w.thr);
@@ -194,39 +183,27 @@ static void ipoctal_irq_tx(struct ipoctal_channel *channel)
194 (*pointer_write)++; 183 (*pointer_write)++;
195 *pointer_write = *pointer_write % PAGE_SIZE; 184 *pointer_write = *pointer_write % PAGE_SIZE;
196 channel->nb_bytes--; 185 channel->nb_bytes--;
197
198 if ((channel->nb_bytes == 0) &&
199 (waitqueue_active(&channel->queue))) {
200
201 if (channel->board_id != IPACK1_DEVICE_ID_SBS_OCTAL_485) {
202 *channel->board_write = 1;
203 wake_up_interruptible(&channel->queue);
204 }
205 }
206} 186}
207 187
208static void ipoctal_irq_channel(struct ipoctal_channel *channel) 188static void ipoctal_irq_channel(struct ipoctal_channel *channel)
209{ 189{
210 u8 isr, sr; 190 u8 isr, sr;
211 191
212 /* If there is no client, skip the check */ 192 spin_lock(&channel->lock);
213 if (!atomic_read(&channel->open))
214 return;
215
 216 /* The HW is organized in pairs of channels. See which register we need 193 /* The HW is organized in pairs of channels. See which register we need
217 * to read from */ 194 * to read from */
218 isr = ioread8(&channel->block_regs->r.isr); 195 isr = ioread8(&channel->block_regs->r.isr);
219 sr = ioread8(&channel->regs->r.sr); 196 sr = ioread8(&channel->regs->r.sr);
220 197
221 /* In case of RS-485, change from TX to RX when finishing TX. 198 if ((sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) {
222 * Half-duplex. */
223 if ((channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) &&
224 (sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) {
225 iowrite8(CR_DISABLE_TX, &channel->regs->w.cr); 199 iowrite8(CR_DISABLE_TX, &channel->regs->w.cr);
226 iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr); 200 /* In case of RS-485, change from TX to RX when finishing TX.
227 iowrite8(CR_ENABLE_RX, &channel->regs->w.cr); 201 * Half-duplex. */
228 *channel->board_write = 1; 202 if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) {
229 wake_up_interruptible(&channel->queue); 203 iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr);
204 iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
205 channel->rx_enable = 1;
206 }
230 } 207 }
231 208
232 /* RX data */ 209 /* RX data */
@@ -237,7 +214,7 @@ static void ipoctal_irq_channel(struct ipoctal_channel *channel)
237 if ((isr & channel->isr_tx_rdy_mask) && (sr & SR_TX_READY)) 214 if ((isr & channel->isr_tx_rdy_mask) && (sr & SR_TX_READY))
238 ipoctal_irq_tx(channel); 215 ipoctal_irq_tx(channel);
239 216
240 tty_flip_buffer_push(&channel->tty_port); 217 spin_unlock(&channel->lock);
241} 218}
242 219
243static irqreturn_t ipoctal_irq_handler(void *arg) 220static irqreturn_t ipoctal_irq_handler(void *arg)
@@ -245,14 +222,14 @@ static irqreturn_t ipoctal_irq_handler(void *arg)
245 unsigned int i; 222 unsigned int i;
246 struct ipoctal *ipoctal = (struct ipoctal *) arg; 223 struct ipoctal *ipoctal = (struct ipoctal *) arg;
247 224
248 /* Check all channels */
249 for (i = 0; i < NR_CHANNELS; i++)
250 ipoctal_irq_channel(&ipoctal->channel[i]);
251
252 /* Clear the IPack device interrupt */ 225 /* Clear the IPack device interrupt */
253 readw(ipoctal->int_space + ACK_INT_REQ0); 226 readw(ipoctal->int_space + ACK_INT_REQ0);
254 readw(ipoctal->int_space + ACK_INT_REQ1); 227 readw(ipoctal->int_space + ACK_INT_REQ1);
255 228
229 /* Check all channels */
230 for (i = 0; i < NR_CHANNELS; i++)
231 ipoctal_irq_channel(&ipoctal->channel[i]);
232
256 return IRQ_HANDLED; 233 return IRQ_HANDLED;
257} 234}
258 235
@@ -306,7 +283,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
306 ipoctal->mem8_space = 283 ipoctal->mem8_space =
307 devm_ioremap_nocache(&ipoctal->dev->dev, 284 devm_ioremap_nocache(&ipoctal->dev->dev,
308 region->start, 0x8000); 285 region->start, 0x8000);
309 if (!addr) { 286 if (!ipoctal->mem8_space) {
310 dev_err(&ipoctal->dev->dev, 287 dev_err(&ipoctal->dev->dev,
311 "Unable to map slot [%d:%d] MEM8 space!\n", 288 "Unable to map slot [%d:%d] MEM8 space!\n",
312 bus_nr, slot); 289 bus_nr, slot);
@@ -319,7 +296,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
319 struct ipoctal_channel *channel = &ipoctal->channel[i]; 296 struct ipoctal_channel *channel = &ipoctal->channel[i];
320 channel->regs = chan_regs + i; 297 channel->regs = chan_regs + i;
321 channel->block_regs = block_regs + (i >> 1); 298 channel->block_regs = block_regs + (i >> 1);
322 channel->board_write = &ipoctal->write;
323 channel->board_id = ipoctal->board_id; 299 channel->board_id = ipoctal->board_id;
324 if (i & 1) { 300 if (i & 1) {
325 channel->isr_tx_rdy_mask = ISR_TxRDY_B; 301 channel->isr_tx_rdy_mask = ISR_TxRDY_B;
@@ -330,6 +306,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
330 } 306 }
331 307
332 iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr); 308 iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
309 channel->rx_enable = 0;
333 iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr); 310 iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
334 iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr); 311 iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
335 iowrite8(MR1_CHRL_8_BITS | MR1_ERROR_CHAR | MR1_RxINT_RxRDY, 312 iowrite8(MR1_CHRL_8_BITS | MR1_ERROR_CHAR | MR1_RxINT_RxRDY,
@@ -402,8 +379,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
402 379
403 ipoctal_reset_stats(&channel->stats); 380 ipoctal_reset_stats(&channel->stats);
404 channel->nb_bytes = 0; 381 channel->nb_bytes = 0;
405 init_waitqueue_head(&channel->queue);
406
407 spin_lock_init(&channel->lock); 382 spin_lock_init(&channel->lock);
408 channel->pointer_read = 0; 383 channel->pointer_read = 0;
409 channel->pointer_write = 0; 384 channel->pointer_write = 0;
@@ -414,12 +389,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
414 continue; 389 continue;
415 } 390 }
416 dev_set_drvdata(tty_dev, channel); 391 dev_set_drvdata(tty_dev, channel);
417
418 /*
419 * Enable again the RX. TX will be enabled when
420 * there is something to send
421 */
422 iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
423 } 392 }
424 393
425 return 0; 394 return 0;
@@ -459,6 +428,7 @@ static int ipoctal_write_tty(struct tty_struct *tty,
459 /* As the IP-OCTAL 485 only supports half duplex, do it manually */ 428 /* As the IP-OCTAL 485 only supports half duplex, do it manually */
460 if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) { 429 if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) {
461 iowrite8(CR_DISABLE_RX, &channel->regs->w.cr); 430 iowrite8(CR_DISABLE_RX, &channel->regs->w.cr);
431 channel->rx_enable = 0;
462 iowrite8(CR_CMD_ASSERT_RTSN, &channel->regs->w.cr); 432 iowrite8(CR_CMD_ASSERT_RTSN, &channel->regs->w.cr);
463 } 433 }
464 434
@@ -467,10 +437,6 @@ static int ipoctal_write_tty(struct tty_struct *tty,
467 * operations 437 * operations
468 */ 438 */
469 iowrite8(CR_ENABLE_TX, &channel->regs->w.cr); 439 iowrite8(CR_ENABLE_TX, &channel->regs->w.cr);
470 wait_event_interruptible(channel->queue, *channel->board_write);
471 iowrite8(CR_DISABLE_TX, &channel->regs->w.cr);
472
473 *channel->board_write = 0;
474 return char_copied; 440 return char_copied;
475} 441}
476 442
@@ -622,8 +588,9 @@ static void ipoctal_set_termios(struct tty_struct *tty,
622 iowrite8(mr2, &channel->regs->w.mr); 588 iowrite8(mr2, &channel->regs->w.mr);
623 iowrite8(csr, &channel->regs->w.csr); 589 iowrite8(csr, &channel->regs->w.csr);
624 590
 625 /* Enable again the RX */ 591 /* Enable RX again, but only if it was enabled before */
626 iowrite8(CR_ENABLE_RX, &channel->regs->w.cr); 592 if (channel->rx_enable)
593 iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
627} 594}
628 595
629static void ipoctal_hangup(struct tty_struct *tty) 596static void ipoctal_hangup(struct tty_struct *tty)
@@ -643,6 +610,7 @@ static void ipoctal_hangup(struct tty_struct *tty)
643 tty_port_hangup(&channel->tty_port); 610 tty_port_hangup(&channel->tty_port);
644 611
645 iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr); 612 iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
613 channel->rx_enable = 0;
646 iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr); 614 iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
647 iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr); 615 iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
648 iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr); 616 iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
@@ -652,6 +620,22 @@ static void ipoctal_hangup(struct tty_struct *tty)
652 wake_up_interruptible(&channel->tty_port.open_wait); 620 wake_up_interruptible(&channel->tty_port.open_wait);
653} 621}
654 622
623static void ipoctal_shutdown(struct tty_struct *tty)
624{
625 struct ipoctal_channel *channel = tty->driver_data;
626
627 if (channel == NULL)
628 return;
629
630 iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
631 channel->rx_enable = 0;
632 iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
633 iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
634 iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
635 iowrite8(CR_CMD_RESET_MR, &channel->regs->w.cr);
636 clear_bit(ASYNCB_INITIALIZED, &channel->tty_port.flags);
637}
638
655static const struct tty_operations ipoctal_fops = { 639static const struct tty_operations ipoctal_fops = {
656 .ioctl = NULL, 640 .ioctl = NULL,
657 .open = ipoctal_open, 641 .open = ipoctal_open,
@@ -662,6 +646,7 @@ static const struct tty_operations ipoctal_fops = {
662 .chars_in_buffer = ipoctal_chars_in_buffer, 646 .chars_in_buffer = ipoctal_chars_in_buffer,
663 .get_icount = ipoctal_get_icount, 647 .get_icount = ipoctal_get_icount,
664 .hangup = ipoctal_hangup, 648 .hangup = ipoctal_hangup,
649 .shutdown = ipoctal_shutdown,
665}; 650};
666 651
667static int ipoctal_probe(struct ipack_device *dev) 652static int ipoctal_probe(struct ipack_device *dev)
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index f6fcb87b3504..a9d9d41d95d3 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -84,6 +84,12 @@ int wm5102_patch(struct arizona *arizona)
84} 84}
85 85
86static const struct regmap_irq wm5102_aod_irqs[ARIZONA_NUM_IRQ] = { 86static const struct regmap_irq wm5102_aod_irqs[ARIZONA_NUM_IRQ] = {
87 [ARIZONA_IRQ_MICD_CLAMP_FALL] = {
88 .mask = ARIZONA_MICD_CLAMP_FALL_EINT1
89 },
90 [ARIZONA_IRQ_MICD_CLAMP_RISE] = {
91 .mask = ARIZONA_MICD_CLAMP_RISE_EINT1
92 },
87 [ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 }, 93 [ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
88 [ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 }, 94 [ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
89 [ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 }, 95 [ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
@@ -313,6 +319,7 @@ static const struct reg_default wm5102_reg_default[] = {
313 { 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */ 319 { 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
314 { 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */ 320 { 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
315 { 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */ 321 { 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
322 { 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */
316 { 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */ 323 { 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
317 { 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */ 324 { 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
318 { 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */ 325 { 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */
@@ -1107,6 +1114,8 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
1107 case ARIZONA_ACCESSORY_DETECT_MODE_1: 1114 case ARIZONA_ACCESSORY_DETECT_MODE_1:
1108 case ARIZONA_HEADPHONE_DETECT_1: 1115 case ARIZONA_HEADPHONE_DETECT_1:
1109 case ARIZONA_HEADPHONE_DETECT_2: 1116 case ARIZONA_HEADPHONE_DETECT_2:
1117 case ARIZONA_HP_DACVAL:
1118 case ARIZONA_MICD_CLAMP_CONTROL:
1110 case ARIZONA_MIC_DETECT_1: 1119 case ARIZONA_MIC_DETECT_1:
1111 case ARIZONA_MIC_DETECT_2: 1120 case ARIZONA_MIC_DETECT_2:
1112 case ARIZONA_MIC_DETECT_3: 1121 case ARIZONA_MIC_DETECT_3:
@@ -1876,6 +1885,7 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
1876 case ARIZONA_DSP1_STATUS_2: 1885 case ARIZONA_DSP1_STATUS_2:
1877 case ARIZONA_DSP1_STATUS_3: 1886 case ARIZONA_DSP1_STATUS_3:
1878 case ARIZONA_HEADPHONE_DETECT_2: 1887 case ARIZONA_HEADPHONE_DETECT_2:
1888 case ARIZONA_HP_DACVAL:
1879 case ARIZONA_MIC_DETECT_3: 1889 case ARIZONA_MIC_DETECT_3:
1880 return true; 1890 return true;
1881 default: 1891 default:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 668a5822ab4e..e83fdfe0c8ca 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -499,6 +499,17 @@ config USB_SWITCH_FSA9480
499 stereo and mono audio, video, microphone and UART data to use 499 stereo and mono audio, video, microphone and UART data to use
500 a common connector port. 500 a common connector port.
501 501
502config LATTICE_ECP3_CONFIG
503 tristate "Lattice ECP3 FPGA bitstream configuration via SPI"
504 depends on SPI && SYSFS
505 select FW_LOADER
506 default n
507 help
508 This option enables support for bitstream configuration (programming
509 or loading) of the Lattice ECP3 FPGA family via SPI.
510
511 If unsure, say N.
512
502source "drivers/misc/c2port/Kconfig" 513source "drivers/misc/c2port/Kconfig"
503source "drivers/misc/eeprom/Kconfig" 514source "drivers/misc/eeprom/Kconfig"
504source "drivers/misc/cb710/Kconfig" 515source "drivers/misc/cb710/Kconfig"
@@ -507,4 +518,5 @@ source "drivers/misc/lis3lv02d/Kconfig"
507source "drivers/misc/carma/Kconfig" 518source "drivers/misc/carma/Kconfig"
508source "drivers/misc/altera-stapl/Kconfig" 519source "drivers/misc/altera-stapl/Kconfig"
509source "drivers/misc/mei/Kconfig" 520source "drivers/misc/mei/Kconfig"
521source "drivers/misc/vmw_vmci/Kconfig"
510endmenu 522endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 2129377c0de6..35a1463c72d9 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -49,3 +49,6 @@ obj-y += carma/
49obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o 49obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
50obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/ 50obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
51obj-$(CONFIG_INTEL_MEI) += mei/ 51obj-$(CONFIG_INTEL_MEI) += mei/
52obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o
53obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
54obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
diff --git a/drivers/misc/cb710/Kconfig b/drivers/misc/cb710/Kconfig
index 22429b8b1068..5acb9c5b49c4 100644
--- a/drivers/misc/cb710/Kconfig
+++ b/drivers/misc/cb710/Kconfig
@@ -1,6 +1,6 @@
1config CB710_CORE 1config CB710_CORE
2 tristate "ENE CB710/720 Flash memory card reader support" 2 tristate "ENE CB710/720 Flash memory card reader support"
3 depends on PCI 3 depends on PCI && GENERIC_HARDIRQS
4 help 4 help
5 This option enables support for PCI ENE CB710/720 Flash memory card 5 This option enables support for PCI ENE CB710/720 Flash memory card
6 reader found in some laptops (ie. some versions of HP Compaq nx9500). 6 reader found in some laptops (ie. some versions of HP Compaq nx9500).
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
new file mode 100644
index 000000000000..155700bfd2b6
--- /dev/null
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright (C) 2012 Stefan Roese <sr@denx.de>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/device.h>
11#include <linux/firmware.h>
12#include <linux/module.h>
13#include <linux/errno.h>
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/spi/spi.h>
17#include <linux/platform_device.h>
18#include <linux/delay.h>
19
20#define FIRMWARE_NAME "lattice-ecp3.bit"
21
22/*
 23 * The JTAG IDs of the supported FPGAs. The ID is 32 bits wide and
 24 * bit-reversed, as noted in the manual.
25 */
26#define ID_ECP3_17 0xc2088080
27#define ID_ECP3_35 0xc2048080
28
29/* FPGA commands */
30#define FPGA_CMD_READ_ID 0x07 /* plus 24 bits */
31#define FPGA_CMD_READ_STATUS 0x09 /* plus 24 bits */
32#define FPGA_CMD_CLEAR 0x70
33#define FPGA_CMD_REFRESH 0x71
34#define FPGA_CMD_WRITE_EN 0x4a /* plus 2 bits */
35#define FPGA_CMD_WRITE_DIS 0x4f /* plus 8 bits */
36#define FPGA_CMD_WRITE_INC 0x41 /* plus 0 bits */
37
38/*
 39 * The status register is 32 bits, bit-reversed; DONE is bit 17 per TN1222.pdf
40 * (LatticeECP3 Slave SPI Port User's Guide)
41 */
42#define FPGA_STATUS_DONE 0x00004000
43#define FPGA_STATUS_CLEARED 0x00010000
44
45#define FPGA_CLEAR_TIMEOUT 5000 /* max. 5000ms for FPGA clear */
46#define FPGA_CLEAR_MSLEEP 10
47#define FPGA_CLEAR_LOOP_COUNT (FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP)
48
49struct fpga_data {
50 struct completion fw_loaded;
51};
52
53struct ecp3_dev {
54 u32 jedec_id;
55 char *name;
56};
57
58static const struct ecp3_dev ecp3_dev[] = {
59 {
60 .jedec_id = ID_ECP3_17,
61 .name = "Lattice ECP3-17",
62 },
63 {
64 .jedec_id = ID_ECP3_35,
65 .name = "Lattice ECP3-35",
66 },
67};
68
69static void firmware_load(const struct firmware *fw, void *context)
70{
71 struct spi_device *spi = (struct spi_device *)context;
72 struct fpga_data *data = dev_get_drvdata(&spi->dev);
73 u8 *buffer;
74 int ret;
75 u8 txbuf[8];
76 u8 rxbuf[8];
77 int rx_len = 8;
78 int i;
79 u32 jedec_id;
80 u32 status;
81
82 if (fw->size == 0) {
83 dev_err(&spi->dev, "Error: Firmware size is 0!\n");
84 return;
85 }
86
87 /* Fill dummy data (24 stuffing bits for commands) */
88 txbuf[1] = 0x00;
89 txbuf[2] = 0x00;
90 txbuf[3] = 0x00;
91
92 /* Trying to speak with the FPGA via SPI... */
93 txbuf[0] = FPGA_CMD_READ_ID;
94 ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
95 dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", *(u32 *)&rxbuf[4]);
96 jedec_id = *(u32 *)&rxbuf[4];
97
98 for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) {
99 if (jedec_id == ecp3_dev[i].jedec_id)
100 break;
101 }
102 if (i == ARRAY_SIZE(ecp3_dev)) {
103 dev_err(&spi->dev,
104 "Error: No supported FPGA detected (JEDEC_ID=%08x)!\n",
105 jedec_id);
106 return;
107 }
108
109 dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);
110
111 txbuf[0] = FPGA_CMD_READ_STATUS;
112 ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
113 dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
114
115 buffer = kzalloc(fw->size + 8, GFP_KERNEL);
116 if (!buffer) {
117 dev_err(&spi->dev, "Error: Can't allocate memory!\n");
118 return;
119 }
120
121 /*
122 * Insert WRITE_INC command into stream (one SPI frame)
123 */
124 buffer[0] = FPGA_CMD_WRITE_INC;
125 buffer[1] = 0xff;
126 buffer[2] = 0xff;
127 buffer[3] = 0xff;
128 memcpy(buffer + 4, fw->data, fw->size);
129
130 txbuf[0] = FPGA_CMD_REFRESH;
131 ret = spi_write(spi, txbuf, 4);
132
133 txbuf[0] = FPGA_CMD_WRITE_EN;
134 ret = spi_write(spi, txbuf, 4);
135
136 txbuf[0] = FPGA_CMD_CLEAR;
137 ret = spi_write(spi, txbuf, 4);
138
139 /*
140 * Wait for FPGA memory to become cleared
141 */
142 for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
143 txbuf[0] = FPGA_CMD_READ_STATUS;
144 ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
145 status = *(u32 *)&rxbuf[4];
146 if (status == FPGA_STATUS_CLEARED)
147 break;
148
149 msleep(FPGA_CLEAR_MSLEEP);
150 }
151
152 if (i == FPGA_CLEAR_LOOP_COUNT) {
153 dev_err(&spi->dev,
154 "Error: Timeout waiting for FPGA to clear (status=%08x)!\n",
155 status);
156 kfree(buffer);
157 return;
158 }
159
160 dev_info(&spi->dev, "Configuring the FPGA...\n");
161 ret = spi_write(spi, buffer, fw->size + 8);
162
163 txbuf[0] = FPGA_CMD_WRITE_DIS;
164 ret = spi_write(spi, txbuf, 4);
165
166 txbuf[0] = FPGA_CMD_READ_STATUS;
167 ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
168 dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
169 status = *(u32 *)&rxbuf[4];
170
171 /* Check result */
172 if (status & FPGA_STATUS_DONE)
 173 dev_info(&spi->dev, "FPGA successfully configured!\n");
174 else
175 dev_info(&spi->dev, "FPGA not configured (DONE not set)\n");
176
177 /*
178 * Don't forget to release the firmware again
179 */
180 release_firmware(fw);
181
182 kfree(buffer);
183
184 complete(&data->fw_loaded);
185}
186
187static int lattice_ecp3_probe(struct spi_device *spi)
188{
189 struct fpga_data *data;
190 int err;
191
192 data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
193 if (!data) {
194 dev_err(&spi->dev, "Memory allocation for fpga_data failed\n");
195 return -ENOMEM;
196 }
197 spi_set_drvdata(spi, data);
198
199 init_completion(&data->fw_loaded);
200 err = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
201 FIRMWARE_NAME, &spi->dev,
202 GFP_KERNEL, spi, firmware_load);
203 if (err) {
204 dev_err(&spi->dev, "Firmware loading failed with %d!\n", err);
205 return err;
206 }
207
208 dev_info(&spi->dev, "FPGA bitstream configuration driver registered\n");
209
210 return 0;
211}
212
213static int lattice_ecp3_remove(struct spi_device *spi)
214{
215 struct fpga_data *data = spi_get_drvdata(spi);
216
217 wait_for_completion(&data->fw_loaded);
218
219 return 0;
220}
221
222static const struct spi_device_id lattice_ecp3_id[] = {
223 { "ecp3-17", 0 },
224 { "ecp3-35", 0 },
225 { }
226};
227MODULE_DEVICE_TABLE(spi, lattice_ecp3_id);
228
229static struct spi_driver lattice_ecp3_driver = {
230 .driver = {
231 .name = "lattice-ecp3",
232 .owner = THIS_MODULE,
233 },
234 .probe = lattice_ecp3_probe,
235 .remove = lattice_ecp3_remove,
236 .id_table = lattice_ecp3_id,
237};
238
239module_spi_driver(lattice_ecp3_driver);
240
241MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
242MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI");
243MODULE_LICENSE("GPL");
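A note on hooking the new driver up: it binds by SPI modalias from lattice_ecp3_id[] and pulls the bitstream from /lib/firmware/lattice-ecp3.bit (FIRMWARE_NAME). A hypothetical board-file registration might look like this; bus number, chip select and clock rate are illustrative, not part of this patch:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

static struct spi_board_info ecp3_board_info[] __initdata = {
	{
		.modalias	= "ecp3-35",		/* matches lattice_ecp3_id[] */
		.max_speed_hz	= 10 * 1000 * 1000,	/* illustrative */
		.bus_num	= 0,			/* illustrative */
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
	},
};

/* called from the board's init code (hypothetical helper) */
static int __init ecp3_board_register(void)
{
	return spi_register_board_info(ecp3_board_info,
				       ARRAY_SIZE(ecp3_board_info));
}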
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 5a79ccde2fdf..d21b4d006a55 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -1,11 +1,22 @@
1config INTEL_MEI 1config INTEL_MEI
2 tristate "Intel Management Engine Interface (Intel MEI)" 2 tristate "Intel Management Engine Interface"
3 depends on X86 && PCI && WATCHDOG_CORE 3 depends on X86 && PCI && WATCHDOG_CORE
4 help 4 help
5 The Intel Management Engine (Intel ME) provides Manageability, 5 The Intel Management Engine (Intel ME) provides Manageability,
6 Security and Media services for system containing Intel chipsets. 6 Security and Media services for system containing Intel chipsets.
7 if selected /dev/mei misc device will be created. 7 if selected /dev/mei misc device will be created.
8 8
9 For more information see
10 <http://software.intel.com/en-us/manageability/>
11
12config INTEL_MEI_ME
13 bool "ME Enabled Intel Chipsets"
14 depends on INTEL_MEI
15 depends on X86 && PCI && WATCHDOG_CORE
16 default y
17 help
18 MEI support for ME Enabled Intel chipsets.
19
9 Supported Chipsets are: 20 Supported Chipsets are:
10 7 Series Chipset Family 21 7 Series Chipset Family
11 6 Series Chipset Family 22 6 Series Chipset Family
@@ -24,5 +35,3 @@ config INTEL_MEI
24 82Q33 Express 35 82Q33 Express
25 82X38/X48 Express 36 82X38/X48 Express
26 37
27 For more information see
28 <http://software.intel.com/en-us/manageability/>
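In .config terms, the split above means the core driver and the ME hardware glue are now selected separately; an illustrative fragment (not mandated by the patch):

CONFIG_INTEL_MEI=m
CONFIG_INTEL_MEI_ME=y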
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 0017842e166c..040af6c7b147 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -4,9 +4,11 @@
4# 4#
5obj-$(CONFIG_INTEL_MEI) += mei.o 5obj-$(CONFIG_INTEL_MEI) += mei.o
6mei-objs := init.o 6mei-objs := init.o
7mei-objs += hbm.o
7mei-objs += interrupt.o 8mei-objs += interrupt.o
8mei-objs += interface.o 9mei-objs += client.o
9mei-objs += iorw.o
10mei-objs += main.o 10mei-objs += main.o
11mei-objs += amthif.o 11mei-objs += amthif.o
12mei-objs += wd.o 12mei-objs += wd.o
13mei-$(CONFIG_INTEL_MEI_ME) += pci-me.o
14mei-$(CONFIG_INTEL_MEI_ME) += hw-me.o
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index e40ffd9502d1..c86d7e3839a4 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -31,15 +31,16 @@
31#include <linux/jiffies.h> 31#include <linux/jiffies.h>
32#include <linux/uaccess.h> 32#include <linux/uaccess.h>
33 33
34#include <linux/mei.h>
34 35
35#include "mei_dev.h" 36#include "mei_dev.h"
36#include "hw.h" 37#include "hbm.h"
37#include <linux/mei.h> 38#include "hw-me.h"
38#include "interface.h" 39#include "client.h"
39 40
40const uuid_le mei_amthi_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, 0xac, 41const uuid_le mei_amthif_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d,
41 0xa8, 0x46, 0xe0, 0xff, 0x65, 42 0xac, 0xa8, 0x46, 0xe0,
42 0x81, 0x4c); 43 0xff, 0x65, 0x81, 0x4c);
43 44
44/** 45/**
45 * mei_amthif_reset_params - initializes mei device iamthif 46 * mei_amthif_reset_params - initializes mei device iamthif
@@ -64,22 +65,24 @@ void mei_amthif_reset_params(struct mei_device *dev)
64 * @dev: the device structure 65 * @dev: the device structure
65 * 66 *
66 */ 67 */
67void mei_amthif_host_init(struct mei_device *dev) 68int mei_amthif_host_init(struct mei_device *dev)
68{ 69{
69 int i; 70 struct mei_cl *cl = &dev->iamthif_cl;
70 unsigned char *msg_buf; 71 unsigned char *msg_buf;
72 int ret, i;
73
74 dev->iamthif_state = MEI_IAMTHIF_IDLE;
71 75
72 mei_cl_init(&dev->iamthif_cl, dev); 76 mei_cl_init(cl, dev);
73 dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
74 77
75 /* find ME amthi client */ 78 i = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
76 i = mei_me_cl_link(dev, &dev->iamthif_cl,
77 &mei_amthi_guid, MEI_IAMTHIF_HOST_CLIENT_ID);
78 if (i < 0) { 79 if (i < 0) {
79 dev_info(&dev->pdev->dev, "failed to find iamthif client.\n"); 80 dev_info(&dev->pdev->dev, "amthif: failed to find the client\n");
80 return; 81 return -ENOENT;
81 } 82 }
82 83
84 cl->me_client_id = dev->me_clients[i].client_id;
85
83 /* Assign iamthif_mtu to the value received from ME */ 86 /* Assign iamthif_mtu to the value received from ME */
84 87
85 dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length; 88 dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length;
@@ -93,19 +96,29 @@ void mei_amthif_host_init(struct mei_device *dev)
93 msg_buf = kcalloc(dev->iamthif_mtu, 96 msg_buf = kcalloc(dev->iamthif_mtu,
94 sizeof(unsigned char), GFP_KERNEL); 97 sizeof(unsigned char), GFP_KERNEL);
95 if (!msg_buf) { 98 if (!msg_buf) {
96 dev_dbg(&dev->pdev->dev, "memory allocation for ME message buffer failed.\n"); 99 dev_err(&dev->pdev->dev, "amthif: memory allocation for ME message buffer failed.\n");
97 return; 100 return -ENOMEM;
98 } 101 }
99 102
100 dev->iamthif_msg_buf = msg_buf; 103 dev->iamthif_msg_buf = msg_buf;
101 104
102 if (mei_connect(dev, &dev->iamthif_cl)) { 105 ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
103 dev_dbg(&dev->pdev->dev, "Failed to connect to AMTHI client\n"); 106
104 dev->iamthif_cl.state = MEI_FILE_DISCONNECTED; 107 if (ret < 0) {
105 dev->iamthif_cl.host_client_id = 0; 108 dev_err(&dev->pdev->dev, "amthif: failed link client\n");
109 return -ENOENT;
110 }
111
112 cl->state = MEI_FILE_CONNECTING;
113
114 if (mei_hbm_cl_connect_req(dev, cl)) {
115 dev_dbg(&dev->pdev->dev, "amthif: Failed to connect to ME client\n");
116 cl->state = MEI_FILE_DISCONNECTED;
117 cl->host_client_id = 0;
106 } else { 118 } else {
107 dev->iamthif_cl.timer_count = MEI_CONNECT_TIMEOUT; 119 cl->timer_count = MEI_CONNECT_TIMEOUT;
108 } 120 }
121 return 0;
109} 122}
110 123
111/** 124/**
@@ -168,10 +181,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
168 i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id); 181 i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
169 182
170 if (i < 0) { 183 if (i < 0) {
171 dev_dbg(&dev->pdev->dev, "amthi client not found.\n"); 184 dev_dbg(&dev->pdev->dev, "amthif client not found.\n");
172 return -ENODEV; 185 return -ENODEV;
173 } 186 }
174 dev_dbg(&dev->pdev->dev, "checking amthi data\n"); 187 dev_dbg(&dev->pdev->dev, "checking amthif data\n");
175 cb = mei_amthif_find_read_list_entry(dev, file); 188 cb = mei_amthif_find_read_list_entry(dev, file);
176 189
177 /* Check for if we can block or not*/ 190 /* Check for if we can block or not*/
@@ -179,7 +192,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
179 return -EAGAIN; 192 return -EAGAIN;
180 193
181 194
182 dev_dbg(&dev->pdev->dev, "waiting for amthi data\n"); 195 dev_dbg(&dev->pdev->dev, "waiting for amthif data\n");
183 while (cb == NULL) { 196 while (cb == NULL) {
184 /* unlock the Mutex */ 197 /* unlock the Mutex */
185 mutex_unlock(&dev->device_lock); 198 mutex_unlock(&dev->device_lock);
@@ -197,17 +210,17 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
197 } 210 }
198 211
199 212
200 dev_dbg(&dev->pdev->dev, "Got amthi data\n"); 213 dev_dbg(&dev->pdev->dev, "Got amthif data\n");
201 dev->iamthif_timer = 0; 214 dev->iamthif_timer = 0;
202 215
203 if (cb) { 216 if (cb) {
204 timeout = cb->read_time + 217 timeout = cb->read_time +
205 mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); 218 mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
206 dev_dbg(&dev->pdev->dev, "amthi timeout = %lud\n", 219 dev_dbg(&dev->pdev->dev, "amthif timeout = %lud\n",
207 timeout); 220 timeout);
208 221
209 if (time_after(jiffies, timeout)) { 222 if (time_after(jiffies, timeout)) {
210 dev_dbg(&dev->pdev->dev, "amthi Time out\n"); 223 dev_dbg(&dev->pdev->dev, "amthif Time out\n");
211 /* 15 sec for the message has expired */ 224 /* 15 sec for the message has expired */
212 list_del(&cb->list); 225 list_del(&cb->list);
213 rets = -ETIMEDOUT; 226 rets = -ETIMEDOUT;
@@ -227,9 +240,9 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
227 * remove message from deletion list 240 * remove message from deletion list
228 */ 241 */
229 242
230 dev_dbg(&dev->pdev->dev, "amthi cb->response_buffer size - %d\n", 243 dev_dbg(&dev->pdev->dev, "amthif cb->response_buffer size - %d\n",
231 cb->response_buffer.size); 244 cb->response_buffer.size);
232 dev_dbg(&dev->pdev->dev, "amthi cb->buf_idx - %lu\n", cb->buf_idx); 245 dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
233 246
234 /* length is being turncated to PAGE_SIZE, however, 247 /* length is being turncated to PAGE_SIZE, however,
235 * the buf_idx may point beyond */ 248 * the buf_idx may point beyond */
@@ -245,7 +258,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
245 } 258 }
246 } 259 }
247free: 260free:
248 dev_dbg(&dev->pdev->dev, "free amthi cb memory.\n"); 261 dev_dbg(&dev->pdev->dev, "free amthif cb memory.\n");
249 *offset = 0; 262 *offset = 0;
250 mei_io_cb_free(cb); 263 mei_io_cb_free(cb);
251out: 264out:
@@ -269,7 +282,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
269 if (!dev || !cb) 282 if (!dev || !cb)
270 return -ENODEV; 283 return -ENODEV;
271 284
272 dev_dbg(&dev->pdev->dev, "write data to amthi client.\n"); 285 dev_dbg(&dev->pdev->dev, "write data to amthif client.\n");
273 286
274 dev->iamthif_state = MEI_IAMTHIF_WRITING; 287 dev->iamthif_state = MEI_IAMTHIF_WRITING;
275 dev->iamthif_current_cb = cb; 288 dev->iamthif_current_cb = cb;
@@ -280,15 +293,15 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
280 memcpy(dev->iamthif_msg_buf, cb->request_buffer.data, 293 memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
281 cb->request_buffer.size); 294 cb->request_buffer.size);
282 295
283 ret = mei_flow_ctrl_creds(dev, &dev->iamthif_cl); 296 ret = mei_cl_flow_ctrl_creds(&dev->iamthif_cl);
284 if (ret < 0) 297 if (ret < 0)
285 return ret; 298 return ret;
286 299
287 if (ret && dev->mei_host_buffer_is_empty) { 300 if (ret && dev->hbuf_is_ready) {
288 ret = 0; 301 ret = 0;
289 dev->mei_host_buffer_is_empty = false; 302 dev->hbuf_is_ready = false;
290 if (cb->request_buffer.size > mei_hbuf_max_data(dev)) { 303 if (cb->request_buffer.size > mei_hbuf_max_len(dev)) {
291 mei_hdr.length = mei_hbuf_max_data(dev); 304 mei_hdr.length = mei_hbuf_max_len(dev);
292 mei_hdr.msg_complete = 0; 305 mei_hdr.msg_complete = 0;
293 } else { 306 } else {
294 mei_hdr.length = cb->request_buffer.size; 307 mei_hdr.length = cb->request_buffer.size;
@@ -300,25 +313,24 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
300 mei_hdr.reserved = 0; 313 mei_hdr.reserved = 0;
301 dev->iamthif_msg_buf_index += mei_hdr.length; 314 dev->iamthif_msg_buf_index += mei_hdr.length;
302 if (mei_write_message(dev, &mei_hdr, 315 if (mei_write_message(dev, &mei_hdr,
303 (unsigned char *)(dev->iamthif_msg_buf), 316 (unsigned char *)dev->iamthif_msg_buf))
304 mei_hdr.length))
305 return -ENODEV; 317 return -ENODEV;
306 318
307 if (mei_hdr.msg_complete) { 319 if (mei_hdr.msg_complete) {
308 if (mei_flow_ctrl_reduce(dev, &dev->iamthif_cl)) 320 if (mei_cl_flow_ctrl_reduce(&dev->iamthif_cl))
309 return -ENODEV; 321 return -ENODEV;
310 dev->iamthif_flow_control_pending = true; 322 dev->iamthif_flow_control_pending = true;
311 dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL; 323 dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
312 dev_dbg(&dev->pdev->dev, "add amthi cb to write waiting list\n"); 324 dev_dbg(&dev->pdev->dev, "add amthif cb to write waiting list\n");
313 dev->iamthif_current_cb = cb; 325 dev->iamthif_current_cb = cb;
314 dev->iamthif_file_object = cb->file_object; 326 dev->iamthif_file_object = cb->file_object;
315 list_add_tail(&cb->list, &dev->write_waiting_list.list); 327 list_add_tail(&cb->list, &dev->write_waiting_list.list);
316 } else { 328 } else {
317 dev_dbg(&dev->pdev->dev, "message does not complete, so add amthi cb to write list.\n"); 329 dev_dbg(&dev->pdev->dev, "message does not complete, so add amthif cb to write list.\n");
318 list_add_tail(&cb->list, &dev->write_list.list); 330 list_add_tail(&cb->list, &dev->write_list.list);
319 } 331 }
320 } else { 332 } else {
321 if (!(dev->mei_host_buffer_is_empty)) 333 if (!dev->hbuf_is_ready)
322 dev_dbg(&dev->pdev->dev, "host buffer is not empty"); 334 dev_dbg(&dev->pdev->dev, "host buffer is not empty");
323 335
324 dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n"); 336 dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n");
@@ -383,7 +395,7 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
383 dev->iamthif_timer = 0; 395 dev->iamthif_timer = 0;
384 dev->iamthif_file_object = NULL; 396 dev->iamthif_file_object = NULL;
385 397
386 dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n"); 398 dev_dbg(&dev->pdev->dev, "complete amthif cmd_list cb.\n");
387 399
388 list_for_each_entry_safe(pos, next, &dev->amthif_cmd_list.list, list) { 400 list_for_each_entry_safe(pos, next, &dev->amthif_cmd_list.list, list) {
389 list_del(&pos->list); 401 list_del(&pos->list);
@@ -392,7 +404,7 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
392 status = mei_amthif_send_cmd(dev, pos); 404 status = mei_amthif_send_cmd(dev, pos);
393 if (status) { 405 if (status) {
394 dev_dbg(&dev->pdev->dev, 406 dev_dbg(&dev->pdev->dev,
395 "amthi write failed status = %d\n", 407 "amthif write failed status = %d\n",
396 status); 408 status);
397 return; 409 return;
398 } 410 }
@@ -412,7 +424,7 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
412 if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE && 424 if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
413 dev->iamthif_file_object == file) { 425 dev->iamthif_file_object == file) {
414 mask |= (POLLIN | POLLRDNORM); 426 mask |= (POLLIN | POLLRDNORM);
415 dev_dbg(&dev->pdev->dev, "run next amthi cb\n"); 427 dev_dbg(&dev->pdev->dev, "run next amthif cb\n");
416 mei_amthif_run_next_cmd(dev); 428 mei_amthif_run_next_cmd(dev);
417 } 429 }
418 return mask; 430 return mask;
@@ -434,54 +446,51 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
434int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots, 446int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
435 struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list) 447 struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
436{ 448{
437 struct mei_msg_hdr *mei_hdr; 449 struct mei_msg_hdr mei_hdr;
438 struct mei_cl *cl = cb->cl; 450 struct mei_cl *cl = cb->cl;
439 size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index; 451 size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
440 size_t msg_slots = mei_data2slots(len); 452 size_t msg_slots = mei_data2slots(len);
441 453
442 mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0]; 454 mei_hdr.host_addr = cl->host_client_id;
443 mei_hdr->host_addr = cl->host_client_id; 455 mei_hdr.me_addr = cl->me_client_id;
444 mei_hdr->me_addr = cl->me_client_id; 456 mei_hdr.reserved = 0;
445 mei_hdr->reserved = 0;
446 457
447 if (*slots >= msg_slots) { 458 if (*slots >= msg_slots) {
448 mei_hdr->length = len; 459 mei_hdr.length = len;
449 mei_hdr->msg_complete = 1; 460 mei_hdr.msg_complete = 1;
450 /* Split the message only if we can write the whole host buffer */ 461 /* Split the message only if we can write the whole host buffer */
451 } else if (*slots == dev->hbuf_depth) { 462 } else if (*slots == dev->hbuf_depth) {
452 msg_slots = *slots; 463 msg_slots = *slots;
453 len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); 464 len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
454 mei_hdr->length = len; 465 mei_hdr.length = len;
455 mei_hdr->msg_complete = 0; 466 mei_hdr.msg_complete = 0;
456 } else { 467 } else {
457 /* wait for next time the host buffer is empty */ 468 /* wait for next time the host buffer is empty */
458 return 0; 469 return 0;
459 } 470 }
460 471
461 dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n", 472 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
462 mei_hdr->length, mei_hdr->msg_complete);
463 473
464 *slots -= msg_slots; 474 *slots -= msg_slots;
465 if (mei_write_message(dev, mei_hdr, 475 if (mei_write_message(dev, &mei_hdr,
466 dev->iamthif_msg_buf + dev->iamthif_msg_buf_index, 476 dev->iamthif_msg_buf + dev->iamthif_msg_buf_index)) {
467 mei_hdr->length)) {
468 dev->iamthif_state = MEI_IAMTHIF_IDLE; 477 dev->iamthif_state = MEI_IAMTHIF_IDLE;
469 cl->status = -ENODEV; 478 cl->status = -ENODEV;
470 list_del(&cb->list); 479 list_del(&cb->list);
471 return -ENODEV; 480 return -ENODEV;
472 } 481 }
473 482
474 if (mei_flow_ctrl_reduce(dev, cl)) 483 if (mei_cl_flow_ctrl_reduce(cl))
475 return -ENODEV; 484 return -ENODEV;
476 485
477 dev->iamthif_msg_buf_index += mei_hdr->length; 486 dev->iamthif_msg_buf_index += mei_hdr.length;
478 cl->status = 0; 487 cl->status = 0;
479 488
480 if (mei_hdr->msg_complete) { 489 if (mei_hdr.msg_complete) {
481 dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL; 490 dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
482 dev->iamthif_flow_control_pending = true; 491 dev->iamthif_flow_control_pending = true;
483 492
484 /* save iamthif cb sent to amthi client */ 493 /* save iamthif cb sent to amthif client */
485 cb->buf_idx = dev->iamthif_msg_buf_index; 494 cb->buf_idx = dev->iamthif_msg_buf_index;
486 dev->iamthif_current_cb = cb; 495 dev->iamthif_current_cb = cb;
487 496
@@ -494,11 +503,11 @@ int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
494 503
495/** 504/**
496 * mei_amthif_irq_read_message - read routine after ISR to 505 * mei_amthif_irq_read_message - read routine after ISR to
497 * handle the read amthi message 506 * handle the read amthif message
498 * 507 *
499 * @complete_list: An instance of our list structure 508 * @complete_list: An instance of our list structure
500 * @dev: the device structure 509 * @dev: the device structure
501 * @mei_hdr: header of amthi message 510 * @mei_hdr: header of amthif message
502 * 511 *
503 * returns 0 on success, <0 on failure. 512 * returns 0 on success, <0 on failure.
504 */ 513 */
@@ -522,10 +531,10 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
522 return 0; 531 return 0;
523 532
524 dev_dbg(&dev->pdev->dev, 533 dev_dbg(&dev->pdev->dev,
525 "amthi_message_buffer_index =%d\n", 534 "amthif_message_buffer_index =%d\n",
526 mei_hdr->length); 535 mei_hdr->length);
527 536
528 dev_dbg(&dev->pdev->dev, "completed amthi read.\n "); 537 dev_dbg(&dev->pdev->dev, "completed amthif read.\n ");
529 if (!dev->iamthif_current_cb) 538 if (!dev->iamthif_current_cb)
530 return -ENODEV; 539 return -ENODEV;
531 540
@@ -540,8 +549,8 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
540 cb->read_time = jiffies; 549 cb->read_time = jiffies;
541 if (dev->iamthif_ioctl && cb->cl == &dev->iamthif_cl) { 550 if (dev->iamthif_ioctl && cb->cl == &dev->iamthif_cl) {
542 /* found the iamthif cb */ 551 /* found the iamthif cb */
543 dev_dbg(&dev->pdev->dev, "complete the amthi read cb.\n "); 552 dev_dbg(&dev->pdev->dev, "complete the amthif read cb.\n ");
544 dev_dbg(&dev->pdev->dev, "add the amthi read cb to complete.\n "); 553 dev_dbg(&dev->pdev->dev, "add the amthif read cb to complete.\n ");
545 list_add_tail(&cb->list, &complete_list->list); 554 list_add_tail(&cb->list, &complete_list->list);
546 } 555 }
547 return 0; 556 return 0;
@@ -563,7 +572,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
563 return -EMSGSIZE; 572 return -EMSGSIZE;
564 } 573 }
565 *slots -= mei_data2slots(sizeof(struct hbm_flow_control)); 574 *slots -= mei_data2slots(sizeof(struct hbm_flow_control));
566 if (mei_send_flow_control(dev, &dev->iamthif_cl)) { 575 if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) {
567 dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n"); 576 dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
568 return -EIO; 577 return -EIO;
569 } 578 }
@@ -574,7 +583,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
574 dev->iamthif_msg_buf_index = 0; 583 dev->iamthif_msg_buf_index = 0;
575 dev->iamthif_msg_buf_size = 0; 584 dev->iamthif_msg_buf_size = 0;
576 dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER; 585 dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
577 dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev); 586 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
578 return 0; 587 return 0;
579} 588}
580 589
@@ -593,7 +602,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
593 dev->iamthif_msg_buf, 602 dev->iamthif_msg_buf,
594 dev->iamthif_msg_buf_index); 603 dev->iamthif_msg_buf_index);
595 list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list); 604 list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
596 dev_dbg(&dev->pdev->dev, "amthi read completed\n"); 605 dev_dbg(&dev->pdev->dev, "amthif read completed\n");
597 dev->iamthif_timer = jiffies; 606 dev->iamthif_timer = jiffies;
598 dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n", 607 dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
599 dev->iamthif_timer); 608 dev->iamthif_timer);
@@ -601,7 +610,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
601 mei_amthif_run_next_cmd(dev); 610 mei_amthif_run_next_cmd(dev);
602 } 611 }
603 612
604 dev_dbg(&dev->pdev->dev, "completing amthi call back.\n"); 613 dev_dbg(&dev->pdev->dev, "completing amthif call back.\n");
605 wake_up_interruptible(&dev->iamthif_cl.wait); 614 wake_up_interruptible(&dev->iamthif_cl.wait);
606} 615}
607 616
@@ -635,7 +644,8 @@ static bool mei_clear_list(struct mei_device *dev,
635 if (dev->iamthif_current_cb == cb_pos) { 644 if (dev->iamthif_current_cb == cb_pos) {
636 dev->iamthif_current_cb = NULL; 645 dev->iamthif_current_cb = NULL;
637 /* send flow control to iamthif client */ 646 /* send flow control to iamthif client */
638 mei_send_flow_control(dev, &dev->iamthif_cl); 647 mei_hbm_cl_flow_control_req(dev,
648 &dev->iamthif_cl);
639 } 649 }
640 /* free all allocated buffers */ 650 /* free all allocated buffers */
641 mei_io_cb_free(cb_pos); 651 mei_io_cb_free(cb_pos);
@@ -706,11 +716,11 @@ int mei_amthif_release(struct mei_device *dev, struct file *file)
706 if (dev->iamthif_file_object == file && 716 if (dev->iamthif_file_object == file &&
707 dev->iamthif_state != MEI_IAMTHIF_IDLE) { 717 dev->iamthif_state != MEI_IAMTHIF_IDLE) {
708 718
709 dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n", 719 dev_dbg(&dev->pdev->dev, "amthif canceled iamthif state %d\n",
710 dev->iamthif_state); 720 dev->iamthif_state);
711 dev->iamthif_canceled = true; 721 dev->iamthif_canceled = true;
712 if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) { 722 if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
713 dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n"); 723 dev_dbg(&dev->pdev->dev, "run next amthif iamthif cb\n");
714 mei_amthif_run_next_cmd(dev); 724 mei_amthif_run_next_cmd(dev);
715 } 725 }
716 } 726 }
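The header handling change running through this file (an on-stack struct mei_msg_hdr, no separate length argument to mei_write_message(), MEI_HDR_FMT/MEI_HDR_PRM for tracing) reduces to the following pattern; a sketch in which cl, buf and len are assumed to be in scope:

	struct mei_msg_hdr hdr = {
		.host_addr	= cl->host_client_id,
		.me_addr	= cl->me_client_id,
		.length		= len,		/* payload length lives in the header */
		.msg_complete	= 1,
		.reserved	= 0,
	};

	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&hdr));
	if (mei_write_message(dev, &hdr, buf))	/* no length argument any more */
		return -ENODEV;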
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
new file mode 100644
index 000000000000..1569afe935de
--- /dev/null
+++ b/drivers/misc/mei/client.c
@@ -0,0 +1,729 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/pci.h>
18#include <linux/sched.h>
19#include <linux/wait.h>
20#include <linux/delay.h>
21
22#include <linux/mei.h>
23
24#include "mei_dev.h"
25#include "hbm.h"
26#include "client.h"
27
28/**
29 * mei_me_cl_by_uuid - locate index of me client
30 *
31 * @dev: mei device
32 * returns me client index or -ENOENT if not found
33 */
34int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
35{
36 int i, res = -ENOENT;
37
38 for (i = 0; i < dev->me_clients_num; ++i)
39 if (uuid_le_cmp(*uuid,
40 dev->me_clients[i].props.protocol_name) == 0) {
41 res = i;
42 break;
43 }
44
45 return res;
46}
47
48
49/**
 50 * mei_me_cl_by_id - return the index into me_clients for client_id
51 *
52 * @dev: the device structure
53 * @client_id: me client id
54 *
55 * Locking: called under "dev->device_lock" lock
56 *
57 * returns index on success, -ENOENT on failure.
58 */
59
60int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
61{
62 int i;
63 for (i = 0; i < dev->me_clients_num; i++)
64 if (dev->me_clients[i].client_id == client_id)
65 break;
66 if (WARN_ON(dev->me_clients[i].client_id != client_id))
67 return -ENOENT;
68
69 if (i == dev->me_clients_num)
70 return -ENOENT;
71
72 return i;
73}
74
75
76/**
77 * mei_io_list_flush - removes list entry belonging to cl.
78 *
79 * @list: An instance of our list structure
80 * @cl: host client
81 */
82void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
83{
84 struct mei_cl_cb *cb;
85 struct mei_cl_cb *next;
86
87 list_for_each_entry_safe(cb, next, &list->list, list) {
88 if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
89 list_del(&cb->list);
90 }
91}
92
93/**
 94 * mei_io_cb_free - free mei_cl_cb related memory
95 *
96 * @cb: mei callback struct
97 */
98void mei_io_cb_free(struct mei_cl_cb *cb)
99{
100 if (cb == NULL)
101 return;
102
103 kfree(cb->request_buffer.data);
104 kfree(cb->response_buffer.data);
105 kfree(cb);
106}
107
108/**
109 * mei_io_cb_init - allocate and initialize io callback
110 *
111 * @cl - mei client
112 * @file: pointer to file structure
113 *
114 * returns mei_cl_cb pointer or NULL;
115 */
116struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
117{
118 struct mei_cl_cb *cb;
119
120 cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
121 if (!cb)
122 return NULL;
123
124 mei_io_list_init(cb);
125
126 cb->file_object = fp;
127 cb->cl = cl;
128 cb->buf_idx = 0;
129 return cb;
130}
131
132/**
133 * mei_io_cb_alloc_req_buf - allocate request buffer
134 *
135 * @cb - io callback structure
136 * @size: size of the buffer
137 *
138 * returns 0 on success
139 * -EINVAL if cb is NULL
140 * -ENOMEM if allocation failed
141 */
142int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
143{
144 if (!cb)
145 return -EINVAL;
146
147 if (length == 0)
148 return 0;
149
150 cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
151 if (!cb->request_buffer.data)
152 return -ENOMEM;
153 cb->request_buffer.size = length;
154 return 0;
155}
156/**
 157 * mei_io_cb_alloc_resp_buf - allocate response buffer
158 *
159 * @cb - io callback structure
160 * @size: size of the buffer
161 *
162 * returns 0 on success
163 * -EINVAL if cb is NULL
164 * -ENOMEM if allocation failed
165 */
166int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
167{
168 if (!cb)
169 return -EINVAL;
170
171 if (length == 0)
172 return 0;
173
174 cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
175 if (!cb->response_buffer.data)
176 return -ENOMEM;
177 cb->response_buffer.size = length;
178 return 0;
179}
180
181
182
183/**
184 * mei_cl_flush_queues - flushes queue lists belonging to cl.
185 *
186 * @dev: the device structure
187 * @cl: host client
188 */
189int mei_cl_flush_queues(struct mei_cl *cl)
190{
191 if (WARN_ON(!cl || !cl->dev))
192 return -EINVAL;
193
194 dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
195 mei_io_list_flush(&cl->dev->read_list, cl);
196 mei_io_list_flush(&cl->dev->write_list, cl);
197 mei_io_list_flush(&cl->dev->write_waiting_list, cl);
198 mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
199 mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
200 mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
201 mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
202 return 0;
203}
204
205
206/**
 207 * mei_cl_init - initializes cl.
208 *
209 * @cl: host client to be initialized
210 * @dev: mei device
211 */
212void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
213{
214 memset(cl, 0, sizeof(struct mei_cl));
215 init_waitqueue_head(&cl->wait);
216 init_waitqueue_head(&cl->rx_wait);
217 init_waitqueue_head(&cl->tx_wait);
218 INIT_LIST_HEAD(&cl->link);
219 cl->reading_state = MEI_IDLE;
220 cl->writing_state = MEI_IDLE;
221 cl->dev = dev;
222}
223
224/**
225 * mei_cl_allocate - allocates cl structure and sets it up.
226 *
227 * @dev: mei device
228 * returns The allocated file or NULL on failure
229 */
230struct mei_cl *mei_cl_allocate(struct mei_device *dev)
231{
232 struct mei_cl *cl;
233
234 cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
235 if (!cl)
236 return NULL;
237
238 mei_cl_init(cl, dev);
239
240 return cl;
241}
242
243/**
244 * mei_cl_find_read_cb - find this cl's callback in the read list
245 *
246 * @dev: device structure
247 * returns cb on success, NULL on error
248 */
249struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
250{
251 struct mei_device *dev = cl->dev;
252 struct mei_cl_cb *cb = NULL;
253 struct mei_cl_cb *next = NULL;
254
255 list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
256 if (mei_cl_cmp_id(cl, cb->cl))
257 return cb;
258 return NULL;
259}
260
 261/** mei_cl_link: allocate a host id in the host map
 262 *
 263 * @cl - host client
 264 * @id - fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) to generate one
 265 * returns 0 on success
 266 * -EINVAL on incorrect values
 267 * -ENOENT if no free host id could be found
268 */
269int mei_cl_link(struct mei_cl *cl, int id)
270{
271 struct mei_device *dev;
272
273 if (WARN_ON(!cl || !cl->dev))
274 return -EINVAL;
275
276 dev = cl->dev;
277
 278 /* If the id is not assigned, get one */
279 if (id == MEI_HOST_CLIENT_ID_ANY)
280 id = find_first_zero_bit(dev->host_clients_map,
281 MEI_CLIENTS_MAX);
282
283 if (id >= MEI_CLIENTS_MAX) {
 284 dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
285 return -ENOENT;
286 }
287
288 dev->open_handle_count++;
289
290 cl->host_client_id = id;
291 list_add_tail(&cl->link, &dev->file_list);
292
293 set_bit(id, dev->host_clients_map);
294
295 cl->state = MEI_FILE_INITIALIZING;
296
297 dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id);
298 return 0;
299}
300
301/**
302 * mei_cl_unlink - remove me_cl from the list
303 *
304 * @dev: the device structure
305 */
306int mei_cl_unlink(struct mei_cl *cl)
307{
308 struct mei_device *dev;
309 struct mei_cl *pos, *next;
310
311 /* don't shout on error exit path */
312 if (!cl)
313 return 0;
314
315 /* wd and amthif might not be initialized */
316 if (!cl->dev)
317 return 0;
318
319 dev = cl->dev;
320
321 list_for_each_entry_safe(pos, next, &dev->file_list, link) {
322 if (cl->host_client_id == pos->host_client_id) {
323 dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
324 pos->host_client_id, pos->me_client_id);
325 list_del_init(&pos->link);
326 break;
327 }
328 }
329 return 0;
330}
331
332
333void mei_host_client_init(struct work_struct *work)
334{
335 struct mei_device *dev = container_of(work,
336 struct mei_device, init_work);
337 struct mei_client_properties *client_props;
338 int i;
339
340 mutex_lock(&dev->device_lock);
341
342 bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
343 dev->open_handle_count = 0;
344
345 /*
346 * Reserving the first three client IDs
347 * 0: Reserved for MEI Bus Message communications
348 * 1: Reserved for Watchdog
349 * 2: Reserved for AMTHI
350 */
351 bitmap_set(dev->host_clients_map, 0, 3);
352
353 for (i = 0; i < dev->me_clients_num; i++) {
354 client_props = &dev->me_clients[i].props;
355
356 if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
357 mei_amthif_host_init(dev);
358 else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
359 mei_wd_host_init(dev);
360 }
361
362 dev->dev_state = MEI_DEV_ENABLED;
363
364 mutex_unlock(&dev->device_lock);
365}
366
367
368/**
 369 * mei_cl_disconnect - disconnect the host client from the me one
370 *
371 * @cl: host client
372 *
373 * Locking: called under "dev->device_lock" lock
374 *
375 * returns 0 on success, <0 on failure.
376 */
377int mei_cl_disconnect(struct mei_cl *cl)
378{
379 struct mei_device *dev;
380 struct mei_cl_cb *cb;
381 int rets, err;
382
383 if (WARN_ON(!cl || !cl->dev))
384 return -ENODEV;
385
386 dev = cl->dev;
387
388 if (cl->state != MEI_FILE_DISCONNECTING)
389 return 0;
390
391 cb = mei_io_cb_init(cl, NULL);
392 if (!cb)
393 return -ENOMEM;
394
395 cb->fop_type = MEI_FOP_CLOSE;
396 if (dev->hbuf_is_ready) {
397 dev->hbuf_is_ready = false;
398 if (mei_hbm_cl_disconnect_req(dev, cl)) {
399 rets = -ENODEV;
400 dev_err(&dev->pdev->dev, "failed to disconnect.\n");
401 goto free;
402 }
403 mdelay(10); /* Wait for hardware disconnection ready */
404 list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
405 } else {
406 dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
407 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
408
409 }
410 mutex_unlock(&dev->device_lock);
411
412 err = wait_event_timeout(dev->wait_recvd_msg,
413 MEI_FILE_DISCONNECTED == cl->state,
414 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
415
416 mutex_lock(&dev->device_lock);
417 if (MEI_FILE_DISCONNECTED == cl->state) {
418 rets = 0;
419 dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
420 } else {
421 rets = -ENODEV;
422 if (MEI_FILE_DISCONNECTED != cl->state)
423 dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
424
425 if (err)
426 dev_dbg(&dev->pdev->dev,
427 "wait failed disconnect err=%08x\n",
428 err);
429
430 dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
431 }
432
433 mei_io_list_flush(&dev->ctrl_rd_list, cl);
434 mei_io_list_flush(&dev->ctrl_wr_list, cl);
435free:
436 mei_io_cb_free(cb);
437 return rets;
438}
439
440
441/**
442 * mei_cl_is_other_connecting - checks if other
443 * client with the same me client id is connecting
444 *
445 * @cl: private data of the file object
446 *
 447 * returns true if another client with the same me client id is connecting, false otherwise.
448 */
449bool mei_cl_is_other_connecting(struct mei_cl *cl)
450{
451 struct mei_device *dev;
452 struct mei_cl *pos;
453 struct mei_cl *next;
454
455 if (WARN_ON(!cl || !cl->dev))
456 return false;
457
458 dev = cl->dev;
459
460 list_for_each_entry_safe(pos, next, &dev->file_list, link) {
461 if ((pos->state == MEI_FILE_CONNECTING) &&
462 (pos != cl) && cl->me_client_id == pos->me_client_id)
463 return true;
464
465 }
466
467 return false;
468}
469
470/**
 471 * mei_cl_connect - connect the host client to the me one
472 *
473 * @cl: host client
474 *
475 * Locking: called under "dev->device_lock" lock
476 *
477 * returns 0 on success, <0 on failure.
478 */
479int mei_cl_connect(struct mei_cl *cl, struct file *file)
480{
481 struct mei_device *dev;
482 struct mei_cl_cb *cb;
483 long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
484 int rets;
485
486 if (WARN_ON(!cl || !cl->dev))
487 return -ENODEV;
488
489 dev = cl->dev;
490
491 cb = mei_io_cb_init(cl, file);
492 if (!cb) {
493 rets = -ENOMEM;
494 goto out;
495 }
496
497 cb->fop_type = MEI_FOP_IOCTL;
498
499 if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
500 dev->hbuf_is_ready = false;
501
502 if (mei_hbm_cl_connect_req(dev, cl)) {
503 rets = -ENODEV;
504 goto out;
505 }
506 cl->timer_count = MEI_CONNECT_TIMEOUT;
507 list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
508 } else {
509 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
510 }
511
512 mutex_unlock(&dev->device_lock);
513 rets = wait_event_timeout(dev->wait_recvd_msg,
514 (cl->state == MEI_FILE_CONNECTED ||
515 cl->state == MEI_FILE_DISCONNECTED),
516 timeout * HZ);
517 mutex_lock(&dev->device_lock);
518
519 if (cl->state != MEI_FILE_CONNECTED) {
520 rets = -EFAULT;
521
522 mei_io_list_flush(&dev->ctrl_rd_list, cl);
523 mei_io_list_flush(&dev->ctrl_wr_list, cl);
524 goto out;
525 }
526
527 rets = cl->status;
528
529out:
530 mei_io_cb_free(cb);
531 return rets;
532}
533
534/**
535 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
536 *
537 * @dev: the device structure
538 * @cl: private data of the file object
539 *
540 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
541 * -ENOENT if mei_cl is not present
542 * -EINVAL if single_recv_buf == 0
543 */
544int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
545{
546 struct mei_device *dev;
547 int i;
548
549 if (WARN_ON(!cl || !cl->dev))
550 return -EINVAL;
551
552 dev = cl->dev;
553
554 if (!dev->me_clients_num)
555 return 0;
556
557 if (cl->mei_flow_ctrl_creds > 0)
558 return 1;
559
560 for (i = 0; i < dev->me_clients_num; i++) {
561 struct mei_me_client *me_cl = &dev->me_clients[i];
562 if (me_cl->client_id == cl->me_client_id) {
563 if (me_cl->mei_flow_ctrl_creds) {
564 if (WARN_ON(me_cl->props.single_recv_buf == 0))
565 return -EINVAL;
566 return 1;
567 } else {
568 return 0;
569 }
570 }
571 }
572 return -ENOENT;
573}
574
575/**
576 * mei_cl_flow_ctrl_reduce - reduces flow_control.
577 *
578 * @dev: the device structure
579 * @cl: private data of the file object
580 * @returns
581 * 0 on success
582 * -ENOENT when me client is not found
583 * -EINVAL when ctrl credits are <= 0
584 */
585int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
586{
587 struct mei_device *dev;
588 int i;
589
590 if (WARN_ON(!cl || !cl->dev))
591 return -EINVAL;
592
593 dev = cl->dev;
594
595 if (!dev->me_clients_num)
596 return -ENOENT;
597
598 for (i = 0; i < dev->me_clients_num; i++) {
599 struct mei_me_client *me_cl = &dev->me_clients[i];
600 if (me_cl->client_id == cl->me_client_id) {
601 if (me_cl->props.single_recv_buf != 0) {
602 if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
603 return -EINVAL;
604 dev->me_clients[i].mei_flow_ctrl_creds--;
605 } else {
606 if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
607 return -EINVAL;
608 cl->mei_flow_ctrl_creds--;
609 }
610 return 0;
611 }
612 }
613 return -ENOENT;
614}
615
616/**
 617 * mei_cl_read_start - starts reading a client message.
618 *
619 * @cl: host client
620 *
621 * returns 0 on success, <0 on failure.
622 */
623int mei_cl_read_start(struct mei_cl *cl)
624{
625 struct mei_device *dev;
626 struct mei_cl_cb *cb;
627 int rets;
628 int i;
629
630 if (WARN_ON(!cl || !cl->dev))
631 return -ENODEV;
632
633 dev = cl->dev;
634
635 if (cl->state != MEI_FILE_CONNECTED)
636 return -ENODEV;
637
638 if (dev->dev_state != MEI_DEV_ENABLED)
639 return -ENODEV;
640
641 if (cl->read_cb) {
642 dev_dbg(&dev->pdev->dev, "read is pending.\n");
643 return -EBUSY;
644 }
645 i = mei_me_cl_by_id(dev, cl->me_client_id);
646 if (i < 0) {
647 dev_err(&dev->pdev->dev, "no such me client %d\n",
648 cl->me_client_id);
649 return -ENODEV;
650 }
651
652 cb = mei_io_cb_init(cl, NULL);
653 if (!cb)
654 return -ENOMEM;
655
656 rets = mei_io_cb_alloc_resp_buf(cb,
657 dev->me_clients[i].props.max_msg_length);
658 if (rets)
659 goto err;
660
661 cb->fop_type = MEI_FOP_READ;
662 cl->read_cb = cb;
663 if (dev->hbuf_is_ready) {
664 dev->hbuf_is_ready = false;
665 if (mei_hbm_cl_flow_control_req(dev, cl)) {
666 rets = -ENODEV;
667 goto err;
668 }
669 list_add_tail(&cb->list, &dev->read_list.list);
670 } else {
671 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
672 }
673 return rets;
674err:
675 mei_io_cb_free(cb);
676 return rets;
677}
678
679/**
680 * mei_cl_all_disconnect - disconnect forcefully all connected clients
681 *
682 * @dev - mei device
683 */
684
685void mei_cl_all_disconnect(struct mei_device *dev)
686{
687 struct mei_cl *cl, *next;
688
689 list_for_each_entry_safe(cl, next, &dev->file_list, link) {
690 cl->state = MEI_FILE_DISCONNECTED;
691 cl->mei_flow_ctrl_creds = 0;
692 cl->read_cb = NULL;
693 cl->timer_count = 0;
694 }
695}
696
697
698/**
699 * mei_cl_all_read_wakeup - wake up all readings so they can be interrupted
700 *
701 * @dev - mei device
702 */
703void mei_cl_all_read_wakeup(struct mei_device *dev)
704{
705 struct mei_cl *cl, *next;
706 list_for_each_entry_safe(cl, next, &dev->file_list, link) {
707 if (waitqueue_active(&cl->rx_wait)) {
708 dev_dbg(&dev->pdev->dev, "Waking up client!\n");
709 wake_up_interruptible(&cl->rx_wait);
710 }
711 }
712}
713
714/**
715 * mei_cl_all_write_clear - clear all pending writes
716
717 * @dev - mei device
718 */
719void mei_cl_all_write_clear(struct mei_device *dev)
720{
721 struct mei_cl_cb *cb, *next;
722
723 list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
724 list_del(&cb->list);
725 mei_io_cb_free(cb);
726 }
727}
728
729
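Taken together, client.c gives the character-device and amthif code a single lifecycle for a host client. Roughly, with error paths trimmed, dev->device_lock assumed held where the helpers require it, and not a literal copy of main.c:

	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);		/* kmalloc + mei_cl_init() */
	if (!cl)
		return -ENOMEM;

	ret = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);	/* claim a free host id */
	if (ret)
		return ret;

	ret = mei_cl_connect(cl, file);		/* HBM connect, waits for the reply */
	if (ret)
		return ret;

	mei_cl_read_start(cl);			/* queue a read and a flow control request */

	/* ... later, on release: */
	cl->state = MEI_FILE_DISCONNECTING;
	mei_cl_disconnect(cl);
	mei_cl_unlink(cl);
	kfree(cl);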
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
new file mode 100644
index 000000000000..214b2397ec3e
--- /dev/null
+++ b/drivers/misc/mei/client.h
@@ -0,0 +1,102 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#ifndef _MEI_CLIENT_H_
18#define _MEI_CLIENT_H_
19
20#include <linux/types.h>
21#include <linux/watchdog.h>
22#include <linux/poll.h>
23#include <linux/mei.h>
24
25#include "mei_dev.h"
26
27int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
28int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
29
30/*
31 * MEI IO Functions
32 */
33struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
34void mei_io_cb_free(struct mei_cl_cb *priv_cb);
35int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
36int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
37
38
39/**
40 * mei_io_list_init - Sets up a queue list.
41 *
 42 * @list: an instance of the cl callback structure
43 */
44static inline void mei_io_list_init(struct mei_cl_cb *list)
45{
46 INIT_LIST_HEAD(&list->list);
47}
48void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
49
50/*
51 * MEI Host Client Functions
52 */
53
54struct mei_cl *mei_cl_allocate(struct mei_device *dev);
55void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
56
57
58int mei_cl_link(struct mei_cl *cl, int id);
59int mei_cl_unlink(struct mei_cl *cl);
60
61int mei_cl_flush_queues(struct mei_cl *cl);
62struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
63
64/**
65 * mei_cl_cmp_id - tells if file private data have same id
66 *
 67 * @cl1: private data of the first file object
 68 * @cl2: private data of the second file object
69 *
70 * returns true - if ids are the same and not NULL
71 */
72static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
73 const struct mei_cl *cl2)
74{
75 return cl1 && cl2 &&
76 (cl1->host_client_id == cl2->host_client_id) &&
77 (cl1->me_client_id == cl2->me_client_id);
78}
79
80
81int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
82
83int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
84/*
85 * MEI input output function prototype
86 */
87bool mei_cl_is_other_connecting(struct mei_cl *cl);
88int mei_cl_disconnect(struct mei_cl *cl);
89
90int mei_cl_read_start(struct mei_cl *cl);
91
92int mei_cl_connect(struct mei_cl *cl, struct file *file);
93
94void mei_host_client_init(struct work_struct *work);
95
96
97void mei_cl_all_disconnect(struct mei_device *dev);
98void mei_cl_all_read_wakeup(struct mei_device *dev);
99void mei_cl_all_write_clear(struct mei_device *dev);
100
101
102#endif /* _MEI_CLIENT_H_ */
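The flow control pair declared here, mei_cl_flow_ctrl_creds() and mei_cl_flow_ctrl_reduce(), is what a write path consults before pushing data. A sketch mirroring the amthif send path earlier in this series (cl, cb, buf assumed in scope; error handling trimmed):

	int creds = mei_cl_flow_ctrl_creds(cl);

	if (creds < 0)
		return creds;			/* me client not found */

	if (creds && dev->hbuf_is_ready) {
		dev->hbuf_is_ready = false;
		/* fill the header and call mei_write_message(dev, &hdr, buf) */
		if (mei_cl_flow_ctrl_reduce(cl))	/* consume one credit per complete message */
			return -ENODEV;
	} else {
		/* no credits or host buffer busy: park the cb for the irq path */
		list_add_tail(&cb->list, &dev->write_list.list);
	}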
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
new file mode 100644
index 000000000000..fb9e63ba3bb1
--- /dev/null
+++ b/drivers/misc/mei/hbm.c
@@ -0,0 +1,669 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/pci.h>
18#include <linux/sched.h>
19#include <linux/wait.h>
20#include <linux/mei.h>
21
22#include "mei_dev.h"
23#include "hbm.h"
24#include "hw-me.h"
25
26/**
27 * mei_hbm_me_cl_allocate - allocates storage for me clients
28 *
29 * @dev: the device structure
30 *
31 * returns none.
32 */
33static void mei_hbm_me_cl_allocate(struct mei_device *dev)
34{
35 struct mei_me_client *clients;
36 int b;
37
38 /* count how many ME clients we have */
39 for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
40 dev->me_clients_num++;
41
42 if (dev->me_clients_num <= 0)
43 return;
44
45 kfree(dev->me_clients);
46 dev->me_clients = NULL;
47
48 dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
49 dev->me_clients_num * sizeof(struct mei_me_client));
50 /* allocate storage for ME clients representation */
51 clients = kcalloc(dev->me_clients_num,
52 sizeof(struct mei_me_client), GFP_KERNEL);
53 if (!clients) {
54 dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
55 dev->dev_state = MEI_DEV_RESETING;
56 mei_reset(dev, 1);
57 return;
58 }
59 dev->me_clients = clients;
60 return;
61}
62
63/**
64 * mei_hbm_cl_hdr - construct client hbm header
65 * @cl: - client
66 * @hbm_cmd: host bus message command
67 * @buf: buffer for cl header
68 * @len: buffer length
69 */
70static inline
71void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
72{
73 struct mei_hbm_cl_cmd *cmd = buf;
74
75 memset(cmd, 0, len);
76
77 cmd->hbm_cmd = hbm_cmd;
78 cmd->host_addr = cl->host_client_id;
79 cmd->me_addr = cl->me_client_id;
80}
81
82/**
 83 * mei_hbm_cl_addr_equal - tells if the client and the hbm command share an address
 84 *
 85 * @cl: host client
 86 * @buf: hbm command message carrying host and me addresses
 87 *
 88 * returns true if the addresses are the same
89 */
90static inline
91bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
92{
93 struct mei_hbm_cl_cmd *cmd = buf;
94 return cl->host_client_id == cmd->host_addr &&
95 cl->me_client_id == cmd->me_addr;
96}
97
98
99/**
100 * is_treat_specially_client - checks if the message belongs
101 * to the file private data.
102 *
103 * @cl: private data of the file object
104 * @rs: connect response bus message
105 *
106 */
107static bool is_treat_specially_client(struct mei_cl *cl,
108 struct hbm_client_connect_response *rs)
109{
110 if (mei_hbm_cl_addr_equal(cl, rs)) {
111 if (!rs->status) {
112 cl->state = MEI_FILE_CONNECTED;
113 cl->status = 0;
114
115 } else {
116 cl->state = MEI_FILE_DISCONNECTED;
117 cl->status = -ENODEV;
118 }
119 cl->timer_count = 0;
120
121 return true;
122 }
123 return false;
124}
125
126/**
127 * mei_hbm_start_req - sends start request message.
128 *
129 * @dev: the device structure
130 */
131void mei_hbm_start_req(struct mei_device *dev)
132{
133 struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
134 struct hbm_host_version_request *start_req;
135 const size_t len = sizeof(struct hbm_host_version_request);
136
137 mei_hbm_hdr(mei_hdr, len);
138
139 /* host start message */
140 start_req = (struct hbm_host_version_request *)dev->wr_msg.data;
141 memset(start_req, 0, len);
142 start_req->hbm_cmd = HOST_START_REQ_CMD;
143 start_req->host_version.major_version = HBM_MAJOR_VERSION;
144 start_req->host_version.minor_version = HBM_MINOR_VERSION;
145
146 dev->recvd_msg = false;
147 if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
148 dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
149 dev->dev_state = MEI_DEV_RESETING;
150 mei_reset(dev, 1);
151 }
152 dev->init_clients_state = MEI_START_MESSAGE;
153 dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
154 return ;
155}
156
157/**
158 * mei_hbm_enum_clients_req - sends enumeration client request message.
159 *
160 * @dev: the device structure
161 *
162 * returns none.
163 */
164static void mei_hbm_enum_clients_req(struct mei_device *dev)
165{
166 struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
167 struct hbm_host_enum_request *enum_req;
168 const size_t len = sizeof(struct hbm_host_enum_request);
169 /* enumerate clients */
170 mei_hbm_hdr(mei_hdr, len);
171
172 enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
173 memset(enum_req, 0, len);
174 enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
175
176 if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
177 dev->dev_state = MEI_DEV_RESETING;
178 dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
179 mei_reset(dev, 1);
180 }
181 dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
182 dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
183 return;
184}
185
186/**
 187 * mei_hbm_prop_req - requests the properties of a single client
188 *
189 * @dev: the device structure
190 *
191 * returns none.
192 */
193
194static int mei_hbm_prop_req(struct mei_device *dev)
195{
196
197 struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
198 struct hbm_props_request *prop_req;
199 const size_t len = sizeof(struct hbm_props_request);
200 unsigned long next_client_index;
201 u8 client_num;
202
203
204 client_num = dev->me_client_presentation_num;
205
206 next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
207 dev->me_client_index);
208
209 /* We got all client properties */
210 if (next_client_index == MEI_CLIENTS_MAX) {
211 schedule_work(&dev->init_work);
212
213 return 0;
214 }
215
216 dev->me_clients[client_num].client_id = next_client_index;
217 dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
218
219 mei_hbm_hdr(mei_hdr, len);
220 prop_req = (struct hbm_props_request *)dev->wr_msg.data;
221
222 memset(prop_req, 0, sizeof(struct hbm_props_request));
223
224
225 prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
226 prop_req->address = next_client_index;
227
228 if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
229 dev->dev_state = MEI_DEV_RESETING;
230 dev_err(&dev->pdev->dev, "Properties request command failed\n");
231 mei_reset(dev, 1);
232
233 return -EIO;
234 }
235
236 dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
237 dev->me_client_index = next_client_index;
238
239 return 0;
240}
241
242/**
243 * mei_hbm_stop_req_prepare - prepare stop request message
244 *
245 * @dev - mei device
246 * @mei_hdr - mei message header
247 * @data - hbm message body buffer
248 */
249static void mei_hbm_stop_req_prepare(struct mei_device *dev,
250 struct mei_msg_hdr *mei_hdr, unsigned char *data)
251{
252 struct hbm_host_stop_request *req =
253 (struct hbm_host_stop_request *)data;
254 const size_t len = sizeof(struct hbm_host_stop_request);
255
256 mei_hbm_hdr(mei_hdr, len);
257
258 memset(req, 0, len);
259 req->hbm_cmd = HOST_STOP_REQ_CMD;
260 req->reason = DRIVER_STOP_REQUEST;
261}
262
263/**
264 * mei_hbm_cl_flow_control_req - sends flow control request.
265 *
266 * @dev: the device structure
267 * @cl: client info
268 *
269 * This function returns -EIO on write failure
270 */
271int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
272{
273 struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
274 const size_t len = sizeof(struct hbm_flow_control);
275
276 mei_hbm_hdr(mei_hdr, len);
277 mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len);
278
279 dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
280 cl->host_client_id, cl->me_client_id);
281
282 return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
283}
284
285/**
286 * mei_hbm_add_single_flow_creds - adds single buffer credentials.
287 *
288 * @dev: the device structure
289 * @flow: flow control message
290 */
291static void mei_hbm_add_single_flow_creds(struct mei_device *dev,
292 struct hbm_flow_control *flow)
293{
294 struct mei_me_client *client;
295 int i;
296
297 for (i = 0; i < dev->me_clients_num; i++) {
298 client = &dev->me_clients[i];
299 if (client && flow->me_addr == client->client_id) {
300 if (client->props.single_recv_buf) {
301 client->mei_flow_ctrl_creds++;
302 dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
303 flow->me_addr);
304 dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
305 client->mei_flow_ctrl_creds);
306 } else {
307 BUG(); /* error in flow control */
308 }
309 }
310 }
311}
312
313/**
314 * mei_hbm_cl_flow_control_res - flow control response from me
315 *
316 * @dev: the device structure
317 * @flow_control: flow control response bus message
318 */
319static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
320 struct hbm_flow_control *flow_control)
321{
322 struct mei_cl *cl = NULL;
323 struct mei_cl *next = NULL;
324
325 if (!flow_control->host_addr) {
326 /* single receive buffer */
327 mei_hbm_add_single_flow_creds(dev, flow_control);
328 return;
329 }
330
331 /* normal connection */
332 list_for_each_entry_safe(cl, next, &dev->file_list, link) {
333 if (mei_hbm_cl_addr_equal(cl, flow_control)) {
334 cl->mei_flow_ctrl_creds++;
335 dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
336 flow_control->host_addr, flow_control->me_addr);
337 dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
338 cl->mei_flow_ctrl_creds);
339 break;
340 }
341 }
342}
343
344
345/**
346 * mei_hbm_cl_disconnect_req - sends disconnect message to fw.
347 *
348 * @dev: the device structure
349 * @cl: a client to disconnect from
350 *
351 * This function returns -EIO on write failure
352 */
353int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
354{
355 struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
356 const size_t len = sizeof(struct hbm_client_connect_request);
357
358 mei_hbm_hdr(mei_hdr, len);
359 mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, dev->wr_msg.data, len);
360
361 return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
362}
363
364/**
365 * mei_hbm_cl_disconnect_res - disconnect response from ME
366 *
367 * @dev: the device structure
368 * @rs: disconnect response bus message
369 */
370static void mei_hbm_cl_disconnect_res(struct mei_device *dev,
371 struct hbm_client_connect_response *rs)
372{
373 struct mei_cl *cl;
374 struct mei_cl_cb *pos = NULL, *next = NULL;
375
376 dev_dbg(&dev->pdev->dev,
377 "disconnect_response:\n"
378 "ME Client = %d\n"
379 "Host Client = %d\n"
380 "Status = %d\n",
381 rs->me_addr,
382 rs->host_addr,
383 rs->status);
384
385 list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
386 cl = pos->cl;
387
388 if (!cl) {
389 list_del(&pos->list);
390 return;
391 }
392
393 dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
394 if (mei_hbm_cl_addr_equal(cl, rs)) {
395 list_del(&pos->list);
396 if (!rs->status)
397 cl->state = MEI_FILE_DISCONNECTED;
398
399 cl->status = 0;
400 cl->timer_count = 0;
401 break;
402 }
403 }
404}
405
406/**
407 * mei_hbm_cl_connect_req - send connection request to specific me client
408 *
409 * @dev: the device structure
410 * @cl: a client to connect to
411 *
412 * returns -EIO on write failure
413 */
414int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
415{
416 struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
417 const size_t len = sizeof(struct hbm_client_connect_request);
418
419 mei_hbm_hdr(mei_hdr, len);
420 mei_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, dev->wr_msg.data, len);
421
422 return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
423}
424
425/**
426 * mei_hbm_cl_connect_res - connect response from the ME
427 *
428 * @dev: the device structure
429 * @rs: connect response bus message
430 */
431static void mei_hbm_cl_connect_res(struct mei_device *dev,
432 struct hbm_client_connect_response *rs)
433{
434
435 struct mei_cl *cl;
436 struct mei_cl_cb *pos = NULL, *next = NULL;
437
438 dev_dbg(&dev->pdev->dev,
439 "connect_response:\n"
440 "ME Client = %d\n"
441 "Host Client = %d\n"
442 "Status = %d\n",
443 rs->me_addr,
444 rs->host_addr,
445 rs->status);
446
447 /* if WD or iamthif client treat specially */
448
449 if (is_treat_specially_client(&dev->wd_cl, rs)) {
450 dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
451 mei_watchdog_register(dev);
452
453 return;
454 }
455
456 if (is_treat_specially_client(&dev->iamthif_cl, rs)) {
457 dev->iamthif_state = MEI_IAMTHIF_IDLE;
458 return;
459 }
460 list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
461
462 cl = pos->cl;
463 if (!cl) {
464 list_del(&pos->list);
465 return;
466 }
467 if (pos->fop_type == MEI_FOP_IOCTL) {
468 if (is_treat_specially_client(cl, rs)) {
469 list_del(&pos->list);
470 cl->status = 0;
471 cl->timer_count = 0;
472 break;
473 }
474 }
475 }
476}
477
478
479/**
480 * mei_hbm_fw_disconnect_req - disconnect request initiated by the ME firmware;
481 *	host sends disconnect response
482 *
483 * @dev: the device structure.
484 * @disconnect_req: disconnect request bus message from the me
485 */
486static void mei_hbm_fw_disconnect_req(struct mei_device *dev,
487 struct hbm_client_connect_request *disconnect_req)
488{
489 struct mei_cl *cl, *next;
490 const size_t len = sizeof(struct hbm_client_connect_response);
491
492 list_for_each_entry_safe(cl, next, &dev->file_list, link) {
493 if (mei_hbm_cl_addr_equal(cl, disconnect_req)) {
494 dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
495 disconnect_req->host_addr,
496 disconnect_req->me_addr);
497 cl->state = MEI_FILE_DISCONNECTED;
498 cl->timer_count = 0;
499 if (cl == &dev->wd_cl)
500 dev->wd_pending = false;
501 else if (cl == &dev->iamthif_cl)
502 dev->iamthif_timer = 0;
503
504 /* prepare disconnect response */
505 mei_hbm_hdr(&dev->wr_ext_msg.hdr, len);
506 mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD,
507 dev->wr_ext_msg.data, len);
508 break;
509 }
510 }
511}
512
513
514/**
515 * mei_hbm_dispatch - bottom half read routine after ISR to
516 * handle the read bus message cmd processing.
517 *
518 * @dev: the device structure
519 * @hdr: header of bus message
520 */
521void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
522{
523 struct mei_bus_message *mei_msg;
524 struct mei_me_client *me_client;
525 struct hbm_host_version_response *version_res;
526 struct hbm_client_connect_response *connect_res;
527 struct hbm_client_connect_response *disconnect_res;
528 struct hbm_client_connect_request *disconnect_req;
529 struct hbm_flow_control *flow_control;
530 struct hbm_props_response *props_res;
531 struct hbm_host_enum_response *enum_res;
532
533 /* read the message to our buffer */
534 BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf));
535 mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
536 mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
537
538 switch (mei_msg->hbm_cmd) {
539 case HOST_START_RES_CMD:
540 version_res = (struct hbm_host_version_response *)mei_msg;
541 if (!version_res->host_version_supported) {
542 dev->version = version_res->me_max_version;
543 dev_dbg(&dev->pdev->dev, "version mismatch.\n");
544
545 mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr,
546 dev->wr_msg.data);
547 mei_write_message(dev, &dev->wr_msg.hdr,
548 dev->wr_msg.data);
549 return;
550 }
551
552 dev->version.major_version = HBM_MAJOR_VERSION;
553 dev->version.minor_version = HBM_MINOR_VERSION;
554 if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
555 dev->init_clients_state == MEI_START_MESSAGE) {
556 dev->init_clients_timer = 0;
557 mei_hbm_enum_clients_req(dev);
558 } else {
559 dev->recvd_msg = false;
560 dev_dbg(&dev->pdev->dev, "reset due to received hbm: host start\n");
561 mei_reset(dev, 1);
562 return;
563 }
564
565 dev->recvd_msg = true;
566 dev_dbg(&dev->pdev->dev, "host start response message received.\n");
567 break;
568
569 case CLIENT_CONNECT_RES_CMD:
570 connect_res = (struct hbm_client_connect_response *) mei_msg;
571 mei_hbm_cl_connect_res(dev, connect_res);
572 dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
573 wake_up(&dev->wait_recvd_msg);
574 break;
575
576 case CLIENT_DISCONNECT_RES_CMD:
577 disconnect_res = (struct hbm_client_connect_response *) mei_msg;
578 mei_hbm_cl_disconnect_res(dev, disconnect_res);
579 dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
580 wake_up(&dev->wait_recvd_msg);
581 break;
582
583 case MEI_FLOW_CONTROL_CMD:
584 flow_control = (struct hbm_flow_control *) mei_msg;
585 mei_hbm_cl_flow_control_res(dev, flow_control);
586 dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
587 break;
588
589 case HOST_CLIENT_PROPERTIES_RES_CMD:
590 props_res = (struct hbm_props_response *)mei_msg;
591 me_client = &dev->me_clients[dev->me_client_presentation_num];
592
593 if (props_res->status || !dev->me_clients) {
594 dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
595 mei_reset(dev, 1);
596 return;
597 }
598
599 if (me_client->client_id != props_res->address) {
600 dev_err(&dev->pdev->dev,
601 "Host client properties reply mismatch\n");
602 mei_reset(dev, 1);
603
604 return;
605 }
606
607 if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
608 dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
609 dev_err(&dev->pdev->dev,
610 "Unexpected client properties reply\n");
611 mei_reset(dev, 1);
612
613 return;
614 }
615
616 me_client->props = props_res->client_properties;
617 dev->me_client_index++;
618 dev->me_client_presentation_num++;
619
620 /* request property for the next client */
621 mei_hbm_prop_req(dev);
622
623 break;
624
625 case HOST_ENUM_RES_CMD:
626 enum_res = (struct hbm_host_enum_response *) mei_msg;
627 memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
628 if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
629 dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
630 dev->init_clients_timer = 0;
631 dev->me_client_presentation_num = 0;
632 dev->me_client_index = 0;
633 mei_hbm_me_cl_allocate(dev);
634 dev->init_clients_state =
635 MEI_CLIENT_PROPERTIES_MESSAGE;
636
637			/* first property request */
638 mei_hbm_prop_req(dev);
639 } else {
640 dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
641 mei_reset(dev, 1);
642 return;
643 }
644 break;
645
646 case HOST_STOP_RES_CMD:
647 dev->dev_state = MEI_DEV_DISABLED;
648 dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
649 mei_reset(dev, 1);
650 break;
651
652 case CLIENT_DISCONNECT_REQ_CMD:
653 /* search for client */
654 disconnect_req = (struct hbm_client_connect_request *)mei_msg;
655 mei_hbm_fw_disconnect_req(dev, disconnect_req);
656 break;
657
658 case ME_STOP_REQ_CMD:
659
660 mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr,
661 dev->wr_ext_msg.data);
662 break;
663 default:
664 BUG();
665 break;
666
667 }
668}
669
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
new file mode 100644
index 000000000000..b552afbaf85c
--- /dev/null
+++ b/drivers/misc/mei/hbm.h
@@ -0,0 +1,39 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#ifndef _MEI_HBM_H_
18#define _MEI_HBM_H_
19
20void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr);
21
22static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
23{
24 hdr->host_addr = 0;
25 hdr->me_addr = 0;
26 hdr->length = length;
27 hdr->msg_complete = 1;
28 hdr->reserved = 0;
29}
30
31void mei_hbm_start_req(struct mei_device *dev);
32
33int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
34int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl);
35int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
36
37
38#endif /* _MEI_HBM_H_ */
39
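
A minimal usage sketch (not part of the patch): mei_hbm_hdr() always produces a bus-addressed header, i.e. host_addr and me_addr are zero; the client a command refers to travels in the payload (struct mei_hbm_cl_cmd, added to hw.h further below), which is what mei_hbm_cl_addr_equal() in hbm.c matches against. The helper name here is illustrative only.

static void example_prepare_enum_hdr(struct mei_device *dev)
{
	const size_t len = sizeof(struct hbm_host_enum_request);

	mei_hbm_hdr(&dev->wr_msg.hdr, len);
	/* dev->wr_msg.hdr now carries host_addr = 0, me_addr = 0,
	 * length = len and msg_complete = 1; for client commands the
	 * target client is identified in the payload written to
	 * dev->wr_msg.data, not in this header. */
}
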
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
new file mode 100644
index 000000000000..6a203b6e8346
--- /dev/null
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -0,0 +1,167 @@
1/******************************************************************************
2 * Intel Management Engine Interface (Intel MEI) Linux driver
3 * Intel MEI Interface Header
4 *
5 * This file is provided under a dual BSD/GPLv2 license. When using or
6 * redistributing this file, you may do so under either license.
7 *
8 * GPL LICENSE SUMMARY
9 *
10 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called LICENSE.GPL.
28 *
29 * Contact Information:
30 * Intel Corporation.
31 * linux-mei@linux.intel.com
32 * http://www.intel.com
33 *
34 * BSD LICENSE
35 *
36 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
37 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * * Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * * Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * * Neither the name Intel Corporation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
66#ifndef _MEI_HW_MEI_REGS_H_
67#define _MEI_HW_MEI_REGS_H_
68
69/*
70 * MEI device IDs
71 */
72#define MEI_DEV_ID_82946GZ 0x2974 /* 82946GZ/GL */
73#define MEI_DEV_ID_82G35 0x2984 /* 82G35 Express */
74#define MEI_DEV_ID_82Q965 0x2994 /* 82Q963/Q965 */
75#define MEI_DEV_ID_82G965 0x29A4 /* 82P965/G965 */
76
77#define MEI_DEV_ID_82GM965 0x2A04 /* Mobile PM965/GM965 */
78#define MEI_DEV_ID_82GME965 0x2A14 /* Mobile GME965/GLE960 */
79
80#define MEI_DEV_ID_ICH9_82Q35 0x29B4 /* 82Q35 Express */
81#define MEI_DEV_ID_ICH9_82G33 0x29C4 /* 82G33/G31/P35/P31 Express */
82#define MEI_DEV_ID_ICH9_82Q33 0x29D4 /* 82Q33 Express */
83#define MEI_DEV_ID_ICH9_82X38 0x29E4 /* 82X38/X48 Express */
84#define MEI_DEV_ID_ICH9_3200 0x29F4 /* 3200/3210 Server */
85
86#define MEI_DEV_ID_ICH9_6 0x28B4 /* Bearlake */
87#define MEI_DEV_ID_ICH9_7 0x28C4 /* Bearlake */
88#define MEI_DEV_ID_ICH9_8 0x28D4 /* Bearlake */
89#define MEI_DEV_ID_ICH9_9 0x28E4 /* Bearlake */
90#define MEI_DEV_ID_ICH9_10 0x28F4 /* Bearlake */
91
92#define MEI_DEV_ID_ICH9M_1 0x2A44 /* Cantiga */
93#define MEI_DEV_ID_ICH9M_2 0x2A54 /* Cantiga */
94#define MEI_DEV_ID_ICH9M_3 0x2A64 /* Cantiga */
95#define MEI_DEV_ID_ICH9M_4 0x2A74 /* Cantiga */
96
97#define MEI_DEV_ID_ICH10_1 0x2E04 /* Eaglelake */
98#define MEI_DEV_ID_ICH10_2 0x2E14 /* Eaglelake */
99#define MEI_DEV_ID_ICH10_3 0x2E24 /* Eaglelake */
100#define MEI_DEV_ID_ICH10_4 0x2E34 /* Eaglelake */
101
102#define MEI_DEV_ID_IBXPK_1 0x3B64 /* Calpella */
103#define MEI_DEV_ID_IBXPK_2 0x3B65 /* Calpella */
104
105#define MEI_DEV_ID_CPT_1      0x1C3A  /* Cougar Point */
106#define MEI_DEV_ID_PBG_1 0x1D3A /* C600/X79 Patsburg */
107
108#define MEI_DEV_ID_PPT_1 0x1E3A /* Panther Point */
109#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */
110#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
111
112#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */
113#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
114/*
115 * MEI HW Section
116 */
117
118/* MEI registers */
119/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
120#define H_CB_WW 0
121/* H_CSR - Host Control Status register */
122#define H_CSR 4
123/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */
124#define ME_CB_RW 8
125/* ME_CSR_HA - ME Control Status Host Access register (read only) */
126#define ME_CSR_HA 0xC
127
128
129/* register bits of H_CSR (Host Control Status register) */
130/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
131#define H_CBD 0xFF000000
132/* Host Circular Buffer Write Pointer */
133#define H_CBWP 0x00FF0000
134/* Host Circular Buffer Read Pointer */
135#define H_CBRP 0x0000FF00
136/* Host Reset */
137#define H_RST 0x00000010
138/* Host Ready */
139#define H_RDY 0x00000008
140/* Host Interrupt Generate */
141#define H_IG 0x00000004
142/* Host Interrupt Status */
143#define H_IS 0x00000002
144/* Host Interrupt Enable */
145#define H_IE 0x00000001
146
147
148/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
149/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
150access to ME_CBD */
151#define ME_CBD_HRA 0xFF000000
152/* ME CB Write Pointer HRA - host read only access to ME_CBWP */
153#define ME_CBWP_HRA 0x00FF0000
154/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
155#define ME_CBRP_HRA 0x0000FF00
156/* ME Reset HRA - host read only access to ME_RST */
157#define ME_RST_HRA 0x00000010
158/* ME Ready HRA - host read only access to ME_RDY */
159#define ME_RDY_HRA 0x00000008
160/* ME Interrupt Generate HRA - host read only access to ME_IG */
161#define ME_IG_HRA 0x00000004
162/* ME Interrupt Status HRA - host read only access to ME_IS */
163#define ME_IS_HRA 0x00000002
164/* ME Interrupt Enable HRA - host read only access to ME_IE */
165#define ME_IE_HRA 0x00000001
166
167#endif /* _MEI_HW_MEI_REGS_H_ */
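
The H_CSR bit masks above pack the host circular buffer geometry into a single 32-bit register. A minimal decoding sketch, mirroring what mei_me_hw_config() and mei_hbuf_filled_slots() in hw-me.c below do (the helper name is illustrative):

static unsigned char example_hbuf_filled(u32 hcsr)
{
	unsigned char wp = (hcsr & H_CBWP) >> 16;	/* host CB write pointer */
	unsigned char rp = (hcsr & H_CBRP) >> 8;	/* host CB read pointer */

	/* the buffer depth sits in the top byte: (hcsr & H_CBD) >> 24;
	 * mei_me_hbuf_empty_slots() reports -EOVERFLOW when filled > depth */
	return (unsigned char)(wp - rp);	/* pointers are free running, wrap is fine */
}
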
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
new file mode 100644
index 000000000000..45ea7185c003
--- /dev/null
+++ b/drivers/misc/mei/hw-me.c
@@ -0,0 +1,576 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/pci.h>
18
19#include <linux/kthread.h>
20#include <linux/interrupt.h>
21
22#include "mei_dev.h"
23#include "hw-me.h"
24
25#include "hbm.h"
26
27
28/**
29 * mei_reg_read - Reads 32bit data from the mei device
30 *
31 * @hw: the me hardware structure
32 * @offset: offset from which to read the data
33 *
34 * returns register value (u32)
35 */
36static inline u32 mei_reg_read(const struct mei_me_hw *hw,
37 unsigned long offset)
38{
39 return ioread32(hw->mem_addr + offset);
40}
41
42
43/**
44 * mei_reg_write - Writes 32bit data to the mei device
45 *
46 * @hw: the me hardware structure
47 * @offset: offset to which to write the data
48 * @value: register value to write (u32)
49 */
50static inline void mei_reg_write(const struct mei_me_hw *hw,
51 unsigned long offset, u32 value)
52{
53 iowrite32(value, hw->mem_addr + offset);
54}
55
56/**
57 * mei_mecbrw_read - Reads 32bit data from ME circular buffer
58 * read window register
59 *
60 * @dev: the device structure
61 *
62 * returns ME_CB_RW register value (u32)
63 */
64static u32 mei_me_mecbrw_read(const struct mei_device *dev)
65{
66 return mei_reg_read(to_me_hw(dev), ME_CB_RW);
67}
68/**
69 * mei_mecsr_read - Reads 32bit data from the ME CSR
70 *
71 * @hw: the me hardware structure
72 *
73 * returns ME_CSR_HA register value (u32)
74 */
75static inline u32 mei_mecsr_read(const struct mei_me_hw *hw)
76{
77 return mei_reg_read(hw, ME_CSR_HA);
78}
79
80/**
81 * mei_hcsr_read - Reads 32bit data from the host CSR
82 *
83 * @hw: the me hardware structure
84 *
85 * returns H_CSR register value (u32)
86 */
87static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
88{
89 return mei_reg_read(hw, H_CSR);
90}
91
92/**
93 * mei_hcsr_set - writes H_CSR register to the mei device,
94 * and ignores the H_IS bit for it is write-one-to-zero.
95 *
96 * @hw: the me hardware structure
97 */
98static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
99{
100 hcsr &= ~H_IS;
101 mei_reg_write(hw, H_CSR, hcsr);
102}
103
104
105/**
106 * me_hw_config - configure hw dependent settings
107 *
108 * @dev: mei device
109 */
110static void mei_me_hw_config(struct mei_device *dev)
111{
112 u32 hcsr = mei_hcsr_read(to_me_hw(dev));
113 /* Doesn't change in runtime */
114 dev->hbuf_depth = (hcsr & H_CBD) >> 24;
115}
116/**
117 * mei_me_intr_clear - clear and stop interrupts
118 *
119 * @dev: the device structure
120 */
121static void mei_me_intr_clear(struct mei_device *dev)
122{
123 struct mei_me_hw *hw = to_me_hw(dev);
124 u32 hcsr = mei_hcsr_read(hw);
125 if ((hcsr & H_IS) == H_IS)
126 mei_reg_write(hw, H_CSR, hcsr);
127}
128/**
129 * mei_me_intr_enable - enables mei device interrupts
130 *
131 * @dev: the device structure
132 */
133static void mei_me_intr_enable(struct mei_device *dev)
134{
135 struct mei_me_hw *hw = to_me_hw(dev);
136 u32 hcsr = mei_hcsr_read(hw);
137 hcsr |= H_IE;
138 mei_hcsr_set(hw, hcsr);
139}
140
141/**
142 * mei_me_intr_disable - disables mei device interrupts
143 *
144 * @dev: the device structure
145 */
146static void mei_me_intr_disable(struct mei_device *dev)
147{
148 struct mei_me_hw *hw = to_me_hw(dev);
149 u32 hcsr = mei_hcsr_read(hw);
150 hcsr &= ~H_IE;
151 mei_hcsr_set(hw, hcsr);
152}
153
154/**
155 * mei_me_hw_reset - resets fw via mei csr register.
156 *
157 * @dev: the device structure
158 * @intr_enable: if interrupts should be enabled after reset.
159 */
160static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
161{
162 struct mei_me_hw *hw = to_me_hw(dev);
163 u32 hcsr = mei_hcsr_read(hw);
164
165 dev_dbg(&dev->pdev->dev, "before reset HCSR = 0x%08x.\n", hcsr);
166
167 hcsr |= (H_RST | H_IG);
168
169 if (intr_enable)
170 hcsr |= H_IE;
171 else
172 hcsr &= ~H_IE;
173
174 mei_hcsr_set(hw, hcsr);
175
176 hcsr = mei_hcsr_read(hw) | H_IG;
177 hcsr &= ~H_RST;
178
179 mei_hcsr_set(hw, hcsr);
180
181 hcsr = mei_hcsr_read(hw);
182
183 dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr);
184}
185
186/**
187 * mei_me_host_set_ready - enable device
188 *
189 * @dev - mei device
190 *
191 */
192
193static void mei_me_host_set_ready(struct mei_device *dev)
194{
195 struct mei_me_hw *hw = to_me_hw(dev);
196 hw->host_hw_state |= H_IE | H_IG | H_RDY;
197 mei_hcsr_set(hw, hw->host_hw_state);
198}
199/**
200 * mei_me_host_is_ready - check whether the host has turned ready
201 *
202 * @dev - mei device
203 * returns bool
204 */
205static bool mei_me_host_is_ready(struct mei_device *dev)
206{
207 struct mei_me_hw *hw = to_me_hw(dev);
208 hw->host_hw_state = mei_hcsr_read(hw);
209 return (hw->host_hw_state & H_RDY) == H_RDY;
210}
211
212/**
213 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
214 *
215 * @dev - mei device
216 * returns bool
217 */
218static bool mei_me_hw_is_ready(struct mei_device *dev)
219{
220 struct mei_me_hw *hw = to_me_hw(dev);
221 hw->me_hw_state = mei_mecsr_read(hw);
222 return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
223}
224
225/**
226 * mei_hbuf_filled_slots - gets number of device filled buffer slots
227 *
228 * @dev: the device structure
229 *
230 * returns number of filled slots
231 */
232static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
233{
234 struct mei_me_hw *hw = to_me_hw(dev);
235 char read_ptr, write_ptr;
236
237 hw->host_hw_state = mei_hcsr_read(hw);
238
239 read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
240 write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);
241
242 return (unsigned char) (write_ptr - read_ptr);
243}
244
245/**
246 * mei_me_hbuf_is_empty - checks if host buffer is empty.
247 *
248 * @dev: the device structure
249 *
250 * returns true if empty, false - otherwise.
251 */
252static bool mei_me_hbuf_is_empty(struct mei_device *dev)
253{
254 return mei_hbuf_filled_slots(dev) == 0;
255}
256
257/**
258 * mei_me_hbuf_empty_slots - counts write empty slots.
259 *
260 * @dev: the device structure
261 *
262 * returns -EOVERFLOW if overflow, otherwise empty slots count
263 */
264static int mei_me_hbuf_empty_slots(struct mei_device *dev)
265{
266 unsigned char filled_slots, empty_slots;
267
268 filled_slots = mei_hbuf_filled_slots(dev);
269 empty_slots = dev->hbuf_depth - filled_slots;
270
271 /* check for overflow */
272 if (filled_slots > dev->hbuf_depth)
273 return -EOVERFLOW;
274
275 return empty_slots;
276}
277
278static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
279{
280 return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
281}
282
283
284/**
285 * mei_me_write_message - writes a message to the mei device.
286 *
287 * @dev: the device structure
288 * @header: mei HECI header of message
289 * @buf: message payload to be written
290 *
291 * This function returns -EIO if write has failed
292 */
293static int mei_me_write_message(struct mei_device *dev,
294 struct mei_msg_hdr *header,
295 unsigned char *buf)
296{
297 struct mei_me_hw *hw = to_me_hw(dev);
298 unsigned long rem, dw_cnt;
299 unsigned long length = header->length;
300 u32 *reg_buf = (u32 *)buf;
301 u32 hcsr;
302 int i;
303 int empty_slots;
304
305 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
306
307 empty_slots = mei_hbuf_empty_slots(dev);
308 dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
309
310 dw_cnt = mei_data2slots(length);
311 if (empty_slots < 0 || dw_cnt > empty_slots)
312 return -EIO;
313
314 mei_reg_write(hw, H_CB_WW, *((u32 *) header));
315
316 for (i = 0; i < length / 4; i++)
317 mei_reg_write(hw, H_CB_WW, reg_buf[i]);
318
319 rem = length & 0x3;
320 if (rem > 0) {
321 u32 reg = 0;
322 memcpy(&reg, &buf[length - rem], rem);
323 mei_reg_write(hw, H_CB_WW, reg);
324 }
325
326 hcsr = mei_hcsr_read(hw) | H_IG;
327 mei_hcsr_set(hw, hcsr);
328 if (!mei_me_hw_is_ready(dev))
329 return -EIO;
330
331 return 0;
332}
333
334/**
335 * mei_me_count_full_read_slots - counts read full slots.
336 *
337 * @dev: the device structure
338 *
339 * returns -EOVERFLOW if overflow, otherwise filled slots count
340 */
341static int mei_me_count_full_read_slots(struct mei_device *dev)
342{
343 struct mei_me_hw *hw = to_me_hw(dev);
344 char read_ptr, write_ptr;
345 unsigned char buffer_depth, filled_slots;
346
347 hw->me_hw_state = mei_mecsr_read(hw);
348 buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
349 read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
350 write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
351 filled_slots = (unsigned char) (write_ptr - read_ptr);
352
353 /* check for overflow */
354 if (filled_slots > buffer_depth)
355 return -EOVERFLOW;
356
357 dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
358 return (int)filled_slots;
359}
360
361/**
362 * mei_me_read_slots - reads a message from mei device.
363 *
364 * @dev: the device structure
365 * @buffer: message buffer to be filled
366 * @buffer_length: length of the message to read
367 */
368static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
369 unsigned long buffer_length)
370{
371 struct mei_me_hw *hw = to_me_hw(dev);
372 u32 *reg_buf = (u32 *)buffer;
373 u32 hcsr;
374
375 for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
376 *reg_buf++ = mei_me_mecbrw_read(dev);
377
378 if (buffer_length > 0) {
379 u32 reg = mei_me_mecbrw_read(dev);
380 memcpy(reg_buf, &reg, buffer_length);
381 }
382
383 hcsr = mei_hcsr_read(hw) | H_IG;
384 mei_hcsr_set(hw, hcsr);
385 return 0;
386}
387
388/**
389 * mei_me_irq_quick_handler - The ISR of the MEI device
390 *
391 * @irq: The irq number
392 * @dev_id: pointer to the device structure
393 *
394 * returns irqreturn_t
395 */
396
397irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
398{
399 struct mei_device *dev = (struct mei_device *) dev_id;
400 struct mei_me_hw *hw = to_me_hw(dev);
401 u32 csr_reg = mei_hcsr_read(hw);
402
403 if ((csr_reg & H_IS) != H_IS)
404 return IRQ_NONE;
405
406 /* clear H_IS bit in H_CSR */
407 mei_reg_write(hw, H_CSR, csr_reg);
408
409 return IRQ_WAKE_THREAD;
410}
411
412/**
413 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
414 * processing.
415 *
416 * @irq: The irq number
417 * @dev_id: pointer to the device structure
418 *
419 * returns irqreturn_t
420 *
421 */
422irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
423{
424 struct mei_device *dev = (struct mei_device *) dev_id;
425 struct mei_cl_cb complete_list;
426 struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
427 struct mei_cl *cl;
428 s32 slots;
429 int rets;
430 bool bus_message_received;
431
432
433 dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
434 /* initialize our complete list */
435 mutex_lock(&dev->device_lock);
436 mei_io_list_init(&complete_list);
437
438 /* Ack the interrupt here
439 * In case of MSI we don't go through the quick handler */
440 if (pci_dev_msi_enabled(dev->pdev))
441 mei_clear_interrupts(dev);
442
443 /* check if ME wants a reset */
444 if (!mei_hw_is_ready(dev) &&
445 dev->dev_state != MEI_DEV_RESETING &&
446 dev->dev_state != MEI_DEV_INITIALIZING) {
447 dev_dbg(&dev->pdev->dev, "FW not ready.\n");
448 mei_reset(dev, 1);
449 mutex_unlock(&dev->device_lock);
450 return IRQ_HANDLED;
451 }
452
453 /* check if we need to start the dev */
454 if (!mei_host_is_ready(dev)) {
455 if (mei_hw_is_ready(dev)) {
456 dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
457
458 mei_host_set_ready(dev);
459
460 dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
461			/* link is established; start sending messages. */
462
463 dev->dev_state = MEI_DEV_INIT_CLIENTS;
464
465 mei_hbm_start_req(dev);
466 mutex_unlock(&dev->device_lock);
467 return IRQ_HANDLED;
468 } else {
469 dev_dbg(&dev->pdev->dev, "FW not ready.\n");
470 mutex_unlock(&dev->device_lock);
471 return IRQ_HANDLED;
472 }
473 }
474 /* check slots available for reading */
475 slots = mei_count_full_read_slots(dev);
476 while (slots > 0) {
477 /* we have urgent data to send so break the read */
478 if (dev->wr_ext_msg.hdr.length)
479 break;
480 dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
481 dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n");
482 rets = mei_irq_read_handler(dev, &complete_list, &slots);
483 if (rets)
484 goto end;
485 }
486 rets = mei_irq_write_handler(dev, &complete_list);
487end:
488 dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
489 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
490
491 bus_message_received = false;
492 if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
493 dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
494 bus_message_received = true;
495 }
496 mutex_unlock(&dev->device_lock);
497 if (bus_message_received) {
498 dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
499 wake_up_interruptible(&dev->wait_recvd_msg);
500 bus_message_received = false;
501 }
502 if (list_empty(&complete_list.list))
503 return IRQ_HANDLED;
504
505
506 list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
507 cl = cb_pos->cl;
508 list_del(&cb_pos->list);
509 if (cl) {
510 if (cl != &dev->iamthif_cl) {
511 dev_dbg(&dev->pdev->dev, "completing call back.\n");
512 mei_irq_complete_handler(cl, cb_pos);
513 cb_pos = NULL;
514 } else if (cl == &dev->iamthif_cl) {
515 mei_amthif_complete(dev, cb_pos);
516 }
517 }
518 }
519 return IRQ_HANDLED;
520}
521static const struct mei_hw_ops mei_me_hw_ops = {
522
523 .host_set_ready = mei_me_host_set_ready,
524 .host_is_ready = mei_me_host_is_ready,
525
526 .hw_is_ready = mei_me_hw_is_ready,
527 .hw_reset = mei_me_hw_reset,
528 .hw_config = mei_me_hw_config,
529
530 .intr_clear = mei_me_intr_clear,
531 .intr_enable = mei_me_intr_enable,
532 .intr_disable = mei_me_intr_disable,
533
534 .hbuf_free_slots = mei_me_hbuf_empty_slots,
535 .hbuf_is_ready = mei_me_hbuf_is_empty,
536 .hbuf_max_len = mei_me_hbuf_max_len,
537
538 .write = mei_me_write_message,
539
540 .rdbuf_full_slots = mei_me_count_full_read_slots,
541 .read_hdr = mei_me_mecbrw_read,
542 .read = mei_me_read_slots
543};
544
545/**
546 * mei_me_dev_init - allocates and initializes the mei device structure
547 *
548 * @pdev: The pci device structure
549 *
550 * returns the mei_device pointer on success, NULL on failure.
551 */
552struct mei_device *mei_me_dev_init(struct pci_dev *pdev)
553{
554 struct mei_device *dev;
555
556 dev = kzalloc(sizeof(struct mei_device) +
557 sizeof(struct mei_me_hw), GFP_KERNEL);
558 if (!dev)
559 return NULL;
560
561 mei_device_init(dev);
562
563 INIT_LIST_HEAD(&dev->wd_cl.link);
564 INIT_LIST_HEAD(&dev->iamthif_cl.link);
565 mei_io_list_init(&dev->amthif_cmd_list);
566 mei_io_list_init(&dev->amthif_rd_complete_list);
567
568 INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
569 INIT_WORK(&dev->init_work, mei_host_client_init);
570
571 dev->ops = &mei_me_hw_ops;
572
573 dev->pdev = pdev;
574 return dev;
575}
576
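
The mei_me_hw_ops table is how the hardware-independent core reaches this file: dev->ops is set in mei_me_dev_init() and the generic helpers used elsewhere in this series (mei_hw_config(), mei_clear_interrupts() and mei_hw_reset() in init.c below) dispatch through it. A rough sketch of that indirection, assuming the wrapper shape; the actual inlines live in mei_dev.h, not in this hunk:

static inline void example_hw_reset(struct mei_device *dev, bool intr_enable)
{
	dev->ops->hw_reset(dev, intr_enable);	/* lands in mei_me_hw_reset() above */
}

static inline bool example_hw_is_ready(struct mei_device *dev)
{
	return dev->ops->hw_is_ready(dev);	/* lands in mei_me_hw_is_ready() above */
}
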
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
new file mode 100644
index 000000000000..8518d3eeb838
--- /dev/null
+++ b/drivers/misc/mei/hw-me.h
@@ -0,0 +1,48 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17
18
19#ifndef _MEI_INTERFACE_H_
20#define _MEI_INTERFACE_H_
21
22#include <linux/mei.h>
23#include "mei_dev.h"
24#include "client.h"
25
26struct mei_me_hw {
27 void __iomem *mem_addr;
28 /*
29 * hw states of host and fw(ME)
30 */
31 u32 host_hw_state;
32 u32 me_hw_state;
33};
34
35#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
36
37struct mei_device *mei_me_dev_init(struct pci_dev *pdev);
38
39/* get slots (dwords) from a message length + header (bytes) */
40static inline unsigned char mei_data2slots(size_t length)
41{
42 return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
43}
44
45irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
46irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
47
48#endif /* _MEI_INTERFACE_H_ */
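
mei_data2slots() converts a payload length in bytes into the number of 32-bit write-window slots the message occupies, header included. A quick worked example, assuming the 4-byte on-the-wire struct mei_msg_hdr (the helper name is illustrative):

static unsigned char example_slots_for_six_bytes(void)
{
	/* DIV_ROUND_UP(4 + 6, 4) == 3 slots for a 6-byte payload */
	return mei_data2slots(6);
}

mei_me_write_message() in hw-me.c compares this count against mei_me_hbuf_empty_slots() and bails out with -EIO when the message does not fit.
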
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index be8ca6b333ca..cb2f556b4252 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -31,109 +31,6 @@
31#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */ 31#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
32#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */ 32#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
33 33
34/*
35 * Internal Clients Number
36 */
37#define MEI_WD_HOST_CLIENT_ID 1
38#define MEI_IAMTHIF_HOST_CLIENT_ID 2
39
40/*
41 * MEI device IDs
42 */
43#define MEI_DEV_ID_82946GZ 0x2974 /* 82946GZ/GL */
44#define MEI_DEV_ID_82G35 0x2984 /* 82G35 Express */
45#define MEI_DEV_ID_82Q965 0x2994 /* 82Q963/Q965 */
46#define MEI_DEV_ID_82G965 0x29A4 /* 82P965/G965 */
47
48#define MEI_DEV_ID_82GM965 0x2A04 /* Mobile PM965/GM965 */
49#define MEI_DEV_ID_82GME965 0x2A14 /* Mobile GME965/GLE960 */
50
51#define MEI_DEV_ID_ICH9_82Q35 0x29B4 /* 82Q35 Express */
52#define MEI_DEV_ID_ICH9_82G33 0x29C4 /* 82G33/G31/P35/P31 Express */
53#define MEI_DEV_ID_ICH9_82Q33 0x29D4 /* 82Q33 Express */
54#define MEI_DEV_ID_ICH9_82X38 0x29E4 /* 82X38/X48 Express */
55#define MEI_DEV_ID_ICH9_3200 0x29F4 /* 3200/3210 Server */
56
57#define MEI_DEV_ID_ICH9_6 0x28B4 /* Bearlake */
58#define MEI_DEV_ID_ICH9_7 0x28C4 /* Bearlake */
59#define MEI_DEV_ID_ICH9_8 0x28D4 /* Bearlake */
60#define MEI_DEV_ID_ICH9_9 0x28E4 /* Bearlake */
61#define MEI_DEV_ID_ICH9_10 0x28F4 /* Bearlake */
62
63#define MEI_DEV_ID_ICH9M_1 0x2A44 /* Cantiga */
64#define MEI_DEV_ID_ICH9M_2 0x2A54 /* Cantiga */
65#define MEI_DEV_ID_ICH9M_3 0x2A64 /* Cantiga */
66#define MEI_DEV_ID_ICH9M_4 0x2A74 /* Cantiga */
67
68#define MEI_DEV_ID_ICH10_1 0x2E04 /* Eaglelake */
69#define MEI_DEV_ID_ICH10_2 0x2E14 /* Eaglelake */
70#define MEI_DEV_ID_ICH10_3 0x2E24 /* Eaglelake */
71#define MEI_DEV_ID_ICH10_4 0x2E34 /* Eaglelake */
72
73#define MEI_DEV_ID_IBXPK_1 0x3B64 /* Calpella */
74#define MEI_DEV_ID_IBXPK_2 0x3B65 /* Calpella */
75
76#define MEI_DEV_ID_CPT_1 0x1C3A /* Couger Point */
77#define MEI_DEV_ID_PBG_1 0x1D3A /* C600/X79 Patsburg */
78
79#define MEI_DEV_ID_PPT_1 0x1E3A /* Panther Point */
80#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */
81#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
82
83#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */
84#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
85/*
86 * MEI HW Section
87 */
88
89/* MEI registers */
90/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
91#define H_CB_WW 0
92/* H_CSR - Host Control Status register */
93#define H_CSR 4
94/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */
95#define ME_CB_RW 8
96/* ME_CSR_HA - ME Control Status Host Access register (read only) */
97#define ME_CSR_HA 0xC
98
99
100/* register bits of H_CSR (Host Control Status register) */
101/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
102#define H_CBD 0xFF000000
103/* Host Circular Buffer Write Pointer */
104#define H_CBWP 0x00FF0000
105/* Host Circular Buffer Read Pointer */
106#define H_CBRP 0x0000FF00
107/* Host Reset */
108#define H_RST 0x00000010
109/* Host Ready */
110#define H_RDY 0x00000008
111/* Host Interrupt Generate */
112#define H_IG 0x00000004
113/* Host Interrupt Status */
114#define H_IS 0x00000002
115/* Host Interrupt Enable */
116#define H_IE 0x00000001
117
118
119/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
120/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
121access to ME_CBD */
122#define ME_CBD_HRA 0xFF000000
123/* ME CB Write Pointer HRA - host read only access to ME_CBWP */
124#define ME_CBWP_HRA 0x00FF0000
125/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
126#define ME_CBRP_HRA 0x0000FF00
127/* ME Reset HRA - host read only access to ME_RST */
128#define ME_RST_HRA 0x00000010
129/* ME Ready HRA - host read only access to ME_RDY */
130#define ME_RDY_HRA 0x00000008
131/* ME Interrupt Generate HRA - host read only access to ME_IG */
132#define ME_IG_HRA 0x00000004
133/* ME Interrupt Status HRA - host read only access to ME_IS */
134#define ME_IS_HRA 0x00000002
135/* ME Interrupt Enable HRA - host read only access to ME_IE */
136#define ME_IE_HRA 0x00000001
137 34
138/* 35/*
139 * MEI Version 36 * MEI Version
@@ -224,6 +121,22 @@ struct mei_bus_message {
224 u8 data[0]; 121 u8 data[0];
225} __packed; 122} __packed;
226 123
124/**
125 * struct hbm_cl_cmd - client specific host bus command
126 * CONNECT, DISCONNECT, and FLOW CONTROL
127 *
128 * @hbm_cmd - bus message command header
129 * @me_addr - address of the client in ME
130 * @host_addr - address of the client in the driver
131 * @data
132 */
133struct mei_hbm_cl_cmd {
134 u8 hbm_cmd;
135 u8 me_addr;
136 u8 host_addr;
137 u8 data;
138};
139
227struct hbm_version { 140struct hbm_version {
228 u8 minor_version; 141 u8 minor_version;
229 u8 major_version; 142 u8 major_version;
@@ -333,11 +246,5 @@ struct hbm_flow_control {
333 u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH]; 246 u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH];
334} __packed; 247} __packed;
335 248
336struct mei_me_client {
337 struct mei_client_properties props;
338 u8 client_id;
339 u8 mei_flow_ctrl_creds;
340} __packed;
341
342 249
343#endif 250#endif
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index a54cd5567ca2..6ec530168afb 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -19,11 +19,11 @@
19#include <linux/wait.h> 19#include <linux/wait.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21 21
22#include "mei_dev.h"
23#include "hw.h"
24#include "interface.h"
25#include <linux/mei.h> 22#include <linux/mei.h>
26 23
24#include "mei_dev.h"
25#include "client.h"
26
27const char *mei_dev_state_str(int state) 27const char *mei_dev_state_str(int state)
28{ 28{
29#define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state 29#define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state
@@ -42,84 +42,20 @@ const char *mei_dev_state_str(int state)
42#undef MEI_DEV_STATE 42#undef MEI_DEV_STATE
43} 43}
44 44
45 45void mei_device_init(struct mei_device *dev)
46
47/**
48 * mei_io_list_flush - removes list entry belonging to cl.
49 *
50 * @list: An instance of our list structure
51 * @cl: private data of the file object
52 */
53void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
54{
55 struct mei_cl_cb *pos;
56 struct mei_cl_cb *next;
57
58 list_for_each_entry_safe(pos, next, &list->list, list) {
59 if (pos->cl) {
60 if (mei_cl_cmp_id(cl, pos->cl))
61 list_del(&pos->list);
62 }
63 }
64}
65/**
66 * mei_cl_flush_queues - flushes queue lists belonging to cl.
67 *
68 * @dev: the device structure
69 * @cl: private data of the file object
70 */
71int mei_cl_flush_queues(struct mei_cl *cl)
72{ 46{
73 if (!cl || !cl->dev)
74 return -EINVAL;
75
76 dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
77 mei_io_list_flush(&cl->dev->read_list, cl);
78 mei_io_list_flush(&cl->dev->write_list, cl);
79 mei_io_list_flush(&cl->dev->write_waiting_list, cl);
80 mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
81 mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
82 mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
83 mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
84 return 0;
85}
86
87
88
89/**
90 * init_mei_device - allocates and initializes the mei device structure
91 *
92 * @pdev: The pci device structure
93 *
94 * returns The mei_device_device pointer on success, NULL on failure.
95 */
96struct mei_device *mei_device_init(struct pci_dev *pdev)
97{
98 struct mei_device *dev;
99
100 dev = kzalloc(sizeof(struct mei_device), GFP_KERNEL);
101 if (!dev)
102 return NULL;
103
104 /* setup our list array */ 47 /* setup our list array */
105 INIT_LIST_HEAD(&dev->file_list); 48 INIT_LIST_HEAD(&dev->file_list);
106 INIT_LIST_HEAD(&dev->wd_cl.link);
107 INIT_LIST_HEAD(&dev->iamthif_cl.link);
108 mutex_init(&dev->device_lock); 49 mutex_init(&dev->device_lock);
109 init_waitqueue_head(&dev->wait_recvd_msg); 50 init_waitqueue_head(&dev->wait_recvd_msg);
110 init_waitqueue_head(&dev->wait_stop_wd); 51 init_waitqueue_head(&dev->wait_stop_wd);
111 dev->dev_state = MEI_DEV_INITIALIZING; 52 dev->dev_state = MEI_DEV_INITIALIZING;
112 dev->iamthif_state = MEI_IAMTHIF_IDLE;
113 53
114 mei_io_list_init(&dev->read_list); 54 mei_io_list_init(&dev->read_list);
115 mei_io_list_init(&dev->write_list); 55 mei_io_list_init(&dev->write_list);
116 mei_io_list_init(&dev->write_waiting_list); 56 mei_io_list_init(&dev->write_waiting_list);
117 mei_io_list_init(&dev->ctrl_wr_list); 57 mei_io_list_init(&dev->ctrl_wr_list);
118 mei_io_list_init(&dev->ctrl_rd_list); 58 mei_io_list_init(&dev->ctrl_rd_list);
119 mei_io_list_init(&dev->amthif_cmd_list);
120 mei_io_list_init(&dev->amthif_rd_complete_list);
121 dev->pdev = pdev;
122 return dev;
123} 59}
124 60
125/** 61/**
@@ -131,101 +67,64 @@ struct mei_device *mei_device_init(struct pci_dev *pdev)
131 */ 67 */
132int mei_hw_init(struct mei_device *dev) 68int mei_hw_init(struct mei_device *dev)
133{ 69{
134 int err = 0; 70 int ret = 0;
135 int ret;
136 71
137 mutex_lock(&dev->device_lock); 72 mutex_lock(&dev->device_lock);
138 73
139 dev->host_hw_state = mei_hcsr_read(dev);
140 dev->me_hw_state = mei_mecsr_read(dev);
141 dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, mestate = 0x%08x.\n",
142 dev->host_hw_state, dev->me_hw_state);
143
144 /* acknowledge interrupt and stop interupts */ 74 /* acknowledge interrupt and stop interupts */
145 if ((dev->host_hw_state & H_IS) == H_IS) 75 mei_clear_interrupts(dev);
146 mei_reg_write(dev, H_CSR, dev->host_hw_state);
147 76
148 /* Doesn't change in runtime */ 77 mei_hw_config(dev);
149 dev->hbuf_depth = (dev->host_hw_state & H_CBD) >> 24;
150 78
151 dev->recvd_msg = false; 79 dev->recvd_msg = false;
152 dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n"); 80 dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n");
153 81
154 mei_reset(dev, 1); 82 mei_reset(dev, 1);
155 83
156 dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
157 dev->host_hw_state, dev->me_hw_state);
158
159 /* wait for ME to turn on ME_RDY */ 84 /* wait for ME to turn on ME_RDY */
160 if (!dev->recvd_msg) { 85 if (!dev->recvd_msg) {
161 mutex_unlock(&dev->device_lock); 86 mutex_unlock(&dev->device_lock);
162 err = wait_event_interruptible_timeout(dev->wait_recvd_msg, 87 ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,
163 dev->recvd_msg, 88 dev->recvd_msg,
164 mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); 89 mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
165 mutex_lock(&dev->device_lock); 90 mutex_lock(&dev->device_lock);
166 } 91 }
167 92
168 if (err <= 0 && !dev->recvd_msg) { 93 if (ret <= 0 && !dev->recvd_msg) {
169 dev->dev_state = MEI_DEV_DISABLED; 94 dev->dev_state = MEI_DEV_DISABLED;
170 dev_dbg(&dev->pdev->dev, 95 dev_dbg(&dev->pdev->dev,
171 "wait_event_interruptible_timeout failed" 96 "wait_event_interruptible_timeout failed"
172 "on wait for ME to turn on ME_RDY.\n"); 97 "on wait for ME to turn on ME_RDY.\n");
173 ret = -ENODEV; 98 goto err;
174 goto out;
175 } 99 }
176 100
177 if (!(((dev->host_hw_state & H_RDY) == H_RDY) &&
178 ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA))) {
179 dev->dev_state = MEI_DEV_DISABLED;
180 dev_dbg(&dev->pdev->dev,
181 "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
182 dev->host_hw_state, dev->me_hw_state);
183
184 if (!(dev->host_hw_state & H_RDY))
185 dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n");
186 101
187 if (!(dev->me_hw_state & ME_RDY_HRA)) 102 if (!mei_host_is_ready(dev)) {
188 dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n"); 103 dev_err(&dev->pdev->dev, "host is not ready.\n");
104 goto err;
105 }
189 106
190 dev_err(&dev->pdev->dev, "link layer initialization failed.\n"); 107 if (!mei_hw_is_ready(dev)) {
191 ret = -ENODEV; 108 dev_err(&dev->pdev->dev, "ME is not ready.\n");
192 goto out; 109 goto err;
193 } 110 }
194 111
195 if (dev->version.major_version != HBM_MAJOR_VERSION || 112 if (dev->version.major_version != HBM_MAJOR_VERSION ||
196 dev->version.minor_version != HBM_MINOR_VERSION) { 113 dev->version.minor_version != HBM_MINOR_VERSION) {
197 dev_dbg(&dev->pdev->dev, "MEI start failed.\n"); 114 dev_dbg(&dev->pdev->dev, "MEI start failed.\n");
198 ret = -ENODEV; 115 goto err;
199 goto out;
200 } 116 }
201 117
202 dev->recvd_msg = false; 118 dev->recvd_msg = false;
203 dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
204 dev->host_hw_state, dev->me_hw_state);
205 dev_dbg(&dev->pdev->dev, "ME turn on ME_RDY and host turn on H_RDY.\n");
206 dev_dbg(&dev->pdev->dev, "link layer has been established.\n"); 119 dev_dbg(&dev->pdev->dev, "link layer has been established.\n");
207 dev_dbg(&dev->pdev->dev, "MEI start success.\n");
208 ret = 0;
209 120
210out:
211 mutex_unlock(&dev->device_lock); 121 mutex_unlock(&dev->device_lock);
212 return ret; 122 return 0;
213} 123err:
214 124 dev_err(&dev->pdev->dev, "link layer initialization failed.\n");
215/** 125 dev->dev_state = MEI_DEV_DISABLED;
216 * mei_hw_reset - resets fw via mei csr register. 126 mutex_unlock(&dev->device_lock);
217 * 127 return -ENODEV;
218 * @dev: the device structure
219 * @interrupts_enabled: if interrupt should be enabled after reset.
220 */
221static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled)
222{
223 dev->host_hw_state |= (H_RST | H_IG);
224
225 if (interrupts_enabled)
226 mei_enable_interrupts(dev);
227 else
228 mei_disable_interrupts(dev);
229} 128}
230 129
231/** 130/**
@@ -236,56 +135,34 @@ static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled)
236 */ 135 */
237void mei_reset(struct mei_device *dev, int interrupts_enabled) 136void mei_reset(struct mei_device *dev, int interrupts_enabled)
238{ 137{
239 struct mei_cl *cl_pos = NULL;
240 struct mei_cl *cl_next = NULL;
241 struct mei_cl_cb *cb_pos = NULL;
242 struct mei_cl_cb *cb_next = NULL;
243 bool unexpected; 138 bool unexpected;
244 139
245 if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) { 140 if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET)
246 dev->need_reset = true;
247 return; 141 return;
248 }
249 142
250 unexpected = (dev->dev_state != MEI_DEV_INITIALIZING && 143 unexpected = (dev->dev_state != MEI_DEV_INITIALIZING &&
251 dev->dev_state != MEI_DEV_DISABLED && 144 dev->dev_state != MEI_DEV_DISABLED &&
252 dev->dev_state != MEI_DEV_POWER_DOWN && 145 dev->dev_state != MEI_DEV_POWER_DOWN &&
253 dev->dev_state != MEI_DEV_POWER_UP); 146 dev->dev_state != MEI_DEV_POWER_UP);
254 147
255 dev->host_hw_state = mei_hcsr_read(dev);
256
257 dev_dbg(&dev->pdev->dev, "before reset host_hw_state = 0x%08x.\n",
258 dev->host_hw_state);
259
260 mei_hw_reset(dev, interrupts_enabled); 148 mei_hw_reset(dev, interrupts_enabled);
261 149
262 dev->host_hw_state &= ~H_RST;
263 dev->host_hw_state |= H_IG;
264
265 mei_hcsr_set(dev);
266
267 dev_dbg(&dev->pdev->dev, "currently saved host_hw_state = 0x%08x.\n",
268 dev->host_hw_state);
269
270 dev->need_reset = false;
271 150
272 if (dev->dev_state != MEI_DEV_INITIALIZING) { 151 if (dev->dev_state != MEI_DEV_INITIALIZING) {
273 if (dev->dev_state != MEI_DEV_DISABLED && 152 if (dev->dev_state != MEI_DEV_DISABLED &&
274 dev->dev_state != MEI_DEV_POWER_DOWN) 153 dev->dev_state != MEI_DEV_POWER_DOWN)
275 dev->dev_state = MEI_DEV_RESETING; 154 dev->dev_state = MEI_DEV_RESETING;
276 155
277 list_for_each_entry_safe(cl_pos, 156 mei_cl_all_disconnect(dev);
278 cl_next, &dev->file_list, link) { 157
279 cl_pos->state = MEI_FILE_DISCONNECTED;
280 cl_pos->mei_flow_ctrl_creds = 0;
281 cl_pos->read_cb = NULL;
282 cl_pos->timer_count = 0;
283 }
284 /* remove entry if already in list */ 158 /* remove entry if already in list */
285 dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n"); 159 dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
286 mei_me_cl_unlink(dev, &dev->wd_cl); 160 mei_cl_unlink(&dev->wd_cl);
287 161 if (dev->open_handle_count > 0)
288 mei_me_cl_unlink(dev, &dev->iamthif_cl); 162 dev->open_handle_count--;
163 mei_cl_unlink(&dev->iamthif_cl);
164 if (dev->open_handle_count > 0)
165 dev->open_handle_count--;
289 166
290 mei_amthif_reset_params(dev); 167 mei_amthif_reset_params(dev);
291 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); 168 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
@@ -295,392 +172,17 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
295 dev->rd_msg_hdr = 0; 172 dev->rd_msg_hdr = 0;
296 dev->wd_pending = false; 173 dev->wd_pending = false;
297 174
298 /* update the state of the registers after reset */
299 dev->host_hw_state = mei_hcsr_read(dev);
300 dev->me_hw_state = mei_mecsr_read(dev);
301
302 dev_dbg(&dev->pdev->dev, "after reset host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
303 dev->host_hw_state, dev->me_hw_state);
304
305 if (unexpected) 175 if (unexpected)
306 dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n", 176 dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
307 mei_dev_state_str(dev->dev_state)); 177 mei_dev_state_str(dev->dev_state));
308 178
309 /* Wake up all readings so they can be interrupted */ 179 /* wake up all readings so they can be interrupted */
310 list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) { 180 mei_cl_all_read_wakeup(dev);
311 if (waitqueue_active(&cl_pos->rx_wait)) {
312 dev_dbg(&dev->pdev->dev, "Waking up client!\n");
313 wake_up_interruptible(&cl_pos->rx_wait);
314 }
315 }
316 /* remove all waiting requests */
317 list_for_each_entry_safe(cb_pos, cb_next, &dev->write_list.list, list) {
318 list_del(&cb_pos->list);
319 mei_io_cb_free(cb_pos);
320 }
321}
322
323
324
325/**
326 * host_start_message - mei host sends start message.
327 *
328 * @dev: the device structure
329 *
330 * returns none.
331 */
332void mei_host_start_message(struct mei_device *dev)
333{
334 struct mei_msg_hdr *mei_hdr;
335 struct hbm_host_version_request *start_req;
336 const size_t len = sizeof(struct hbm_host_version_request);
337
338 mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
339
340 /* host start message */
341 start_req = (struct hbm_host_version_request *)&dev->wr_msg_buf[1];
342 memset(start_req, 0, len);
343 start_req->hbm_cmd = HOST_START_REQ_CMD;
344 start_req->host_version.major_version = HBM_MAJOR_VERSION;
345 start_req->host_version.minor_version = HBM_MINOR_VERSION;
346
347 dev->recvd_msg = false;
348 if (mei_write_message(dev, mei_hdr, (unsigned char *)start_req, len)) {
349 dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
350 dev->dev_state = MEI_DEV_RESETING;
351 mei_reset(dev, 1);
352 }
353 dev->init_clients_state = MEI_START_MESSAGE;
354 dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
355 return ;
356}
357
358/**
359 * host_enum_clients_message - host sends enumeration client request message.
360 *
361 * @dev: the device structure
362 *
363 * returns none.
364 */
365void mei_host_enum_clients_message(struct mei_device *dev)
366{
367 struct mei_msg_hdr *mei_hdr;
368 struct hbm_host_enum_request *enum_req;
369 const size_t len = sizeof(struct hbm_host_enum_request);
370 /* enumerate clients */
371 mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
372
373 enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
374 memset(enum_req, 0, sizeof(struct hbm_host_enum_request));
375 enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
376
377 if (mei_write_message(dev, mei_hdr, (unsigned char *)enum_req, len)) {
378 dev->dev_state = MEI_DEV_RESETING;
379 dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
380 mei_reset(dev, 1);
381 }
382 dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
383 dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
384 return;
385}
386
387
388/**
389 * allocate_me_clients_storage - allocates storage for me clients
390 *
391 * @dev: the device structure
392 *
393 * returns none.
394 */
395void mei_allocate_me_clients_storage(struct mei_device *dev)
396{
397 struct mei_me_client *clients;
398 int b;
399
400 /* count how many ME clients we have */
401 for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
402 dev->me_clients_num++;
403
404 if (dev->me_clients_num <= 0)
405 return ;
406
407
408 if (dev->me_clients != NULL) {
409 kfree(dev->me_clients);
410 dev->me_clients = NULL;
411 }
412 dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
413 dev->me_clients_num * sizeof(struct mei_me_client));
414 /* allocate storage for ME clients representation */
415 clients = kcalloc(dev->me_clients_num,
416 sizeof(struct mei_me_client), GFP_KERNEL);
417 if (!clients) {
418 dev_dbg(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
419 dev->dev_state = MEI_DEV_RESETING;
420 mei_reset(dev, 1);
421 return ;
422 }
423 dev->me_clients = clients;
424 return ;
425}
426
427void mei_host_client_init(struct work_struct *work)
428{
429 struct mei_device *dev = container_of(work,
430 struct mei_device, init_work);
431 struct mei_client_properties *client_props;
432 int i;
433
434 mutex_lock(&dev->device_lock);
435
436 bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
437 dev->open_handle_count = 0;
438
439 /*
440 * Reserving the first three client IDs
441 * 0: Reserved for MEI Bus Message communications
442 * 1: Reserved for Watchdog
443 * 2: Reserved for AMTHI
444 */
445 bitmap_set(dev->host_clients_map, 0, 3);
446
447 for (i = 0; i < dev->me_clients_num; i++) {
448 client_props = &dev->me_clients[i].props;
449
450 if (!uuid_le_cmp(client_props->protocol_name, mei_amthi_guid))
451 mei_amthif_host_init(dev);
452 else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
453 mei_wd_host_init(dev);
454 }
455
456 dev->dev_state = MEI_DEV_ENABLED;
457
458 mutex_unlock(&dev->device_lock);
459}
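
The init work above wipes the host client bitmap and pre-reserves IDs 0-2 (bus messages, watchdog, AMTHI) before any regular client is linked, so a later search for a free host ID only needs the first clear bit. A toy standalone illustration of that reservation pattern, using plain C bit operations rather than the kernel bitmap API (all sizes here are assumptions, not from the driver):

#include <stdio.h>

#define CLIENTS_MAX   256
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

static unsigned long map[CLIENTS_MAX / (8 * sizeof(unsigned long))];

static void set_used(int n) { map[n / BITS_PER_LONG] |= 1ul << (n % BITS_PER_LONG); }
static int  is_used(int n)  { return (map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1; }

int main(void)
{
	int id;

	/* 0: bus messages, 1: watchdog, 2: AMTHI -- mirrors the reservation above */
	for (id = 0; id < 3; id++)
		set_used(id);

	/* first clear bit is the first host client id available for a new connection */
	for (id = 0; id < CLIENTS_MAX && is_used(id); id++)
		;
	printf("first free host client id = %d\n", id);	/* prints 3 */
	return 0;
}
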
460
461int mei_host_client_enumerate(struct mei_device *dev)
462{
463
464 struct mei_msg_hdr *mei_hdr;
465 struct hbm_props_request *prop_req;
466 const size_t len = sizeof(struct hbm_props_request);
467 unsigned long next_client_index;
468 u8 client_num;
469
470
471 client_num = dev->me_client_presentation_num;
472
473 next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
474 dev->me_client_index);
475
476 /* We got all client properties */
477 if (next_client_index == MEI_CLIENTS_MAX) {
478 schedule_work(&dev->init_work);
479
480 return 0;
481 }
482
483 dev->me_clients[client_num].client_id = next_client_index;
484 dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
485
486 mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
487 prop_req = (struct hbm_props_request *)&dev->wr_msg_buf[1];
488
489 memset(prop_req, 0, sizeof(struct hbm_props_request));
490
491
492 prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
493 prop_req->address = next_client_index;
494
495 if (mei_write_message(dev, mei_hdr, (unsigned char *) prop_req,
496 mei_hdr->length)) {
497 dev->dev_state = MEI_DEV_RESETING;
498 dev_err(&dev->pdev->dev, "Properties request command failed\n");
499 mei_reset(dev, 1);
500
501 return -EIO;
502 }
503
504 dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
505 dev->me_client_index = next_client_index;
506
507 return 0;
508}
509
510/**
511 * mei_init_file_private - initializes private file structure.
512 *
513 * @priv: private file structure to be initialized
514 * @file: the file structure
515 */
516void mei_cl_init(struct mei_cl *priv, struct mei_device *dev)
517{
518 memset(priv, 0, sizeof(struct mei_cl));
519 init_waitqueue_head(&priv->wait);
520 init_waitqueue_head(&priv->rx_wait);
521 init_waitqueue_head(&priv->tx_wait);
522 INIT_LIST_HEAD(&priv->link);
523 priv->reading_state = MEI_IDLE;
524 priv->writing_state = MEI_IDLE;
525 priv->dev = dev;
526}
527
528int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid)
529{
530 int i, res = -ENOENT;
531
532 for (i = 0; i < dev->me_clients_num; ++i)
533 if (uuid_le_cmp(*cuuid,
534 dev->me_clients[i].props.protocol_name) == 0) {
535 res = i;
536 break;
537 }
538
539 return res;
540}
541
542
543/**
544 * mei_me_cl_link - create link between host and me client and add
545 * me_cl to the list
546 *
547 * @dev: the device structure
548 * @cl: link between me and host client associated with opened file descriptor
549 * @cuuid: uuid of ME client
550 * @client_id: id of the host client
551 *
552 * returns ME client index if ME client is found
553 * -EINVAL on incorrect values
554 * -ENOENT if client not found
555 */
556int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
557 const uuid_le *cuuid, u8 host_cl_id)
558{
559 int i;
560
561 if (!dev || !cl || !cuuid)
562 return -EINVAL;
563
564 /* check for valid client id */
565 i = mei_me_cl_by_uuid(dev, cuuid);
566 if (i >= 0) {
567 cl->me_client_id = dev->me_clients[i].client_id;
568 cl->state = MEI_FILE_CONNECTING;
569 cl->host_client_id = host_cl_id;
570
571 list_add_tail(&cl->link, &dev->file_list);
572 return (u8)i;
573 }
574
575 return -ENOENT;
576}
577/**
578 * mei_me_cl_unlink - remove me_cl from the list
579 *
580 * @dev: the device structure
581 * @host_client_id: host client id to be removed
582 */
583void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl)
584{
585 struct mei_cl *pos, *next;
586 list_for_each_entry_safe(pos, next, &dev->file_list, link) {
587 if (cl->host_client_id == pos->host_client_id) {
588 dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
589 pos->host_client_id, pos->me_client_id);
590 list_del_init(&pos->link);
591 break;
592 }
593 }
594}
595 181
596/** 182 /* remove all waiting requests */
597 * mei_alloc_file_private - allocates a private file structure and sets it up. 183 mei_cl_all_write_clear(dev);
598 * @file: the file structure
599 *
600 * returns The allocated file or NULL on failure
601 */
602struct mei_cl *mei_cl_allocate(struct mei_device *dev)
603{
604 struct mei_cl *cl;
605
606 cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
607 if (!cl)
608 return NULL;
609
610 mei_cl_init(cl, dev);
611
612 return cl;
613} 184}
614 185
615 186
616 187
617/**
618 * mei_disconnect_host_client - sends disconnect message to fw from host client.
619 *
620 * @dev: the device structure
621 * @cl: private data of the file object
622 *
623 * Locking: called under "dev->device_lock" lock
624 *
625 * returns 0 on success, <0 on failure.
626 */
627int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
628{
629 struct mei_cl_cb *cb;
630 int rets, err;
631
632 if (!dev || !cl)
633 return -ENODEV;
634
635 if (cl->state != MEI_FILE_DISCONNECTING)
636 return 0;
637
638 cb = mei_io_cb_init(cl, NULL);
639 if (!cb)
640 return -ENOMEM;
641
642 cb->fop_type = MEI_FOP_CLOSE;
643 if (dev->mei_host_buffer_is_empty) {
644 dev->mei_host_buffer_is_empty = false;
645 if (mei_disconnect(dev, cl)) {
646 rets = -ENODEV;
647 dev_dbg(&dev->pdev->dev, "failed to call mei_disconnect.\n");
648 goto free;
649 }
650 mdelay(10); /* Wait for hardware disconnection ready */
651 list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
652 } else {
653 dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
654 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
655
656 }
657 mutex_unlock(&dev->device_lock);
658
659 err = wait_event_timeout(dev->wait_recvd_msg,
660 MEI_FILE_DISCONNECTED == cl->state,
661 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
662
663 mutex_lock(&dev->device_lock);
664 if (MEI_FILE_DISCONNECTED == cl->state) {
665 rets = 0;
666 dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
667 } else {
668 rets = -ENODEV;
669 if (MEI_FILE_DISCONNECTED != cl->state)
670 dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
671
672 if (err)
673 dev_dbg(&dev->pdev->dev,
674 "wait failed disconnect err=%08x\n",
675 err);
676
677 dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
678 }
679
680 mei_io_list_flush(&dev->ctrl_rd_list, cl);
681 mei_io_list_flush(&dev->ctrl_wr_list, cl);
682free:
683 mei_io_cb_free(cb);
684 return rets;
685}
686 188
diff --git a/drivers/misc/mei/interface.c b/drivers/misc/mei/interface.c
deleted file mode 100644
index 8de854785960..000000000000
--- a/drivers/misc/mei/interface.c
+++ /dev/null
@@ -1,388 +0,0 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/pci.h>
18#include "mei_dev.h"
19#include <linux/mei.h>
20#include "interface.h"
21
22
23
24/**
25 * mei_set_csr_register - writes H_CSR register to the mei device,
26 * and ignores the H_IS bit for it is write-one-to-zero.
27 *
28 * @dev: the device structure
29 */
30void mei_hcsr_set(struct mei_device *dev)
31{
32 if ((dev->host_hw_state & H_IS) == H_IS)
33 dev->host_hw_state &= ~H_IS;
34 mei_reg_write(dev, H_CSR, dev->host_hw_state);
35 dev->host_hw_state = mei_hcsr_read(dev);
36}
37
38/**
39 * mei_csr_enable_interrupts - enables mei device interrupts
40 *
41 * @dev: the device structure
42 */
43void mei_enable_interrupts(struct mei_device *dev)
44{
45 dev->host_hw_state |= H_IE;
46 mei_hcsr_set(dev);
47}
48
49/**
50 * mei_csr_disable_interrupts - disables mei device interrupts
51 *
52 * @dev: the device structure
53 */
54void mei_disable_interrupts(struct mei_device *dev)
55{
56 dev->host_hw_state &= ~H_IE;
57 mei_hcsr_set(dev);
58}
59
60/**
61 * mei_hbuf_filled_slots - gets number of device filled buffer slots
62 *
63 * @device: the device structure
64 *
65 * returns number of filled slots
66 */
67static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
68{
69 char read_ptr, write_ptr;
70
71 dev->host_hw_state = mei_hcsr_read(dev);
72
73 read_ptr = (char) ((dev->host_hw_state & H_CBRP) >> 8);
74 write_ptr = (char) ((dev->host_hw_state & H_CBWP) >> 16);
75
76 return (unsigned char) (write_ptr - read_ptr);
77}
78
79/**
80 * mei_hbuf_is_empty - checks if host buffer is empty.
81 *
82 * @dev: the device structure
83 *
84 * returns true if empty, false - otherwise.
85 */
86bool mei_hbuf_is_empty(struct mei_device *dev)
87{
88 return mei_hbuf_filled_slots(dev) == 0;
89}
90
91/**
92 * mei_hbuf_empty_slots - counts write empty slots.
93 *
94 * @dev: the device structure
95 *
96 * returns -EOVERFLOW if overflow, otherwise empty slots count
97 */
98int mei_hbuf_empty_slots(struct mei_device *dev)
99{
100 unsigned char filled_slots, empty_slots;
101
102 filled_slots = mei_hbuf_filled_slots(dev);
103 empty_slots = dev->hbuf_depth - filled_slots;
104
105 /* check for overflow */
106 if (filled_slots > dev->hbuf_depth)
107 return -EOVERFLOW;
108
109 return empty_slots;
110}
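
mei_hbuf_filled_slots() and mei_hbuf_empty_slots() above derive buffer occupancy purely from the circular read/write pointers packed into H_CSR; the subtraction is done in byte-sized variables, so pointer wrap-around falls out of modular arithmetic. A minimal userspace sketch of the same accounting follows; the bit positions mirror H_CBRP/H_CBWP as used above, while the CSR value and buffer depth are made-up examples:

#include <stdio.h>

/* Illustrative sketch of the slot accounting in mei_hbuf_filled_slots():
 * read pointer in bits 8..15, write pointer in bits 16..23. */
static unsigned char filled_slots(unsigned int csr)
{
	char read_ptr  = (char)((csr >> 8)  & 0xff);	/* circular read pointer  */
	char write_ptr = (char)((csr >> 16) & 0xff);	/* circular write pointer */

	/* unsigned wrap-around gives the distance even after the
	 * pointers have wrapped past 255 */
	return (unsigned char)(write_ptr - read_ptr);
}

int main(void)
{
	unsigned int csr = (0x05u << 16) | (0xfeu << 8);	/* write=5, read=254 */
	unsigned char depth = 32;				/* assumed buffer depth */
	unsigned char used = filled_slots(csr);

	printf("filled=%u empty=%d\n", used, depth - used);	/* filled=7 empty=25 */
	return 0;
}
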
111
112/**
113 * mei_write_message - writes a message to mei device.
114 *
115 * @dev: the device structure
116 * @header: header of message
117 * @write_buffer: message buffer will be written
118 * @write_length: message size will be written
119 *
120 * This function returns -EIO if write has failed
121 */
122int mei_write_message(struct mei_device *dev, struct mei_msg_hdr *header,
123 unsigned char *buf, unsigned long length)
124{
125 unsigned long rem, dw_cnt;
126 u32 *reg_buf = (u32 *)buf;
127 int i;
128 int empty_slots;
129
130
131 dev_dbg(&dev->pdev->dev,
132 "mei_write_message header=%08x.\n",
133 *((u32 *) header));
134
135 empty_slots = mei_hbuf_empty_slots(dev);
136 dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
137
138 dw_cnt = mei_data2slots(length);
139 if (empty_slots < 0 || dw_cnt > empty_slots)
140 return -EIO;
141
142 mei_reg_write(dev, H_CB_WW, *((u32 *) header));
143
144 for (i = 0; i < length / 4; i++)
145 mei_reg_write(dev, H_CB_WW, reg_buf[i]);
146
147 rem = length & 0x3;
148 if (rem > 0) {
149 u32 reg = 0;
150 memcpy(&reg, &buf[length - rem], rem);
151 mei_reg_write(dev, H_CB_WW, reg);
152 }
153
154 dev->host_hw_state = mei_hcsr_read(dev);
155 dev->host_hw_state |= H_IG;
156 mei_hcsr_set(dev);
157 dev->me_hw_state = mei_mecsr_read(dev);
158 if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
159 return -EIO;
160
161 return 0;
162}
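
mei_write_message() above streams the header and payload into the hardware one 32-bit word at a time and finishes a payload that is not a multiple of four bytes with a single zero-padded word. A standalone sketch of just that packing step; pack_words() is a hypothetical helper for illustration, not a driver function:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Pack an arbitrary-length byte payload into 32-bit words the way
 * mei_write_message() feeds H_CB_WW: whole words first, then one
 * zero-padded word for the remaining 1-3 bytes. */
static size_t pack_words(const unsigned char *buf, size_t len, uint32_t *out)
{
	size_t i, n = len / 4, rem = len & 0x3;

	for (i = 0; i < n; i++)
		memcpy(&out[i], buf + 4 * i, 4);

	if (rem) {
		uint32_t last = 0;
		memcpy(&last, buf + len - rem, rem);
		out[n++] = last;
	}
	return n;	/* number of slots consumed by the payload */
}

int main(void)
{
	unsigned char msg[] = "hello";	/* 6 bytes incl. NUL -> 2 slots */
	uint32_t slots[4];

	printf("slots used = %zu\n", pack_words(msg, sizeof(msg), slots));
	return 0;
}
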
163
164/**
165 * mei_count_full_read_slots - counts read full slots.
166 *
167 * @dev: the device structure
168 *
169 * returns -EOVERFLOW if overflow, otherwise filled slots count
170 */
171int mei_count_full_read_slots(struct mei_device *dev)
172{
173 char read_ptr, write_ptr;
174 unsigned char buffer_depth, filled_slots;
175
176 dev->me_hw_state = mei_mecsr_read(dev);
177 buffer_depth = (unsigned char)((dev->me_hw_state & ME_CBD_HRA) >> 24);
178 read_ptr = (char) ((dev->me_hw_state & ME_CBRP_HRA) >> 8);
179 write_ptr = (char) ((dev->me_hw_state & ME_CBWP_HRA) >> 16);
180 filled_slots = (unsigned char) (write_ptr - read_ptr);
181
182 /* check for overflow */
183 if (filled_slots > buffer_depth)
184 return -EOVERFLOW;
185
186 dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
187 return (int)filled_slots;
188}
189
190/**
191 * mei_read_slots - reads a message from mei device.
192 *
193 * @dev: the device structure
194 * @buffer: message buffer will be written
195 * @buffer_length: message size will be read
196 */
197void mei_read_slots(struct mei_device *dev, unsigned char *buffer,
198 unsigned long buffer_length)
199{
200 u32 *reg_buf = (u32 *)buffer;
201
202 for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
203 *reg_buf++ = mei_mecbrw_read(dev);
204
205 if (buffer_length > 0) {
206 u32 reg = mei_mecbrw_read(dev);
207 memcpy(reg_buf, &reg, buffer_length);
208 }
209
210 dev->host_hw_state |= H_IG;
211 mei_hcsr_set(dev);
212}
213
214/**
215 * mei_flow_ctrl_creds - checks flow_control credentials.
216 *
217 * @dev: the device structure
218 * @cl: private data of the file object
219 *
220 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
221 * -ENOENT if mei_cl is not present
222 * -EINVAL if single_recv_buf == 0
223 */
224int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl)
225{
226 int i;
227
228 if (!dev->me_clients_num)
229 return 0;
230
231 if (cl->mei_flow_ctrl_creds > 0)
232 return 1;
233
234 for (i = 0; i < dev->me_clients_num; i++) {
235 struct mei_me_client *me_cl = &dev->me_clients[i];
236 if (me_cl->client_id == cl->me_client_id) {
237 if (me_cl->mei_flow_ctrl_creds) {
238 if (WARN_ON(me_cl->props.single_recv_buf == 0))
239 return -EINVAL;
240 return 1;
241 } else {
242 return 0;
243 }
244 }
245 }
246 return -ENOENT;
247}
248
249/**
250 * mei_flow_ctrl_reduce - reduces flow_control.
251 *
252 * @dev: the device structure
253 * @cl: private data of the file object
254 * @returns
255 * 0 on success
256 * -ENOENT when me client is not found
257 * -EINVAL when ctrl credits are <= 0
258 */
259int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
260{
261 int i;
262
263 if (!dev->me_clients_num)
264 return -ENOENT;
265
266 for (i = 0; i < dev->me_clients_num; i++) {
267 struct mei_me_client *me_cl = &dev->me_clients[i];
268 if (me_cl->client_id == cl->me_client_id) {
269 if (me_cl->props.single_recv_buf != 0) {
270 if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
271 return -EINVAL;
272 dev->me_clients[i].mei_flow_ctrl_creds--;
273 } else {
274 if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
275 return -EINVAL;
276 cl->mei_flow_ctrl_creds--;
277 }
278 return 0;
279 }
280 }
281 return -ENOENT;
282}
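
Together, mei_flow_ctrl_creds() and mei_flow_ctrl_reduce() enforce the HBM flow-control rule: a host client may write only while it holds a credit, and each write consumes one, either from the per-ME-client pool when the ME client exposes a single receive buffer or from the connection itself otherwise. A compact model of that bookkeeping, with simplified field names that are not the driver's own types:

#include <stdbool.h>
#include <stdio.h>

struct creds {
	bool single_recv_buf;	/* ME client exposes one shared receive buffer */
	int  me_creds;		/* credits granted to the ME client as a whole */
	int  cl_creds;		/* credits granted to this connection only     */
};

/* Return true and consume one credit if a write is currently allowed,
 * loosely mirroring mei_flow_ctrl_creds() followed by mei_flow_ctrl_reduce(). */
static bool try_consume_credit(struct creds *c)
{
	if (c->single_recv_buf) {
		if (c->me_creds <= 0)
			return false;
		c->me_creds--;
	} else {
		if (c->cl_creds <= 0)
			return false;
		c->cl_creds--;
	}
	return true;
}

int main(void)
{
	struct creds c = { .single_recv_buf = false, .cl_creds = 1 };

	/* second write must wait until the firmware grants another credit */
	printf("%d %d\n", try_consume_credit(&c), try_consume_credit(&c));	/* 1 0 */
	return 0;
}
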
283
284/**
285 * mei_send_flow_control - sends flow control to fw.
286 *
287 * @dev: the device structure
288 * @cl: private data of the file object
289 *
290 * This function returns -EIO on write failure
291 */
292int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
293{
294 struct mei_msg_hdr *mei_hdr;
295 struct hbm_flow_control *flow_ctrl;
296 const size_t len = sizeof(struct hbm_flow_control);
297
298 mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
299
300 flow_ctrl = (struct hbm_flow_control *)&dev->wr_msg_buf[1];
301 memset(flow_ctrl, 0, len);
302 flow_ctrl->hbm_cmd = MEI_FLOW_CONTROL_CMD;
303 flow_ctrl->host_addr = cl->host_client_id;
304 flow_ctrl->me_addr = cl->me_client_id;
305 /* FIXME: reserved !? */
306 memset(flow_ctrl->reserved, 0, sizeof(flow_ctrl->reserved));
307 dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
308 cl->host_client_id, cl->me_client_id);
309
310 return mei_write_message(dev, mei_hdr,
311 (unsigned char *) flow_ctrl, len);
312}
313
314/**
315 * mei_other_client_is_connecting - checks if other
316 * client with the same client id is connected.
317 *
318 * @dev: the device structure
319 * @cl: private data of the file object
320 *
321 * returns 1 if other client is connected, 0 - otherwise.
322 */
323int mei_other_client_is_connecting(struct mei_device *dev,
324 struct mei_cl *cl)
325{
326 struct mei_cl *cl_pos = NULL;
327 struct mei_cl *cl_next = NULL;
328
329 list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
330 if ((cl_pos->state == MEI_FILE_CONNECTING) &&
331 (cl_pos != cl) &&
332 cl->me_client_id == cl_pos->me_client_id)
333 return 1;
334
335 }
336 return 0;
337}
338
339/**
340 * mei_disconnect - sends disconnect message to fw.
341 *
342 * @dev: the device structure
343 * @cl: private data of the file object
344 *
345 * This function returns -EIO on write failure
346 */
347int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
348{
349 struct mei_msg_hdr *mei_hdr;
350 struct hbm_client_connect_request *req;
351 const size_t len = sizeof(struct hbm_client_connect_request);
352
353 mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
354
355 req = (struct hbm_client_connect_request *)&dev->wr_msg_buf[1];
356 memset(req, 0, len);
357 req->hbm_cmd = CLIENT_DISCONNECT_REQ_CMD;
358 req->host_addr = cl->host_client_id;
359 req->me_addr = cl->me_client_id;
360 req->reserved = 0;
361
362 return mei_write_message(dev, mei_hdr, (unsigned char *)req, len);
363}
364
365/**
366 * mei_connect - sends connect message to fw.
367 *
368 * @dev: the device structure
369 * @cl: private data of the file object
370 *
371 * This function returns -EIO on write failure
372 */
373int mei_connect(struct mei_device *dev, struct mei_cl *cl)
374{
375 struct mei_msg_hdr *mei_hdr;
376 struct hbm_client_connect_request *req;
377 const size_t len = sizeof(struct hbm_client_connect_request);
378
379 mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
380
381 req = (struct hbm_client_connect_request *) &dev->wr_msg_buf[1];
382 req->hbm_cmd = CLIENT_CONNECT_REQ_CMD;
383 req->host_addr = cl->host_client_id;
384 req->me_addr = cl->me_client_id;
385 req->reserved = 0;
386
387 return mei_write_message(dev, mei_hdr, (unsigned char *) req, len);
388}
diff --git a/drivers/misc/mei/interface.h b/drivers/misc/mei/interface.h
deleted file mode 100644
index ec6c785a3961..000000000000
--- a/drivers/misc/mei/interface.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17
18
19#ifndef _MEI_INTERFACE_H_
20#define _MEI_INTERFACE_H_
21
22#include <linux/mei.h>
23#include "mei_dev.h"
24
25
26
27void mei_read_slots(struct mei_device *dev,
28 unsigned char *buffer,
29 unsigned long buffer_length);
30
31int mei_write_message(struct mei_device *dev,
32 struct mei_msg_hdr *header,
33 unsigned char *write_buffer,
34 unsigned long write_length);
35
36bool mei_hbuf_is_empty(struct mei_device *dev);
37
38int mei_hbuf_empty_slots(struct mei_device *dev);
39
40static inline size_t mei_hbuf_max_data(const struct mei_device *dev)
41{
42 return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
43}
44
45/* get slots (dwords) from a message length + header (bytes) */
46static inline unsigned char mei_data2slots(size_t length)
47{
48 return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
49}
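
mei_data2slots() rounds the header plus payload length up to whole 32-bit slots, e.g. a 4-byte header with a 6-byte payload needs DIV_ROUND_UP(10, 4) = 3 slots. The same arithmetic as a tiny standalone check (the 4-byte header size is an assumption taken from the struct layout, not stated in this hunk):

#include <stdio.h>

#define HDR_BYTES 4u	/* assumed sizeof(struct mei_msg_hdr) */

static unsigned int data2slots(unsigned int payload_bytes)
{
	/* round header + payload up to whole 32-bit slots */
	return (HDR_BYTES + payload_bytes + 3u) / 4u;
}

int main(void)
{
	printf("%u %u %u\n", data2slots(0), data2slots(6), data2slots(8));	/* 1 3 3 */
	return 0;
}
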
50
51int mei_count_full_read_slots(struct mei_device *dev);
52
53
54int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl);
55
56
57
58int mei_wd_send(struct mei_device *dev);
59int mei_wd_stop(struct mei_device *dev);
60int mei_wd_host_init(struct mei_device *dev);
61/*
62 * mei_watchdog_register - Registering watchdog interface
63 * once we got connection to the WD Client
64 * @dev - mei device
65 */
66void mei_watchdog_register(struct mei_device *dev);
67/*
68 * mei_watchdog_unregister - Unregistering watchdog interface
69 * @dev - mei device
70 */
71void mei_watchdog_unregister(struct mei_device *dev);
72
73int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl);
74
75int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl);
76
77int mei_disconnect(struct mei_device *dev, struct mei_cl *cl);
78int mei_other_client_is_connecting(struct mei_device *dev, struct mei_cl *cl);
79int mei_connect(struct mei_device *dev, struct mei_cl *cl);
80
81#endif /* _MEI_INTERFACE_H_ */
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 04fa2134615e..3535b2676c97 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -21,41 +21,21 @@
21#include <linux/fs.h> 21#include <linux/fs.h>
22#include <linux/jiffies.h> 22#include <linux/jiffies.h>
23 23
24#include "mei_dev.h"
25#include <linux/mei.h> 24#include <linux/mei.h>
26#include "hw.h"
27#include "interface.h"
28
29
30/**
31 * mei_interrupt_quick_handler - The ISR of the MEI device
32 *
33 * @irq: The irq number
34 * @dev_id: pointer to the device structure
35 *
36 * returns irqreturn_t
37 */
38irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id)
39{
40 struct mei_device *dev = (struct mei_device *) dev_id;
41 u32 csr_reg = mei_hcsr_read(dev);
42
43 if ((csr_reg & H_IS) != H_IS)
44 return IRQ_NONE;
45 25
46 /* clear H_IS bit in H_CSR */ 26#include "mei_dev.h"
47 mei_reg_write(dev, H_CSR, csr_reg); 27#include "hbm.h"
28#include "hw-me.h"
29#include "client.h"
48 30
49 return IRQ_WAKE_THREAD;
50}
51 31
52/** 32/**
53 * _mei_cmpl - processes completed operation. 33 * mei_complete_handler - processes completed operation.
54 * 34 *
55 * @cl: private data of the file object. 35 * @cl: private data of the file object.
56 * @cb_pos: callback block. 36 * @cb_pos: callback block.
57 */ 37 */
58static void _mei_cmpl(struct mei_cl *cl, struct mei_cl_cb *cb_pos) 38void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
59{ 39{
60 if (cb_pos->fop_type == MEI_FOP_WRITE) { 40 if (cb_pos->fop_type == MEI_FOP_WRITE) {
61 mei_io_cb_free(cb_pos); 41 mei_io_cb_free(cb_pos);
@@ -150,8 +130,8 @@ quit:
150 dev_dbg(&dev->pdev->dev, "message read\n"); 130 dev_dbg(&dev->pdev->dev, "message read\n");
151 if (!buffer) { 131 if (!buffer) {
152 mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length); 132 mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
153 dev_dbg(&dev->pdev->dev, "discarding message, header =%08x.\n", 133 dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
154 *(u32 *) dev->rd_msg_buf); 134 MEI_HDR_PRM(mei_hdr));
155 } 135 }
156 136
157 return 0; 137 return 0;
@@ -179,7 +159,7 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
179 159
180 *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request)); 160 *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
181 161
182 if (mei_disconnect(dev, cl)) { 162 if (mei_hbm_cl_disconnect_req(dev, cl)) {
183 cl->status = 0; 163 cl->status = 0;
184 cb_pos->buf_idx = 0; 164 cb_pos->buf_idx = 0;
185 list_move_tail(&cb_pos->list, &cmpl_list->list); 165 list_move_tail(&cb_pos->list, &cmpl_list->list);
@@ -195,440 +175,6 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
195 return 0; 175 return 0;
196} 176}
197 177
198/**
199 * is_treat_specially_client - checks if the message belongs
200 * to the file private data.
201 *
202 * @cl: private data of the file object
203 * @rs: connect response bus message
204 *
205 */
206static bool is_treat_specially_client(struct mei_cl *cl,
207 struct hbm_client_connect_response *rs)
208{
209
210 if (cl->host_client_id == rs->host_addr &&
211 cl->me_client_id == rs->me_addr) {
212 if (!rs->status) {
213 cl->state = MEI_FILE_CONNECTED;
214 cl->status = 0;
215
216 } else {
217 cl->state = MEI_FILE_DISCONNECTED;
218 cl->status = -ENODEV;
219 }
220 cl->timer_count = 0;
221
222 return true;
223 }
224 return false;
225}
226
227/**
228 * mei_client_connect_response - connects to response irq routine
229 *
230 * @dev: the device structure
231 * @rs: connect response bus message
232 */
233static void mei_client_connect_response(struct mei_device *dev,
234 struct hbm_client_connect_response *rs)
235{
236
237 struct mei_cl *cl;
238 struct mei_cl_cb *pos = NULL, *next = NULL;
239
240 dev_dbg(&dev->pdev->dev,
241 "connect_response:\n"
242 "ME Client = %d\n"
243 "Host Client = %d\n"
244 "Status = %d\n",
245 rs->me_addr,
246 rs->host_addr,
247 rs->status);
248
249 /* if WD or iamthif client treat specially */
250
251 if (is_treat_specially_client(&(dev->wd_cl), rs)) {
252 dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
253 mei_watchdog_register(dev);
254
255 return;
256 }
257
258 if (is_treat_specially_client(&(dev->iamthif_cl), rs)) {
259 dev->iamthif_state = MEI_IAMTHIF_IDLE;
260 return;
261 }
262 list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
263
264 cl = pos->cl;
265 if (!cl) {
266 list_del(&pos->list);
267 return;
268 }
269 if (pos->fop_type == MEI_FOP_IOCTL) {
270 if (is_treat_specially_client(cl, rs)) {
271 list_del(&pos->list);
272 cl->status = 0;
273 cl->timer_count = 0;
274 break;
275 }
276 }
277 }
278}
279
280/**
281 * mei_client_disconnect_response - disconnects from response irq routine
282 *
283 * @dev: the device structure
284 * @rs: disconnect response bus message
285 */
286static void mei_client_disconnect_response(struct mei_device *dev,
287 struct hbm_client_connect_response *rs)
288{
289 struct mei_cl *cl;
290 struct mei_cl_cb *pos = NULL, *next = NULL;
291
292 dev_dbg(&dev->pdev->dev,
293 "disconnect_response:\n"
294 "ME Client = %d\n"
295 "Host Client = %d\n"
296 "Status = %d\n",
297 rs->me_addr,
298 rs->host_addr,
299 rs->status);
300
301 list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
302 cl = pos->cl;
303
304 if (!cl) {
305 list_del(&pos->list);
306 return;
307 }
308
309 dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
310 if (cl->host_client_id == rs->host_addr &&
311 cl->me_client_id == rs->me_addr) {
312
313 list_del(&pos->list);
314 if (!rs->status)
315 cl->state = MEI_FILE_DISCONNECTED;
316
317 cl->status = 0;
318 cl->timer_count = 0;
319 break;
320 }
321 }
322}
323
324/**
325 * same_flow_addr - tells if they have the same address.
326 *
327 * @file: private data of the file object.
328 * @flow: flow control.
329 *
330 * returns !=0, same; 0,not.
331 */
332static int same_flow_addr(struct mei_cl *cl, struct hbm_flow_control *flow)
333{
334 return (cl->host_client_id == flow->host_addr &&
335 cl->me_client_id == flow->me_addr);
336}
337
338/**
339 * add_single_flow_creds - adds single buffer credentials.
340 *
341 * @file: private data of the file object.
342 * @flow: flow control.
343 */
344static void add_single_flow_creds(struct mei_device *dev,
345 struct hbm_flow_control *flow)
346{
347 struct mei_me_client *client;
348 int i;
349
350 for (i = 0; i < dev->me_clients_num; i++) {
351 client = &dev->me_clients[i];
352 if (client && flow->me_addr == client->client_id) {
353 if (client->props.single_recv_buf) {
354 client->mei_flow_ctrl_creds++;
355 dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
356 flow->me_addr);
357 dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
358 client->mei_flow_ctrl_creds);
359 } else {
360 BUG(); /* error in flow control */
361 }
362 }
363 }
364}
365
366/**
367 * mei_client_flow_control_response - flow control response irq routine
368 *
369 * @dev: the device structure
370 * @flow_control: flow control response bus message
371 */
372static void mei_client_flow_control_response(struct mei_device *dev,
373 struct hbm_flow_control *flow_control)
374{
375 struct mei_cl *cl_pos = NULL;
376 struct mei_cl *cl_next = NULL;
377
378 if (!flow_control->host_addr) {
379 /* single receive buffer */
380 add_single_flow_creds(dev, flow_control);
381 } else {
382 /* normal connection */
383 list_for_each_entry_safe(cl_pos, cl_next,
384 &dev->file_list, link) {
385 dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in file_list\n");
386
387 dev_dbg(&dev->pdev->dev, "cl of host client %d ME client %d.\n",
388 cl_pos->host_client_id,
389 cl_pos->me_client_id);
390 dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
391 flow_control->host_addr,
392 flow_control->me_addr);
393 if (same_flow_addr(cl_pos, flow_control)) {
394 dev_dbg(&dev->pdev->dev, "recv ctrl msg for host %d ME %d.\n",
395 flow_control->host_addr,
396 flow_control->me_addr);
397 cl_pos->mei_flow_ctrl_creds++;
398 dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
399 cl_pos->mei_flow_ctrl_creds);
400 break;
401 }
402 }
403 }
404}
405
406/**
407 * same_disconn_addr - tells if they have the same address
408 *
409 * @file: private data of the file object.
410 * @disconn: disconnection request.
411 *
412 * returns !=0, same; 0,not.
413 */
414static int same_disconn_addr(struct mei_cl *cl,
415 struct hbm_client_connect_request *req)
416{
417 return (cl->host_client_id == req->host_addr &&
418 cl->me_client_id == req->me_addr);
419}
420
421/**
422 * mei_client_disconnect_request - disconnects from request irq routine
423 *
424 * @dev: the device structure.
425 * @disconnect_req: disconnect request bus message.
426 */
427static void mei_client_disconnect_request(struct mei_device *dev,
428 struct hbm_client_connect_request *disconnect_req)
429{
430 struct hbm_client_connect_response *disconnect_res;
431 struct mei_cl *pos, *next;
432 const size_t len = sizeof(struct hbm_client_connect_response);
433
434 list_for_each_entry_safe(pos, next, &dev->file_list, link) {
435 if (same_disconn_addr(pos, disconnect_req)) {
436 dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
437 disconnect_req->host_addr,
438 disconnect_req->me_addr);
439 pos->state = MEI_FILE_DISCONNECTED;
440 pos->timer_count = 0;
441 if (pos == &dev->wd_cl)
442 dev->wd_pending = false;
443 else if (pos == &dev->iamthif_cl)
444 dev->iamthif_timer = 0;
445
446 /* prepare disconnect response */
447 (void)mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
448 disconnect_res =
449 (struct hbm_client_connect_response *)
450 &dev->wr_ext_msg.data;
451 disconnect_res->hbm_cmd = CLIENT_DISCONNECT_RES_CMD;
452 disconnect_res->host_addr = pos->host_client_id;
453 disconnect_res->me_addr = pos->me_client_id;
454 disconnect_res->status = 0;
455 break;
456 }
457 }
458}
459
460/**
461 * mei_irq_thread_read_bus_message - bottom half read routine after ISR to
462 * handle the read bus message cmd processing.
463 *
464 * @dev: the device structure
465 * @mei_hdr: header of bus message
466 */
467static void mei_irq_thread_read_bus_message(struct mei_device *dev,
468 struct mei_msg_hdr *mei_hdr)
469{
470 struct mei_bus_message *mei_msg;
471 struct mei_me_client *me_client;
472 struct hbm_host_version_response *version_res;
473 struct hbm_client_connect_response *connect_res;
474 struct hbm_client_connect_response *disconnect_res;
475 struct hbm_client_connect_request *disconnect_req;
476 struct hbm_flow_control *flow_control;
477 struct hbm_props_response *props_res;
478 struct hbm_host_enum_response *enum_res;
479 struct hbm_host_stop_request *stop_req;
480
481 /* read the message to our buffer */
482 BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf));
483 mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
484 mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
485
486 switch (mei_msg->hbm_cmd) {
487 case HOST_START_RES_CMD:
488 version_res = (struct hbm_host_version_response *) mei_msg;
489 if (version_res->host_version_supported) {
490 dev->version.major_version = HBM_MAJOR_VERSION;
491 dev->version.minor_version = HBM_MINOR_VERSION;
492 if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
493 dev->init_clients_state == MEI_START_MESSAGE) {
494 dev->init_clients_timer = 0;
495 mei_host_enum_clients_message(dev);
496 } else {
497 dev->recvd_msg = false;
498 dev_dbg(&dev->pdev->dev, "IMEI reset due to received host start response bus message.\n");
499 mei_reset(dev, 1);
500 return;
501 }
502 } else {
503 u32 *buf = dev->wr_msg_buf;
504 const size_t len = sizeof(struct hbm_host_stop_request);
505
506 dev->version = version_res->me_max_version;
507
508 /* send stop message */
509 mei_hdr = mei_hbm_hdr(&buf[0], len);
510 stop_req = (struct hbm_host_stop_request *)&buf[1];
511 memset(stop_req, 0, len);
512 stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
513 stop_req->reason = DRIVER_STOP_REQUEST;
514
515 mei_write_message(dev, mei_hdr,
516 (unsigned char *)stop_req, len);
517 dev_dbg(&dev->pdev->dev, "version mismatch.\n");
518 return;
519 }
520
521 dev->recvd_msg = true;
522 dev_dbg(&dev->pdev->dev, "host start response message received.\n");
523 break;
524
525 case CLIENT_CONNECT_RES_CMD:
526 connect_res = (struct hbm_client_connect_response *) mei_msg;
527 mei_client_connect_response(dev, connect_res);
528 dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
529 wake_up(&dev->wait_recvd_msg);
530 break;
531
532 case CLIENT_DISCONNECT_RES_CMD:
533 disconnect_res = (struct hbm_client_connect_response *) mei_msg;
534 mei_client_disconnect_response(dev, disconnect_res);
535 dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
536 wake_up(&dev->wait_recvd_msg);
537 break;
538
539 case MEI_FLOW_CONTROL_CMD:
540 flow_control = (struct hbm_flow_control *) mei_msg;
541 mei_client_flow_control_response(dev, flow_control);
542 dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
543 break;
544
545 case HOST_CLIENT_PROPERTIES_RES_CMD:
546 props_res = (struct hbm_props_response *)mei_msg;
547 me_client = &dev->me_clients[dev->me_client_presentation_num];
548
549 if (props_res->status || !dev->me_clients) {
550 dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
551 mei_reset(dev, 1);
552 return;
553 }
554
555 if (me_client->client_id != props_res->address) {
556 dev_err(&dev->pdev->dev,
557 "Host client properties reply mismatch\n");
558 mei_reset(dev, 1);
559
560 return;
561 }
562
563 if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
564 dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
565 dev_err(&dev->pdev->dev,
566 "Unexpected client properties reply\n");
567 mei_reset(dev, 1);
568
569 return;
570 }
571
572 me_client->props = props_res->client_properties;
573 dev->me_client_index++;
574 dev->me_client_presentation_num++;
575
576 mei_host_client_enumerate(dev);
577
578 break;
579
580 case HOST_ENUM_RES_CMD:
581 enum_res = (struct hbm_host_enum_response *) mei_msg;
582 memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
583 if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
584 dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
585 dev->init_clients_timer = 0;
586 dev->me_client_presentation_num = 0;
587 dev->me_client_index = 0;
588 mei_allocate_me_clients_storage(dev);
589 dev->init_clients_state =
590 MEI_CLIENT_PROPERTIES_MESSAGE;
591
592 mei_host_client_enumerate(dev);
593 } else {
594 dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
595 mei_reset(dev, 1);
596 return;
597 }
598 break;
599
600 case HOST_STOP_RES_CMD:
601 dev->dev_state = MEI_DEV_DISABLED;
602 dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
603 mei_reset(dev, 1);
604 break;
605
606 case CLIENT_DISCONNECT_REQ_CMD:
607 /* search for client */
608 disconnect_req = (struct hbm_client_connect_request *)mei_msg;
609 mei_client_disconnect_request(dev, disconnect_req);
610 break;
611
612 case ME_STOP_REQ_CMD:
613 {
614 /* prepare stop request: sent in next interrupt event */
615
616 const size_t len = sizeof(struct hbm_host_stop_request);
617
618 mei_hdr = mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
619 stop_req = (struct hbm_host_stop_request *)&dev->wr_ext_msg.data;
620 memset(stop_req, 0, len);
621 stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
622 stop_req->reason = DRIVER_STOP_REQUEST;
623 break;
624 }
625 default:
626 BUG();
627 break;
628
629 }
630}
631
632 178
633/** 179/**
634 * _mei_hb_read - processes read related operation. 180 * _mei_hb_read - processes read related operation.
@@ -655,7 +201,7 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
655 201
656 *slots -= mei_data2slots(sizeof(struct hbm_flow_control)); 202 *slots -= mei_data2slots(sizeof(struct hbm_flow_control));
657 203
658 if (mei_send_flow_control(dev, cl)) { 204 if (mei_hbm_cl_flow_control_req(dev, cl)) {
659 cl->status = -ENODEV; 205 cl->status = -ENODEV;
660 cb_pos->buf_idx = 0; 206 cb_pos->buf_idx = 0;
661 list_move_tail(&cb_pos->list, &cmpl_list->list); 207 list_move_tail(&cb_pos->list, &cmpl_list->list);
@@ -691,8 +237,8 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
691 } 237 }
692 238
693 cl->state = MEI_FILE_CONNECTING; 239 cl->state = MEI_FILE_CONNECTING;
694 *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request)); 240 *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
695 if (mei_connect(dev, cl)) { 241 if (mei_hbm_cl_connect_req(dev, cl)) {
696 cl->status = -ENODEV; 242 cl->status = -ENODEV;
697 cb_pos->buf_idx = 0; 243 cb_pos->buf_idx = 0;
698 list_del(&cb_pos->list); 244 list_del(&cb_pos->list);
@@ -717,25 +263,24 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
717static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots, 263static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
718 struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list) 264 struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
719{ 265{
720 struct mei_msg_hdr *mei_hdr; 266 struct mei_msg_hdr mei_hdr;
721 struct mei_cl *cl = cb->cl; 267 struct mei_cl *cl = cb->cl;
722 size_t len = cb->request_buffer.size - cb->buf_idx; 268 size_t len = cb->request_buffer.size - cb->buf_idx;
723 size_t msg_slots = mei_data2slots(len); 269 size_t msg_slots = mei_data2slots(len);
724 270
725 mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0]; 271 mei_hdr.host_addr = cl->host_client_id;
726 mei_hdr->host_addr = cl->host_client_id; 272 mei_hdr.me_addr = cl->me_client_id;
727 mei_hdr->me_addr = cl->me_client_id; 273 mei_hdr.reserved = 0;
728 mei_hdr->reserved = 0;
729 274
730 if (*slots >= msg_slots) { 275 if (*slots >= msg_slots) {
731 mei_hdr->length = len; 276 mei_hdr.length = len;
732 mei_hdr->msg_complete = 1; 277 mei_hdr.msg_complete = 1;
733 /* Split the message only if we can write the whole host buffer */ 278 /* Split the message only if we can write the whole host buffer */
734 } else if (*slots == dev->hbuf_depth) { 279 } else if (*slots == dev->hbuf_depth) {
735 msg_slots = *slots; 280 msg_slots = *slots;
736 len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); 281 len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
737 mei_hdr->length = len; 282 mei_hdr.length = len;
738 mei_hdr->msg_complete = 0; 283 mei_hdr.msg_complete = 0;
739 } else { 284 } else {
740 /* wait for next time the host buffer is empty */ 285 /* wait for next time the host buffer is empty */
741 return 0; 286 return 0;
@@ -743,23 +288,22 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
743 288
744 dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n", 289 dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
745 cb->request_buffer.size, cb->buf_idx); 290 cb->request_buffer.size, cb->buf_idx);
746 dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n", 291 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
747 mei_hdr->length, mei_hdr->msg_complete);
748 292
749 *slots -= msg_slots; 293 *slots -= msg_slots;
750 if (mei_write_message(dev, mei_hdr, 294 if (mei_write_message(dev, &mei_hdr,
751 cb->request_buffer.data + cb->buf_idx, len)) { 295 cb->request_buffer.data + cb->buf_idx)) {
752 cl->status = -ENODEV; 296 cl->status = -ENODEV;
753 list_move_tail(&cb->list, &cmpl_list->list); 297 list_move_tail(&cb->list, &cmpl_list->list);
754 return -ENODEV; 298 return -ENODEV;
755 } 299 }
756 300
757 if (mei_flow_ctrl_reduce(dev, cl)) 301 if (mei_cl_flow_ctrl_reduce(cl))
758 return -ENODEV; 302 return -ENODEV;
759 303
760 cl->status = 0; 304 cl->status = 0;
761 cb->buf_idx += mei_hdr->length; 305 cb->buf_idx += mei_hdr.length;
762 if (mei_hdr->msg_complete) 306 if (mei_hdr.msg_complete)
763 list_move_tail(&cb->list, &dev->write_waiting_list.list); 307 list_move_tail(&cb->list, &dev->write_waiting_list.list);
764 308
765 return 0; 309 return 0;
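
The rewritten mei_irq_thread_write_complete() above keeps the original fragmentation policy: send the whole remaining payload with msg_complete set when it fits in the free slots, emit a partial fragment only when the host buffer is completely empty, and otherwise wait for the next empty-buffer interrupt. A small sketch of that decision; pick_fragment() and the concrete sizes are illustrative, not driver code:

#include <stddef.h>
#include <stdio.h>

#define SLOT_BYTES 4u
#define HDR_BYTES  4u

/* Decide how many payload bytes to send now: whole message if it fits,
 * a partial fragment only when the buffer is entirely empty, else wait. */
static size_t pick_fragment(size_t remaining, size_t free_slots,
			    size_t buf_depth, int *complete)
{
	size_t need = (HDR_BYTES + remaining + SLOT_BYTES - 1) / SLOT_BYTES;

	if (free_slots >= need) {
		*complete = 1;
		return remaining;				/* send it all */
	}
	if (free_slots == buf_depth) {
		*complete = 0;
		return free_slots * SLOT_BYTES - HDR_BYTES;	/* fill the buffer */
	}
	*complete = -1;						/* wait for space */
	return 0;
}

int main(void)
{
	int complete;
	size_t n = pick_fragment(200, 32, 32, &complete);

	printf("send %zu bytes, complete=%d\n", n, complete);	/* 124 bytes, 0 */
	return 0;
}
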
@@ -769,15 +313,14 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
769 * mei_irq_thread_read_handler - bottom half read routine after ISR to 313 * mei_irq_thread_read_handler - bottom half read routine after ISR to
770 * handle the read processing. 314 * handle the read processing.
771 * 315 *
772 * @cmpl_list: An instance of our list structure
773 * @dev: the device structure 316 * @dev: the device structure
317 * @cmpl_list: An instance of our list structure
774 * @slots: slots to read. 318 * @slots: slots to read.
775 * 319 *
776 * returns 0 on success, <0 on failure. 320 * returns 0 on success, <0 on failure.
777 */ 321 */
778static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list, 322int mei_irq_read_handler(struct mei_device *dev,
779 struct mei_device *dev, 323 struct mei_cl_cb *cmpl_list, s32 *slots)
780 s32 *slots)
781{ 324{
782 struct mei_msg_hdr *mei_hdr; 325 struct mei_msg_hdr *mei_hdr;
783 struct mei_cl *cl_pos = NULL; 326 struct mei_cl *cl_pos = NULL;
@@ -785,13 +328,13 @@ static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
785 int ret = 0; 328 int ret = 0;
786 329
787 if (!dev->rd_msg_hdr) { 330 if (!dev->rd_msg_hdr) {
788 dev->rd_msg_hdr = mei_mecbrw_read(dev); 331 dev->rd_msg_hdr = mei_read_hdr(dev);
789 dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); 332 dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
790 (*slots)--; 333 (*slots)--;
791 dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); 334 dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
792 } 335 }
793 mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr; 336 mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
794 dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n", mei_hdr->length); 337 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
795 338
796 if (mei_hdr->reserved || !dev->rd_msg_hdr) { 339 if (mei_hdr->reserved || !dev->rd_msg_hdr) {
797 dev_dbg(&dev->pdev->dev, "corrupted message header.\n"); 340 dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
@@ -830,19 +373,18 @@ static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
830 /* decide where to read the message to */ 373 /* decide where to read the message to */
831 if (!mei_hdr->host_addr) { 374 if (!mei_hdr->host_addr) {
832 dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n"); 375 dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
833 mei_irq_thread_read_bus_message(dev, mei_hdr); 376 mei_hbm_dispatch(dev, mei_hdr);
834 dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n"); 377 dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
835 } else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && 378 } else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
836 (MEI_FILE_CONNECTED == dev->iamthif_cl.state) && 379 (MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
837 (dev->iamthif_state == MEI_IAMTHIF_READING)) { 380 (dev->iamthif_state == MEI_IAMTHIF_READING)) {
838 dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n"); 381 dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");
839 dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n", 382
840 mei_hdr->length); 383 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
841 384
842 ret = mei_amthif_irq_read_message(cmpl_list, dev, mei_hdr); 385 ret = mei_amthif_irq_read_message(cmpl_list, dev, mei_hdr);
843 if (ret) 386 if (ret)
844 goto end; 387 goto end;
845
846 } else { 388 } else {
847 dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_client_message.\n"); 389 dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_client_message.\n");
848 ret = mei_irq_thread_read_client_message(cmpl_list, 390 ret = mei_irq_thread_read_client_message(cmpl_list,
@@ -869,15 +411,15 @@ end:
869 411
870 412
871/** 413/**
872 * mei_irq_thread_write_handler - bottom half write routine after 414 * mei_irq_write_handler - dispatch write requests
873 * ISR to handle the write processing. 415 * after irq received
874 * 416 *
875 * @dev: the device structure 417 * @dev: the device structure
876 * @cmpl_list: An instance of our list structure 418 * @cmpl_list: An instance of our list structure
877 * 419 *
878 * returns 0 on success, <0 on failure. 420 * returns 0 on success, <0 on failure.
879 */ 421 */
880static int mei_irq_thread_write_handler(struct mei_device *dev, 422int mei_irq_write_handler(struct mei_device *dev,
881 struct mei_cl_cb *cmpl_list) 423 struct mei_cl_cb *cmpl_list)
882{ 424{
883 425
@@ -887,7 +429,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
887 s32 slots; 429 s32 slots;
888 int ret; 430 int ret;
889 431
890 if (!mei_hbuf_is_empty(dev)) { 432 if (!mei_hbuf_is_ready(dev)) {
891 dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n"); 433 dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
892 return 0; 434 return 0;
893 } 435 }
@@ -930,16 +472,16 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
930 472
931 if (dev->wr_ext_msg.hdr.length) { 473 if (dev->wr_ext_msg.hdr.length) {
932 mei_write_message(dev, &dev->wr_ext_msg.hdr, 474 mei_write_message(dev, &dev->wr_ext_msg.hdr,
933 dev->wr_ext_msg.data, dev->wr_ext_msg.hdr.length); 475 dev->wr_ext_msg.data);
934 slots -= mei_data2slots(dev->wr_ext_msg.hdr.length); 476 slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
935 dev->wr_ext_msg.hdr.length = 0; 477 dev->wr_ext_msg.hdr.length = 0;
936 } 478 }
937 if (dev->dev_state == MEI_DEV_ENABLED) { 479 if (dev->dev_state == MEI_DEV_ENABLED) {
938 if (dev->wd_pending && 480 if (dev->wd_pending &&
939 mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) { 481 mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
940 if (mei_wd_send(dev)) 482 if (mei_wd_send(dev))
941 dev_dbg(&dev->pdev->dev, "wd send failed.\n"); 483 dev_dbg(&dev->pdev->dev, "wd send failed.\n");
942 else if (mei_flow_ctrl_reduce(dev, &dev->wd_cl)) 484 else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
943 return -ENODEV; 485 return -ENODEV;
944 486
945 dev->wd_pending = false; 487 dev->wd_pending = false;
@@ -978,7 +520,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
978 break; 520 break;
979 case MEI_FOP_IOCTL: 521 case MEI_FOP_IOCTL:
980 /* connect message */ 522 /* connect message */
981 if (mei_other_client_is_connecting(dev, cl)) 523 if (mei_cl_is_other_connecting(cl))
982 continue; 524 continue;
983 ret = _mei_irq_thread_ioctl(dev, &slots, pos, 525 ret = _mei_irq_thread_ioctl(dev, &slots, pos,
984 cl, cmpl_list); 526 cl, cmpl_list);
@@ -998,7 +540,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
998 cl = pos->cl; 540 cl = pos->cl;
999 if (cl == NULL) 541 if (cl == NULL)
1000 continue; 542 continue;
1001 if (mei_flow_ctrl_creds(dev, cl) <= 0) { 543 if (mei_cl_flow_ctrl_creds(cl) <= 0) {
1002 dev_dbg(&dev->pdev->dev, 544 dev_dbg(&dev->pdev->dev,
1003 "No flow control credentials for client %d, not sending.\n", 545 "No flow control credentials for client %d, not sending.\n",
1004 cl->host_client_id); 546 cl->host_client_id);
@@ -1123,115 +665,3 @@ out:
1123 mutex_unlock(&dev->device_lock); 665 mutex_unlock(&dev->device_lock);
1124} 666}
1125 667
1126/**
1127 * mei_interrupt_thread_handler - function called after ISR to handle the interrupt
1128 * processing.
1129 *
1130 * @irq: The irq number
1131 * @dev_id: pointer to the device structure
1132 *
1133 * returns irqreturn_t
1134 *
1135 */
1136irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
1137{
1138 struct mei_device *dev = (struct mei_device *) dev_id;
1139 struct mei_cl_cb complete_list;
1140 struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
1141 struct mei_cl *cl;
1142 s32 slots;
1143 int rets;
1144 bool bus_message_received;
1145
1146
1147 dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
1148 /* initialize our complete list */
1149 mutex_lock(&dev->device_lock);
1150 mei_io_list_init(&complete_list);
1151 dev->host_hw_state = mei_hcsr_read(dev);
1152
1153 /* Ack the interrupt here
1154 * In case of MSI we don't go through the quick handler */
1155 if (pci_dev_msi_enabled(dev->pdev))
1156 mei_reg_write(dev, H_CSR, dev->host_hw_state);
1157
1158 dev->me_hw_state = mei_mecsr_read(dev);
1159
1160 /* check if ME wants a reset */
1161 if ((dev->me_hw_state & ME_RDY_HRA) == 0 &&
1162 dev->dev_state != MEI_DEV_RESETING &&
1163 dev->dev_state != MEI_DEV_INITIALIZING) {
1164 dev_dbg(&dev->pdev->dev, "FW not ready.\n");
1165 mei_reset(dev, 1);
1166 mutex_unlock(&dev->device_lock);
1167 return IRQ_HANDLED;
1168 }
1169
1170 /* check if we need to start the dev */
1171 if ((dev->host_hw_state & H_RDY) == 0) {
1172 if ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA) {
1173 dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
1174 dev->host_hw_state |= (H_IE | H_IG | H_RDY);
1175 mei_hcsr_set(dev);
1176 dev->dev_state = MEI_DEV_INIT_CLIENTS;
1177 dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
1178 /* link is established
1179 * start sending messages.
1180 */
1181 mei_host_start_message(dev);
1182 mutex_unlock(&dev->device_lock);
1183 return IRQ_HANDLED;
1184 } else {
1185 dev_dbg(&dev->pdev->dev, "FW not ready.\n");
1186 mutex_unlock(&dev->device_lock);
1187 return IRQ_HANDLED;
1188 }
1189 }
1190 /* check slots available for reading */
1191 slots = mei_count_full_read_slots(dev);
1192 while (slots > 0) {
1193 /* we have urgent data to send so break the read */
1194 if (dev->wr_ext_msg.hdr.length)
1195 break;
1196 dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
1197 dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_handler.\n");
1198 rets = mei_irq_thread_read_handler(&complete_list, dev, &slots);
1199 if (rets)
1200 goto end;
1201 }
1202 rets = mei_irq_thread_write_handler(dev, &complete_list);
1203end:
1204 dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
1205 dev->host_hw_state = mei_hcsr_read(dev);
1206 dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
1207
1208 bus_message_received = false;
1209 if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
1210 dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
1211 bus_message_received = true;
1212 }
1213 mutex_unlock(&dev->device_lock);
1214 if (bus_message_received) {
1215 dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
1216 wake_up_interruptible(&dev->wait_recvd_msg);
1217 bus_message_received = false;
1218 }
1219 if (list_empty(&complete_list.list))
1220 return IRQ_HANDLED;
1221
1222
1223 list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
1224 cl = cb_pos->cl;
1225 list_del(&cb_pos->list);
1226 if (cl) {
1227 if (cl != &dev->iamthif_cl) {
1228 dev_dbg(&dev->pdev->dev, "completing call back.\n");
1229 _mei_cmpl(cl, cb_pos);
1230 cb_pos = NULL;
1231 } else if (cl == &dev->iamthif_cl) {
1232 mei_amthif_complete(dev, cb_pos);
1233 }
1234 }
1235 }
1236 return IRQ_HANDLED;
1237}
diff --git a/drivers/misc/mei/iorw.c b/drivers/misc/mei/iorw.c
deleted file mode 100644
index eb93a1b53b9b..000000000000
--- a/drivers/misc/mei/iorw.c
+++ /dev/null
@@ -1,366 +0,0 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17
18#include <linux/kernel.h>
19#include <linux/fs.h>
20#include <linux/errno.h>
21#include <linux/types.h>
22#include <linux/fcntl.h>
23#include <linux/aio.h>
24#include <linux/pci.h>
25#include <linux/init.h>
26#include <linux/ioctl.h>
27#include <linux/cdev.h>
28#include <linux/list.h>
29#include <linux/delay.h>
30#include <linux/sched.h>
31#include <linux/uuid.h>
32#include <linux/jiffies.h>
33#include <linux/uaccess.h>
34
35
36#include "mei_dev.h"
37#include "hw.h"
38#include <linux/mei.h>
39#include "interface.h"
40
41/**
42 * mei_io_cb_free - free mei_cb_private related memory
43 *
44 * @cb: mei callback struct
45 */
46void mei_io_cb_free(struct mei_cl_cb *cb)
47{
48 if (cb == NULL)
49 return;
50
51 kfree(cb->request_buffer.data);
52 kfree(cb->response_buffer.data);
53 kfree(cb);
54}
55/**
56 * mei_io_cb_init - allocate and initialize io callback
57 *
58 * @cl - mei client
59 * @file: pointer to file structure
60 *
61 * returns mei_cl_cb pointer or NULL;
62 */
63struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
64{
65 struct mei_cl_cb *cb;
66
67 cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
68 if (!cb)
69 return NULL;
70
71 mei_io_list_init(cb);
72
73 cb->file_object = fp;
74 cb->cl = cl;
75 cb->buf_idx = 0;
76 return cb;
77}
78
79
80/**
81 * mei_io_cb_alloc_req_buf - allocate request buffer
82 *
83 * @cb - io callback structure
84 * @size: size of the buffer
85 *
86 * returns 0 on success
87 * -EINVAL if cb is NULL
88 * -ENOMEM if allocation failed
89 */
90int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
91{
92 if (!cb)
93 return -EINVAL;
94
95 if (length == 0)
96 return 0;
97
98 cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
99 if (!cb->request_buffer.data)
100 return -ENOMEM;
101 cb->request_buffer.size = length;
102 return 0;
103}
104/**
105 * mei_io_cb_alloc_resp_buf - allocate response buffer
106 *
107 * @cb - io callback structure
108 * @size: size of the buffer
109 *
110 * returns 0 on success
111 * -EINVAL if cb is NULL
112 * -ENOMEM if allocation failed
113 */
114int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
115{
116 if (!cb)
117 return -EINVAL;
118
119 if (length == 0)
120 return 0;
121
122 cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
123 if (!cb->response_buffer.data)
124 return -ENOMEM;
125 cb->response_buffer.size = length;
126 return 0;
127}
128
129
130/**
131 * mei_me_cl_by_id return index to me_clients for client_id
132 *
133 * @dev: the device structure
134 * @client_id: me client id
135 *
136 * Locking: called under "dev->device_lock" lock
137 *
138 * returns index on success, -ENOENT on failure.
139 */
140
141int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
142{
143 int i;
144 for (i = 0; i < dev->me_clients_num; i++)
145 if (dev->me_clients[i].client_id == client_id)
146 break;
147 if (WARN_ON(dev->me_clients[i].client_id != client_id))
148 return -ENOENT;
149
150 if (i == dev->me_clients_num)
151 return -ENOENT;
152
153 return i;
154}
155
156/**
157 * mei_ioctl_connect_client - the connect to fw client IOCTL function
158 *
159 * @dev: the device structure
160 * @data: IOCTL connect data, input and output parameters
161 * @file: private data of the file object
162 *
163 * Locking: called under "dev->device_lock" lock
164 *
165 * returns 0 on success, <0 on failure.
166 */
167int mei_ioctl_connect_client(struct file *file,
168 struct mei_connect_client_data *data)
169{
170 struct mei_device *dev;
171 struct mei_cl_cb *cb;
172 struct mei_client *client;
173 struct mei_cl *cl;
174 long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
175 int i;
176 int err;
177 int rets;
178
179 cl = file->private_data;
180 if (WARN_ON(!cl || !cl->dev))
181 return -ENODEV;
182
183 dev = cl->dev;
184
185 dev_dbg(&dev->pdev->dev, "mei_ioctl_connect_client() Entry\n");
186
187 /* buffered ioctl cb */
188 cb = mei_io_cb_init(cl, file);
189 if (!cb) {
190 rets = -ENOMEM;
191 goto end;
192 }
193
194 cb->fop_type = MEI_FOP_IOCTL;
195
196 if (dev->dev_state != MEI_DEV_ENABLED) {
197 rets = -ENODEV;
198 goto end;
199 }
200 if (cl->state != MEI_FILE_INITIALIZING &&
201 cl->state != MEI_FILE_DISCONNECTED) {
202 rets = -EBUSY;
203 goto end;
204 }
205
206 /* find ME client we're trying to connect to */
207 i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
208 if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
209 cl->me_client_id = dev->me_clients[i].client_id;
210 cl->state = MEI_FILE_CONNECTING;
211 }
212
213 dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
214 cl->me_client_id);
215 dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
216 dev->me_clients[i].props.protocol_version);
217 dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
218 dev->me_clients[i].props.max_msg_length);
219
220 /* if we're connecting to amthi client then we will use the
221 * existing connection
222 */
223 if (uuid_le_cmp(data->in_client_uuid, mei_amthi_guid) == 0) {
224 dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
225 if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
226 rets = -ENODEV;
227 goto end;
228 }
229 clear_bit(cl->host_client_id, dev->host_clients_map);
230 mei_me_cl_unlink(dev, cl);
231
232 kfree(cl);
233 cl = NULL;
234 file->private_data = &dev->iamthif_cl;
235
236 client = &data->out_client_properties;
237 client->max_msg_length =
238 dev->me_clients[i].props.max_msg_length;
239 client->protocol_version =
240 dev->me_clients[i].props.protocol_version;
241 rets = dev->iamthif_cl.status;
242
243 goto end;
244 }
245
246 if (cl->state != MEI_FILE_CONNECTING) {
247 rets = -ENODEV;
248 goto end;
249 }
250
251
252 /* prepare the output buffer */
253 client = &data->out_client_properties;
254 client->max_msg_length = dev->me_clients[i].props.max_msg_length;
255 client->protocol_version = dev->me_clients[i].props.protocol_version;
256 dev_dbg(&dev->pdev->dev, "Can connect?\n");
257 if (dev->mei_host_buffer_is_empty
258 && !mei_other_client_is_connecting(dev, cl)) {
259 dev_dbg(&dev->pdev->dev, "Sending Connect Message\n");
260 dev->mei_host_buffer_is_empty = false;
261 if (mei_connect(dev, cl)) {
262 dev_dbg(&dev->pdev->dev, "Sending connect message - failed\n");
263 rets = -ENODEV;
264 goto end;
265 } else {
266 dev_dbg(&dev->pdev->dev, "Sending connect message - succeeded\n");
267 cl->timer_count = MEI_CONNECT_TIMEOUT;
268 list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
269 }
270
271
272 } else {
273 dev_dbg(&dev->pdev->dev, "Queuing the connect request due to device busy\n");
274 dev_dbg(&dev->pdev->dev, "add connect cb to control write list.\n");
275 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
276 }
277 mutex_unlock(&dev->device_lock);
278 err = wait_event_timeout(dev->wait_recvd_msg,
279 (MEI_FILE_CONNECTED == cl->state ||
280 MEI_FILE_DISCONNECTED == cl->state), timeout);
281
282 mutex_lock(&dev->device_lock);
283 if (MEI_FILE_CONNECTED == cl->state) {
284 dev_dbg(&dev->pdev->dev, "successfully connected to FW client.\n");
285 rets = cl->status;
286 goto end;
287 } else {
288 dev_dbg(&dev->pdev->dev, "failed to connect to FW client.cl->state = %d.\n",
289 cl->state);
290 if (!err) {
291 dev_dbg(&dev->pdev->dev,
292 "wait_event_interruptible_timeout failed on client"
293 " connect message fw response message.\n");
294 }
295 rets = -EFAULT;
296
297 mei_io_list_flush(&dev->ctrl_rd_list, cl);
298 mei_io_list_flush(&dev->ctrl_wr_list, cl);
299 goto end;
300 }
301 rets = 0;
302end:
303 dev_dbg(&dev->pdev->dev, "free connect cb memory.");
304 mei_io_cb_free(cb);
305 return rets;
306}
307
308/**
309 * mei_start_read - the start read client message function.
310 *
311 * @dev: the device structure
312 * @if_num: minor number
313 * @cl: private data of the file object
314 *
315 * returns 0 on success, <0 on failure.
316 */
317int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
318{
319 struct mei_cl_cb *cb;
320 int rets;
321 int i;
322
323 if (cl->state != MEI_FILE_CONNECTED)
324 return -ENODEV;
325
326 if (dev->dev_state != MEI_DEV_ENABLED)
327 return -ENODEV;
328
329 if (cl->read_pending || cl->read_cb) {
330 dev_dbg(&dev->pdev->dev, "read is pending.\n");
331 return -EBUSY;
332 }
333 i = mei_me_cl_by_id(dev, cl->me_client_id);
334 if (i < 0) {
335 dev_err(&dev->pdev->dev, "no such me client %d\n",
336 cl->me_client_id);
337 return -ENODEV;
338 }
339
340 cb = mei_io_cb_init(cl, NULL);
341 if (!cb)
342 return -ENOMEM;
343
344 rets = mei_io_cb_alloc_resp_buf(cb,
345 dev->me_clients[i].props.max_msg_length);
346 if (rets)
347 goto err;
348
349 cb->fop_type = MEI_FOP_READ;
350 cl->read_cb = cb;
351 if (dev->mei_host_buffer_is_empty) {
352 dev->mei_host_buffer_is_empty = false;
353 if (mei_send_flow_control(dev, cl)) {
354 rets = -ENODEV;
355 goto err;
356 }
357 list_add_tail(&cb->list, &dev->read_list.list);
358 } else {
359 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
360 }
361 return rets;
362err:
363 mei_io_cb_free(cb);
364 return rets;
365}
366
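
The helpers deleted with iorw.c share one allocate/queue/free pattern: mei_io_cb_init() builds the callback, mei_io_cb_alloc_resp_buf() (or the _req_buf variant) sizes its buffer, and mei_io_cb_free() releases the buffers together with the callback. A minimal sketch of that pattern, modelled on the removed mei_start_read() above (example_queue_read is illustrative, not part of the patch):

/* Sketch of the cb allocate/queue pattern used by the removed helpers. */
static int example_queue_read(struct mei_device *dev, struct mei_cl *cl,
			      size_t max_len)
{
	struct mei_cl_cb *cb;
	int rets;

	cb = mei_io_cb_init(cl, NULL);		/* no file object backs this cb */
	if (!cb)
		return -ENOMEM;

	rets = mei_io_cb_alloc_resp_buf(cb, max_len);
	if (rets) {
		mei_io_cb_free(cb);		/* also frees the cb's buffers */
		return rets;
	}

	cb->fop_type = MEI_FOP_READ;
	list_add_tail(&cb->list, &dev->read_list.list);
	return 0;
}
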
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 43fb52ff98ad..903f809b21f7 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -37,79 +37,11 @@
37#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38#include <linux/miscdevice.h> 38#include <linux/miscdevice.h>
39 39
40#include "mei_dev.h"
41#include <linux/mei.h> 40#include <linux/mei.h>
42#include "interface.h"
43
44/* AMT device is a singleton on the platform */
45static struct pci_dev *mei_pdev;
46
47/* mei_pci_tbl - PCI Device ID Table */
48static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
49 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
50 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
51 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
52 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
53 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
54 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
55 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
56 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
57 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
58 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
59 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
60 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
61 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
62 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
63 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
64 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
65 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
66 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
67 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
68 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
69 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
70 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
71 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
72 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
73 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
74 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
75 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
76 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
77 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
78 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
79 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
80 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
81 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
82
83 /* required last entry */
84 {0, }
85};
86
87MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
88 41
89static DEFINE_MUTEX(mei_mutex); 42#include "mei_dev.h"
90 43#include "hw-me.h"
91 44#include "client.h"
92/**
93 * find_read_list_entry - find read list entry
94 *
95 * @dev: device structure
96 * @file: pointer to file structure
97 *
98 * returns cb on success, NULL on error
99 */
100static struct mei_cl_cb *find_read_list_entry(
101 struct mei_device *dev,
102 struct mei_cl *cl)
103{
104 struct mei_cl_cb *pos = NULL;
105 struct mei_cl_cb *next = NULL;
106
107 dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
108 list_for_each_entry_safe(pos, next, &dev->read_list.list, list)
109 if (mei_cl_cmp_id(cl, pos->cl))
110 return pos;
111 return NULL;
112}
113 45
114/** 46/**
115 * mei_open - the open function 47 * mei_open - the open function
@@ -121,16 +53,20 @@ static struct mei_cl_cb *find_read_list_entry(
121 */ 53 */
122static int mei_open(struct inode *inode, struct file *file) 54static int mei_open(struct inode *inode, struct file *file)
123{ 55{
56 struct miscdevice *misc = file->private_data;
57 struct pci_dev *pdev;
124 struct mei_cl *cl; 58 struct mei_cl *cl;
125 struct mei_device *dev; 59 struct mei_device *dev;
126 unsigned long cl_id; 60
127 int err; 61 int err;
128 62
129 err = -ENODEV; 63 err = -ENODEV;
130 if (!mei_pdev) 64 if (!misc->parent)
131 goto out; 65 goto out;
132 66
133 dev = pci_get_drvdata(mei_pdev); 67 pdev = container_of(misc->parent, struct pci_dev, dev);
68
69 dev = pci_get_drvdata(pdev);
134 if (!dev) 70 if (!dev)
135 goto out; 71 goto out;
136 72
@@ -153,24 +89,9 @@ static int mei_open(struct inode *inode, struct file *file)
153 goto out_unlock; 89 goto out_unlock;
154 } 90 }
155 91
156 cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX); 92 err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
157 if (cl_id >= MEI_CLIENTS_MAX) { 93 if (err)
 158 dev_err(&dev->pdev->dev, "client_id exceeded %d",
 159 MEI_CLIENTS_MAX);
160 goto out_unlock; 94 goto out_unlock;
161 }
162
163 cl->host_client_id = cl_id;
164
165 dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);
166
167 dev->open_handle_count++;
168
169 list_add_tail(&cl->link, &dev->file_list);
170
171 set_bit(cl->host_client_id, dev->host_clients_map);
172 cl->state = MEI_FILE_INITIALIZING;
173 cl->sm_state = 0;
174 95
175 file->private_data = cl; 96 file->private_data = cl;
176 mutex_unlock(&dev->device_lock); 97 mutex_unlock(&dev->device_lock);
@@ -216,7 +137,7 @@ static int mei_release(struct inode *inode, struct file *file)
216 "ME client = %d\n", 137 "ME client = %d\n",
217 cl->host_client_id, 138 cl->host_client_id,
218 cl->me_client_id); 139 cl->me_client_id);
219 rets = mei_disconnect_host_client(dev, cl); 140 rets = mei_cl_disconnect(cl);
220 } 141 }
221 mei_cl_flush_queues(cl); 142 mei_cl_flush_queues(cl);
222 dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n", 143 dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
@@ -227,12 +148,13 @@ static int mei_release(struct inode *inode, struct file *file)
227 clear_bit(cl->host_client_id, dev->host_clients_map); 148 clear_bit(cl->host_client_id, dev->host_clients_map);
228 dev->open_handle_count--; 149 dev->open_handle_count--;
229 } 150 }
230 mei_me_cl_unlink(dev, cl); 151 mei_cl_unlink(cl);
152
231 153
232 /* free read cb */ 154 /* free read cb */
233 cb = NULL; 155 cb = NULL;
234 if (cl->read_cb) { 156 if (cl->read_cb) {
235 cb = find_read_list_entry(dev, cl); 157 cb = mei_cl_find_read_cb(cl);
236 /* Remove entry from read list */ 158 /* Remove entry from read list */
237 if (cb) 159 if (cb)
238 list_del(&cb->list); 160 list_del(&cb->list);
@@ -322,7 +244,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
322 goto out; 244 goto out;
323 } 245 }
324 246
325 err = mei_start_read(dev, cl); 247 err = mei_cl_read_start(cl);
326 if (err && err != -EBUSY) { 248 if (err && err != -EBUSY) {
327 dev_dbg(&dev->pdev->dev, 249 dev_dbg(&dev->pdev->dev,
328 "mei start read failure with status = %d\n", err); 250 "mei start read failure with status = %d\n", err);
@@ -393,14 +315,13 @@ copy_buffer:
393 goto out; 315 goto out;
394 316
395free: 317free:
396 cb_pos = find_read_list_entry(dev, cl); 318 cb_pos = mei_cl_find_read_cb(cl);
397 /* Remove entry from read list */ 319 /* Remove entry from read list */
398 if (cb_pos) 320 if (cb_pos)
399 list_del(&cb_pos->list); 321 list_del(&cb_pos->list);
400 mei_io_cb_free(cb); 322 mei_io_cb_free(cb);
401 cl->reading_state = MEI_IDLE; 323 cl->reading_state = MEI_IDLE;
402 cl->read_cb = NULL; 324 cl->read_cb = NULL;
403 cl->read_pending = 0;
404out: 325out:
405 dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets); 326 dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
406 mutex_unlock(&dev->device_lock); 327 mutex_unlock(&dev->device_lock);
@@ -475,16 +396,15 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
475 /* free entry used in read */ 396 /* free entry used in read */
476 if (cl->reading_state == MEI_READ_COMPLETE) { 397 if (cl->reading_state == MEI_READ_COMPLETE) {
477 *offset = 0; 398 *offset = 0;
478 write_cb = find_read_list_entry(dev, cl); 399 write_cb = mei_cl_find_read_cb(cl);
479 if (write_cb) { 400 if (write_cb) {
480 list_del(&write_cb->list); 401 list_del(&write_cb->list);
481 mei_io_cb_free(write_cb); 402 mei_io_cb_free(write_cb);
482 write_cb = NULL; 403 write_cb = NULL;
483 cl->reading_state = MEI_IDLE; 404 cl->reading_state = MEI_IDLE;
484 cl->read_cb = NULL; 405 cl->read_cb = NULL;
485 cl->read_pending = 0;
486 } 406 }
487 } else if (cl->reading_state == MEI_IDLE && !cl->read_pending) 407 } else if (cl->reading_state == MEI_IDLE)
488 *offset = 0; 408 *offset = 0;
489 409
490 410
@@ -519,7 +439,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
519 439
520 if (rets) { 440 if (rets) {
521 dev_err(&dev->pdev->dev, 441 dev_err(&dev->pdev->dev,
522 "amthi write failed with status = %d\n", rets); 442 "amthif write failed with status = %d\n", rets);
523 goto err; 443 goto err;
524 } 444 }
525 mutex_unlock(&dev->device_lock); 445 mutex_unlock(&dev->device_lock);
@@ -530,20 +450,20 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
530 450
531 dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n", 451 dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
532 cl->host_client_id, cl->me_client_id); 452 cl->host_client_id, cl->me_client_id);
533 rets = mei_flow_ctrl_creds(dev, cl); 453 rets = mei_cl_flow_ctrl_creds(cl);
534 if (rets < 0) 454 if (rets < 0)
535 goto err; 455 goto err;
536 456
537 if (rets == 0 || dev->mei_host_buffer_is_empty == false) { 457 if (rets == 0 || !dev->hbuf_is_ready) {
538 write_cb->buf_idx = 0; 458 write_cb->buf_idx = 0;
539 mei_hdr.msg_complete = 0; 459 mei_hdr.msg_complete = 0;
540 cl->writing_state = MEI_WRITING; 460 cl->writing_state = MEI_WRITING;
541 goto out; 461 goto out;
542 } 462 }
543 463
544 dev->mei_host_buffer_is_empty = false; 464 dev->hbuf_is_ready = false;
545 if (length > mei_hbuf_max_data(dev)) { 465 if (length > mei_hbuf_max_len(dev)) {
546 mei_hdr.length = mei_hbuf_max_data(dev); 466 mei_hdr.length = mei_hbuf_max_len(dev);
547 mei_hdr.msg_complete = 0; 467 mei_hdr.msg_complete = 0;
548 } else { 468 } else {
549 mei_hdr.length = length; 469 mei_hdr.length = length;
@@ -552,10 +472,10 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
552 mei_hdr.host_addr = cl->host_client_id; 472 mei_hdr.host_addr = cl->host_client_id;
553 mei_hdr.me_addr = cl->me_client_id; 473 mei_hdr.me_addr = cl->me_client_id;
554 mei_hdr.reserved = 0; 474 mei_hdr.reserved = 0;
555 dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n", 475
556 *((u32 *) &mei_hdr)); 476 dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
557 if (mei_write_message(dev, &mei_hdr, 477 MEI_HDR_PRM(&mei_hdr));
558 write_cb->request_buffer.data, mei_hdr.length)) { 478 if (mei_write_message(dev, &mei_hdr, write_cb->request_buffer.data)) {
559 rets = -ENODEV; 479 rets = -ENODEV;
560 goto err; 480 goto err;
561 } 481 }
@@ -564,7 +484,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
564 484
565out: 485out:
566 if (mei_hdr.msg_complete) { 486 if (mei_hdr.msg_complete) {
567 if (mei_flow_ctrl_reduce(dev, cl)) { 487 if (mei_cl_flow_ctrl_reduce(cl)) {
568 rets = -ENODEV; 488 rets = -ENODEV;
569 goto err; 489 goto err;
570 } 490 }
@@ -582,6 +502,103 @@ err:
582 return rets; 502 return rets;
583} 503}
584 504
505/**
506 * mei_ioctl_connect_client - the connect to fw client IOCTL function
507 *
508 * @dev: the device structure
509 * @data: IOCTL connect data, input and output parameters
510 * @file: private data of the file object
511 *
512 * Locking: called under "dev->device_lock" lock
513 *
514 * returns 0 on success, <0 on failure.
515 */
516static int mei_ioctl_connect_client(struct file *file,
517 struct mei_connect_client_data *data)
518{
519 struct mei_device *dev;
520 struct mei_client *client;
521 struct mei_cl *cl;
522 int i;
523 int rets;
524
525 cl = file->private_data;
526 if (WARN_ON(!cl || !cl->dev))
527 return -ENODEV;
528
529 dev = cl->dev;
530
531 if (dev->dev_state != MEI_DEV_ENABLED) {
532 rets = -ENODEV;
533 goto end;
534 }
535
536 if (cl->state != MEI_FILE_INITIALIZING &&
537 cl->state != MEI_FILE_DISCONNECTED) {
538 rets = -EBUSY;
539 goto end;
540 }
541
542 /* find ME client we're trying to connect to */
543 i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
544 if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
545 cl->me_client_id = dev->me_clients[i].client_id;
546 cl->state = MEI_FILE_CONNECTING;
547 }
548
549 dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
550 cl->me_client_id);
551 dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
552 dev->me_clients[i].props.protocol_version);
553 dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
554 dev->me_clients[i].props.max_msg_length);
555
556 /* if we're connecting to amthif client then we will use the
557 * existing connection
558 */
559 if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
560 dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
561 if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
562 rets = -ENODEV;
563 goto end;
564 }
565 clear_bit(cl->host_client_id, dev->host_clients_map);
566 mei_cl_unlink(cl);
567
568 kfree(cl);
569 cl = NULL;
570 file->private_data = &dev->iamthif_cl;
571
572 client = &data->out_client_properties;
573 client->max_msg_length =
574 dev->me_clients[i].props.max_msg_length;
575 client->protocol_version =
576 dev->me_clients[i].props.protocol_version;
577 rets = dev->iamthif_cl.status;
578
579 goto end;
580 }
581
582 if (cl->state != MEI_FILE_CONNECTING) {
583 rets = -ENODEV;
584 goto end;
585 }
586
587
588 /* prepare the output buffer */
589 client = &data->out_client_properties;
590 client->max_msg_length = dev->me_clients[i].props.max_msg_length;
591 client->protocol_version = dev->me_clients[i].props.protocol_version;
592 dev_dbg(&dev->pdev->dev, "Can connect?\n");
593
594
595 rets = mei_cl_connect(cl, file);
596
597end:
598 dev_dbg(&dev->pdev->dev, "free connect cb memory.");
599 return rets;
600}
601
585 602
586/** 603/**
587 * mei_ioctl - the IOCTL function 604 * mei_ioctl - the IOCTL function
@@ -630,6 +647,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
630 rets = -EFAULT; 647 rets = -EFAULT;
631 goto out; 648 goto out;
632 } 649 }
650
633 rets = mei_ioctl_connect_client(file, connect_data); 651 rets = mei_ioctl_connect_client(file, connect_data);
634 652
635 /* if all is ok, copying the data back to user. */ 653 /* if all is ok, copying the data back to user. */
@@ -726,7 +744,6 @@ static const struct file_operations mei_fops = {
726 .llseek = no_llseek 744 .llseek = no_llseek
727}; 745};
728 746
729
730/* 747/*
731 * Misc Device Struct 748 * Misc Device Struct
732 */ 749 */
@@ -736,300 +753,17 @@ static struct miscdevice mei_misc_device = {
736 .minor = MISC_DYNAMIC_MINOR, 753 .minor = MISC_DYNAMIC_MINOR,
737}; 754};
738 755
739/** 756int mei_register(struct device *dev)
740 * mei_quirk_probe - probe for devices that don't have a valid ME interface
741 * @pdev: PCI device structure
742 * @ent: entry into pci_device_table
743 *
744 * returns true if ME Interface is valid, false otherwise
745 */
746static bool mei_quirk_probe(struct pci_dev *pdev,
747 const struct pci_device_id *ent)
748{ 757{
749 u32 reg; 758 mei_misc_device.parent = dev;
750 if (ent->device == MEI_DEV_ID_PBG_1) { 759 return misc_register(&mei_misc_device);
751 pci_read_config_dword(pdev, 0x48, &reg);
752 /* make sure that bit 9 is up and bit 10 is down */
753 if ((reg & 0x600) == 0x200) {
754 dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
755 return false;
756 }
757 }
758 return true;
759}
760/**
761 * mei_probe - Device Initialization Routine
762 *
763 * @pdev: PCI device structure
764 * @ent: entry in mei_pci_tbl
765 *
766 * returns 0 on success, <0 on failure.
767 */
768static int mei_probe(struct pci_dev *pdev,
769 const struct pci_device_id *ent)
770{
771 struct mei_device *dev;
772 int err;
773
774 mutex_lock(&mei_mutex);
775
776 if (!mei_quirk_probe(pdev, ent)) {
777 err = -ENODEV;
778 goto end;
779 }
780
781 if (mei_pdev) {
782 err = -EEXIST;
783 goto end;
784 }
785 /* enable pci dev */
786 err = pci_enable_device(pdev);
787 if (err) {
788 dev_err(&pdev->dev, "failed to enable pci device.\n");
789 goto end;
790 }
791 /* set PCI host mastering */
792 pci_set_master(pdev);
793 /* pci request regions for mei driver */
794 err = pci_request_regions(pdev, KBUILD_MODNAME);
795 if (err) {
796 dev_err(&pdev->dev, "failed to get pci regions.\n");
797 goto disable_device;
798 }
799 /* allocates and initializes the mei dev structure */
800 dev = mei_device_init(pdev);
801 if (!dev) {
802 err = -ENOMEM;
803 goto release_regions;
804 }
805 /* mapping IO device memory */
806 dev->mem_addr = pci_iomap(pdev, 0, 0);
807 if (!dev->mem_addr) {
808 dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
809 err = -ENOMEM;
810 goto free_device;
811 }
812 pci_enable_msi(pdev);
813
814 /* request and enable interrupt */
815 if (pci_dev_msi_enabled(pdev))
816 err = request_threaded_irq(pdev->irq,
817 NULL,
818 mei_interrupt_thread_handler,
819 IRQF_ONESHOT, KBUILD_MODNAME, dev);
820 else
821 err = request_threaded_irq(pdev->irq,
822 mei_interrupt_quick_handler,
823 mei_interrupt_thread_handler,
824 IRQF_SHARED, KBUILD_MODNAME, dev);
825
826 if (err) {
827 dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
828 pdev->irq);
829 goto disable_msi;
830 }
831 INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
832 INIT_WORK(&dev->init_work, mei_host_client_init);
833
834 if (mei_hw_init(dev)) {
835 dev_err(&pdev->dev, "init hw failure.\n");
836 err = -ENODEV;
837 goto release_irq;
838 }
839
840 err = misc_register(&mei_misc_device);
841 if (err)
842 goto release_irq;
843
844 mei_pdev = pdev;
845 pci_set_drvdata(pdev, dev);
846
847
848 schedule_delayed_work(&dev->timer_work, HZ);
849
850 mutex_unlock(&mei_mutex);
851
852 pr_debug("initialization successful.\n");
853
854 return 0;
855
856release_irq:
857 /* disable interrupts */
858 dev->host_hw_state = mei_hcsr_read(dev);
859 mei_disable_interrupts(dev);
860 flush_scheduled_work();
861 free_irq(pdev->irq, dev);
862disable_msi:
863 pci_disable_msi(pdev);
864 pci_iounmap(pdev, dev->mem_addr);
865free_device:
866 kfree(dev);
867release_regions:
868 pci_release_regions(pdev);
869disable_device:
870 pci_disable_device(pdev);
871end:
872 mutex_unlock(&mei_mutex);
873 dev_err(&pdev->dev, "initialization failed.\n");
874 return err;
875} 760}
876 761
877/** 762void mei_deregister(void)
878 * mei_remove - Device Removal Routine
879 *
880 * @pdev: PCI device structure
881 *
882 * mei_remove is called by the PCI subsystem to alert the driver
883 * that it should release a PCI device.
884 */
885static void mei_remove(struct pci_dev *pdev)
886{ 763{
887 struct mei_device *dev;
888
889 if (mei_pdev != pdev)
890 return;
891
892 dev = pci_get_drvdata(pdev);
893 if (!dev)
894 return;
895
896 mutex_lock(&dev->device_lock);
897
898 cancel_delayed_work(&dev->timer_work);
899
900 mei_wd_stop(dev);
901
902 mei_pdev = NULL;
903
904 if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
905 dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
906 mei_disconnect_host_client(dev, &dev->iamthif_cl);
907 }
908 if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
909 dev->wd_cl.state = MEI_FILE_DISCONNECTING;
910 mei_disconnect_host_client(dev, &dev->wd_cl);
911 }
912
913 /* Unregistering watchdog device */
914 mei_watchdog_unregister(dev);
915
916 /* remove entry if already in list */
917 dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
918 mei_me_cl_unlink(dev, &dev->wd_cl);
919 mei_me_cl_unlink(dev, &dev->iamthif_cl);
920
921 dev->iamthif_current_cb = NULL;
922 dev->me_clients_num = 0;
923
924 mutex_unlock(&dev->device_lock);
925
926 flush_scheduled_work();
927
928 /* disable interrupts */
929 mei_disable_interrupts(dev);
930
931 free_irq(pdev->irq, dev);
932 pci_disable_msi(pdev);
933 pci_set_drvdata(pdev, NULL);
934
935 if (dev->mem_addr)
936 pci_iounmap(pdev, dev->mem_addr);
937
938 kfree(dev);
939
940 pci_release_regions(pdev);
941 pci_disable_device(pdev);
942
943 misc_deregister(&mei_misc_device); 764 misc_deregister(&mei_misc_device);
944} 765 mei_misc_device.parent = NULL;
945#ifdef CONFIG_PM
946static int mei_pci_suspend(struct device *device)
947{
948 struct pci_dev *pdev = to_pci_dev(device);
949 struct mei_device *dev = pci_get_drvdata(pdev);
950 int err;
951
952 if (!dev)
953 return -ENODEV;
954 mutex_lock(&dev->device_lock);
955
956 cancel_delayed_work(&dev->timer_work);
957
958 /* Stop watchdog if exists */
959 err = mei_wd_stop(dev);
960 /* Set new mei state */
961 if (dev->dev_state == MEI_DEV_ENABLED ||
962 dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
963 dev->dev_state = MEI_DEV_POWER_DOWN;
964 mei_reset(dev, 0);
965 }
966 mutex_unlock(&dev->device_lock);
967
968 free_irq(pdev->irq, dev);
969 pci_disable_msi(pdev);
970
971 return err;
972} 766}
973 767
974static int mei_pci_resume(struct device *device)
975{
976 struct pci_dev *pdev = to_pci_dev(device);
977 struct mei_device *dev;
978 int err;
979
980 dev = pci_get_drvdata(pdev);
981 if (!dev)
982 return -ENODEV;
983
984 pci_enable_msi(pdev);
985
986 /* request and enable interrupt */
987 if (pci_dev_msi_enabled(pdev))
988 err = request_threaded_irq(pdev->irq,
989 NULL,
990 mei_interrupt_thread_handler,
991 IRQF_ONESHOT, KBUILD_MODNAME, dev);
992 else
993 err = request_threaded_irq(pdev->irq,
994 mei_interrupt_quick_handler,
995 mei_interrupt_thread_handler,
996 IRQF_SHARED, KBUILD_MODNAME, dev);
997
998 if (err) {
999 dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
1000 pdev->irq);
1001 return err;
1002 }
1003
1004 mutex_lock(&dev->device_lock);
1005 dev->dev_state = MEI_DEV_POWER_UP;
1006 mei_reset(dev, 1);
1007 mutex_unlock(&dev->device_lock);
1008
1009 /* Start timer if stopped in suspend */
1010 schedule_delayed_work(&dev->timer_work, HZ);
1011
1012 return err;
1013}
1014static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
1015#define MEI_PM_OPS (&mei_pm_ops)
1016#else
1017#define MEI_PM_OPS NULL
1018#endif /* CONFIG_PM */
1019/*
1020 * PCI driver structure
1021 */
1022static struct pci_driver mei_driver = {
1023 .name = KBUILD_MODNAME,
1024 .id_table = mei_pci_tbl,
1025 .probe = mei_probe,
1026 .remove = mei_remove,
1027 .shutdown = mei_remove,
1028 .driver.pm = MEI_PM_OPS,
1029};
1030
1031module_pci_driver(mei_driver);
1032
1033MODULE_AUTHOR("Intel Corporation");
1034MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
1035MODULE_LICENSE("GPL v2"); 768MODULE_LICENSE("GPL v2");
769
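
The reworked mei_write() above sizes every fragment against mei_hbuf_max_len() and sets msg_complete only on the last piece, so the flow-control credit is consumed once per full message. A hedged sketch of that header setup, using only fields visible in the hunk (example_fill_hdr is illustrative, not part of the patch):

/* Sketch: fill one mei_msg_hdr for the next write fragment. */
static void example_fill_hdr(struct mei_device *dev, struct mei_cl *cl,
			     struct mei_msg_hdr *hdr, size_t remaining)
{
	size_t max = mei_hbuf_max_len(dev);

	hdr->host_addr = cl->host_client_id;
	hdr->me_addr = cl->me_client_id;
	hdr->reserved = 0;

	if (remaining > max) {
		hdr->length = max;		/* partial fragment, more follows */
		hdr->msg_complete = 0;
	} else {
		hdr->length = remaining;	/* final fragment */
		hdr->msg_complete = 1;
	}
}
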
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 25da04549d04..cb80166161f0 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -21,7 +21,9 @@
21#include <linux/watchdog.h> 21#include <linux/watchdog.h>
22#include <linux/poll.h> 22#include <linux/poll.h>
23#include <linux/mei.h> 23#include <linux/mei.h>
24
24#include "hw.h" 25#include "hw.h"
26#include "hw-me-regs.h"
25 27
26/* 28/*
27 * watch dog definition 29 * watch dog definition
@@ -44,7 +46,7 @@
44/* 46/*
45 * AMTHI Client UUID 47 * AMTHI Client UUID
46 */ 48 */
47extern const uuid_le mei_amthi_guid; 49extern const uuid_le mei_amthif_guid;
48 50
49/* 51/*
50 * Watchdog Client UUID 52 * Watchdog Client UUID
@@ -65,12 +67,18 @@ extern const u8 mei_wd_state_independence_msg[3][4];
65 * Number of File descriptors/handles 67 * Number of File descriptors/handles
66 * that can be opened to the driver. 68 * that can be opened to the driver.
67 * 69 *
68 * Limit to 253: 256 Total Clients 70 * Limit to 255: 256 Total Clients
 69 * minus internal client for MEI Bus Messages 71 * minus internal client for MEI Bus Messages
70 * minus internal client for AMTHI
71 * minus internal client for Watchdog
72 */ 72 */
73#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 3) 73#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)
74
75/*
76 * Internal Clients Number
77 */
78#define MEI_HOST_CLIENT_ID_ANY (-1)
79#define MEI_HBM_HOST_CLIENT_ID 0 /* not used, just for documentation */
80#define MEI_WD_HOST_CLIENT_ID 1
81#define MEI_IAMTHIF_HOST_CLIENT_ID 2
74 82
75 83
76/* File state */ 84/* File state */
@@ -150,6 +158,19 @@ struct mei_message_data {
150 unsigned char *data; 158 unsigned char *data;
151}; 159};
152 160
161/**
162 * struct mei_me_client - representation of me (fw) client
163 *
164 * @props - client properties
165 * @client_id - me client id
166 * @mei_flow_ctrl_creds - flow control credits
167 */
168struct mei_me_client {
169 struct mei_client_properties props;
170 u8 client_id;
171 u8 mei_flow_ctrl_creds;
172};
173
153 174
154struct mei_cl; 175struct mei_cl;
155 176
@@ -178,7 +199,6 @@ struct mei_cl {
178 wait_queue_head_t tx_wait; 199 wait_queue_head_t tx_wait;
179 wait_queue_head_t rx_wait; 200 wait_queue_head_t rx_wait;
180 wait_queue_head_t wait; 201 wait_queue_head_t wait;
181 int read_pending;
182 int status; 202 int status;
183 /* ID of client connected */ 203 /* ID of client connected */
184 u8 host_client_id; 204 u8 host_client_id;
@@ -191,10 +211,67 @@ struct mei_cl {
191 struct mei_cl_cb *read_cb; 211 struct mei_cl_cb *read_cb;
192}; 212};
193 213
214/** struct mei_hw_ops
215 *
216 * @host_set_ready - notify FW that host side is ready
217 * @host_is_ready - query for host readiness
218
219 * @hw_is_ready - query if hw is ready
220 * @hw_reset - reset hw
221 * @hw_config - configure hw
222
223 * @intr_clear - clear pending interrupts
224 * @intr_enable - enable interrupts
225 * @intr_disable - disable interrupts
226
227 * @hbuf_free_slots - query for write buffer empty slots
228 * @hbuf_is_ready - query if write buffer is empty
229 * @hbuf_max_len - query for write buffer max len
230
231 * @write - write a message to FW
232
233 * @rdbuf_full_slots - query how many slots are filled
234
235 * @read_hdr - get first 4 bytes (header)
236 * @read - read a buffer from the FW
237 */
238struct mei_hw_ops {
239
240 void (*host_set_ready) (struct mei_device *dev);
241 bool (*host_is_ready) (struct mei_device *dev);
242
243 bool (*hw_is_ready) (struct mei_device *dev);
244 void (*hw_reset) (struct mei_device *dev, bool enable);
245 void (*hw_config) (struct mei_device *dev);
246
247 void (*intr_clear) (struct mei_device *dev);
248 void (*intr_enable) (struct mei_device *dev);
249 void (*intr_disable) (struct mei_device *dev);
250
251 int (*hbuf_free_slots) (struct mei_device *dev);
252 bool (*hbuf_is_ready) (struct mei_device *dev);
253 size_t (*hbuf_max_len) (const struct mei_device *dev);
254
255 int (*write)(struct mei_device *dev,
256 struct mei_msg_hdr *hdr,
257 unsigned char *buf);
258
259 int (*rdbuf_full_slots)(struct mei_device *dev);
260
261 u32 (*read_hdr)(const struct mei_device *dev);
262 int (*read) (struct mei_device *dev,
263 unsigned char *buf, unsigned long len);
264};
265
194/** 266/**
195 * struct mei_device - MEI private device struct 267 * struct mei_device - MEI private device struct
196 * @hbuf_depth - depth of host(write) buffer 268
197 * @wr_ext_msg - buffer for hbm control responses (set in read cycle) 269 * @mem_addr - mem mapped base register address
270
 271 * @hbuf_depth - depth of hardware host/write buffer in slots
 272 * @hbuf_is_ready - query if the host/write buffer is ready
273 * @wr_msg - the buffer for hbm control messages
274 * @wr_ext_msg - the buffer for hbm control responses (set in read cycle)
198 */ 275 */
199struct mei_device { 276struct mei_device {
200 struct pci_dev *pdev; /* pointer to pci device struct */ 277 struct pci_dev *pdev; /* pointer to pci device struct */
@@ -213,24 +290,14 @@ struct mei_device {
213 */ 290 */
214 struct list_head file_list; 291 struct list_head file_list;
215 long open_handle_count; 292 long open_handle_count;
216 /* 293
217 * memory of device
218 */
219 unsigned int mem_base;
220 unsigned int mem_length;
221 void __iomem *mem_addr;
222 /* 294 /*
223 * lock for the device 295 * lock for the device
224 */ 296 */
225 struct mutex device_lock; /* device lock */ 297 struct mutex device_lock; /* device lock */
226 struct delayed_work timer_work; /* MEI timer delayed work (timeouts) */ 298 struct delayed_work timer_work; /* MEI timer delayed work (timeouts) */
227 bool recvd_msg; 299 bool recvd_msg;
228 /* 300
229 * hw states of host and fw(ME)
230 */
231 u32 host_hw_state;
232 u32 me_hw_state;
233 u8 hbuf_depth;
234 /* 301 /*
235 * waiting queue for receive message from FW 302 * waiting queue for receive message from FW
236 */ 303 */
@@ -243,11 +310,20 @@ struct mei_device {
243 enum mei_dev_state dev_state; 310 enum mei_dev_state dev_state;
244 enum mei_init_clients_states init_clients_state; 311 enum mei_init_clients_states init_clients_state;
245 u16 init_clients_timer; 312 u16 init_clients_timer;
246 bool need_reset;
247 313
248 unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */ 314 unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */
249 u32 rd_msg_hdr; 315 u32 rd_msg_hdr;
250 u32 wr_msg_buf[128]; /* used for control messages */ 316
317 /* write buffer */
318 u8 hbuf_depth;
319 bool hbuf_is_ready;
320
321 /* used for control messages */
322 struct {
323 struct mei_msg_hdr hdr;
324 unsigned char data[128];
325 } wr_msg;
326
251 struct { 327 struct {
252 struct mei_msg_hdr hdr; 328 struct mei_msg_hdr hdr;
253 unsigned char data[4]; /* All HBM messages are 4 bytes */ 329 unsigned char data[4]; /* All HBM messages are 4 bytes */
@@ -261,7 +337,6 @@ struct mei_device {
261 u8 me_clients_num; 337 u8 me_clients_num;
262 u8 me_client_presentation_num; 338 u8 me_client_presentation_num;
263 u8 me_client_index; 339 u8 me_client_index;
264 bool mei_host_buffer_is_empty;
265 340
266 struct mei_cl wd_cl; 341 struct mei_cl wd_cl;
267 enum mei_wd_states wd_state; 342 enum mei_wd_states wd_state;
@@ -289,6 +364,9 @@ struct mei_device {
289 bool iamthif_canceled; 364 bool iamthif_canceled;
290 365
291 struct work_struct init_work; 366 struct work_struct init_work;
367
368 const struct mei_hw_ops *ops;
369 char hw[0] __aligned(sizeof(void *));
292}; 370};
293 371
294static inline unsigned long mei_secs_to_jiffies(unsigned long sec) 372static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
@@ -300,96 +378,28 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
300/* 378/*
301 * mei init function prototypes 379 * mei init function prototypes
302 */ 380 */
303struct mei_device *mei_device_init(struct pci_dev *pdev); 381void mei_device_init(struct mei_device *dev);
304void mei_reset(struct mei_device *dev, int interrupts); 382void mei_reset(struct mei_device *dev, int interrupts);
305int mei_hw_init(struct mei_device *dev); 383int mei_hw_init(struct mei_device *dev);
306int mei_task_initialize_clients(void *data);
307int mei_initialize_clients(struct mei_device *dev);
308int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl);
309void mei_allocate_me_clients_storage(struct mei_device *dev);
310
311
312int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
313 const uuid_le *cguid, u8 host_client_id);
314void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl);
315int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
316int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
317
318/*
319 * MEI IO Functions
320 */
321struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
322void mei_io_cb_free(struct mei_cl_cb *priv_cb);
323int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
324int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
325
326
327/**
328 * mei_io_list_init - Sets up a queue list.
329 *
330 * @list: An instance cl callback structure
331 */
332static inline void mei_io_list_init(struct mei_cl_cb *list)
333{
334 INIT_LIST_HEAD(&list->list);
335}
336void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
337
338/*
339 * MEI ME Client Functions
340 */
341
342struct mei_cl *mei_cl_allocate(struct mei_device *dev);
343void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
344int mei_cl_flush_queues(struct mei_cl *cl);
345/**
346 * mei_cl_cmp_id - tells if file private data have same id
347 *
348 * @fe1: private data of 1. file object
349 * @fe2: private data of 2. file object
350 *
351 * returns true - if ids are the same and not NULL
352 */
353static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
354 const struct mei_cl *cl2)
355{
356 return cl1 && cl2 &&
357 (cl1->host_client_id == cl2->host_client_id) &&
358 (cl1->me_client_id == cl2->me_client_id);
359}
360
361
362
363/*
364 * MEI Host Client Functions
365 */
366void mei_host_start_message(struct mei_device *dev);
367void mei_host_enum_clients_message(struct mei_device *dev);
368int mei_host_client_enumerate(struct mei_device *dev);
369void mei_host_client_init(struct work_struct *work);
370 384
371/* 385/*
372 * MEI interrupt functions prototype 386 * MEI interrupt functions prototype
373 */ 387 */
374irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id);
375irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id);
376void mei_timer(struct work_struct *work);
377 388
378/* 389void mei_timer(struct work_struct *work);
379 * MEI input output function prototype 390int mei_irq_read_handler(struct mei_device *dev,
380 */ 391 struct mei_cl_cb *cmpl_list, s32 *slots);
381int mei_ioctl_connect_client(struct file *file,
382 struct mei_connect_client_data *data);
383 392
384int mei_start_read(struct mei_device *dev, struct mei_cl *cl); 393int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
385 394
395void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos);
386 396
387/* 397/*
388 * AMTHIF - AMT Host Interface Functions 398 * AMTHIF - AMT Host Interface Functions
389 */ 399 */
390void mei_amthif_reset_params(struct mei_device *dev); 400void mei_amthif_reset_params(struct mei_device *dev);
391 401
392void mei_amthif_host_init(struct mei_device *dev); 402int mei_amthif_host_init(struct mei_device *dev);
393 403
394int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb); 404int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb);
395 405
@@ -407,9 +417,6 @@ struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
407void mei_amthif_run_next_cmd(struct mei_device *dev); 417void mei_amthif_run_next_cmd(struct mei_device *dev);
408 418
409 419
410int mei_amthif_read_message(struct mei_cl_cb *complete_list,
411 struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
412
413int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots, 420int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
414 struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list); 421 struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list);
415 422
@@ -418,92 +425,107 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
418 struct mei_device *dev, struct mei_msg_hdr *mei_hdr); 425 struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
419int mei_amthif_irq_read(struct mei_device *dev, s32 *slots); 426int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
420 427
428
429int mei_wd_send(struct mei_device *dev);
430int mei_wd_stop(struct mei_device *dev);
431int mei_wd_host_init(struct mei_device *dev);
421/* 432/*
422 * Register Access Function 433 * mei_watchdog_register - Registering watchdog interface
434 * once we got connection to the WD Client
435 * @dev - mei device
436 */
437void mei_watchdog_register(struct mei_device *dev);
438/*
439 * mei_watchdog_unregister - Unregistering watchdog interface
440 * @dev - mei device
423 */ 441 */
442void mei_watchdog_unregister(struct mei_device *dev);
424 443
425/** 444/*
426 * mei_reg_read - Reads 32bit data from the mei device 445 * Register Access Function
427 *
428 * @dev: the device structure
429 * @offset: offset from which to read the data
430 *
431 * returns register value (u32)
432 */ 446 */
433static inline u32 mei_reg_read(const struct mei_device *dev, 447
434 unsigned long offset) 448static inline void mei_hw_config(struct mei_device *dev)
449{
450 dev->ops->hw_config(dev);
451}
452static inline void mei_hw_reset(struct mei_device *dev, bool enable)
435{ 453{
436 return ioread32(dev->mem_addr + offset); 454 dev->ops->hw_reset(dev, enable);
437} 455}
438 456
439/** 457static inline void mei_clear_interrupts(struct mei_device *dev)
440 * mei_reg_write - Writes 32bit data to the mei device
441 *
442 * @dev: the device structure
443 * @offset: offset from which to write the data
444 * @value: register value to write (u32)
445 */
446static inline void mei_reg_write(const struct mei_device *dev,
447 unsigned long offset, u32 value)
448{ 458{
449 iowrite32(value, dev->mem_addr + offset); 459 dev->ops->intr_clear(dev);
450} 460}
451 461
452/** 462static inline void mei_enable_interrupts(struct mei_device *dev)
453 * mei_hcsr_read - Reads 32bit data from the host CSR
454 *
455 * @dev: the device structure
456 *
457 * returns the byte read.
458 */
459static inline u32 mei_hcsr_read(const struct mei_device *dev)
460{ 463{
461 return mei_reg_read(dev, H_CSR); 464 dev->ops->intr_enable(dev);
462} 465}
463 466
464/** 467static inline void mei_disable_interrupts(struct mei_device *dev)
465 * mei_mecsr_read - Reads 32bit data from the ME CSR
466 *
467 * @dev: the device structure
468 *
469 * returns ME_CSR_HA register value (u32)
470 */
471static inline u32 mei_mecsr_read(const struct mei_device *dev)
472{ 468{
473 return mei_reg_read(dev, ME_CSR_HA); 469 dev->ops->intr_disable(dev);
474} 470}
475 471
476/** 472static inline void mei_host_set_ready(struct mei_device *dev)
477 * get_me_cb_rw - Reads 32bit data from the mei ME_CB_RW register
478 *
479 * @dev: the device structure
480 *
481 * returns ME_CB_RW register value (u32)
482 */
483static inline u32 mei_mecbrw_read(const struct mei_device *dev)
484{ 473{
485 return mei_reg_read(dev, ME_CB_RW); 474 dev->ops->host_set_ready(dev);
475}
476static inline bool mei_host_is_ready(struct mei_device *dev)
477{
478 return dev->ops->host_is_ready(dev);
479}
480static inline bool mei_hw_is_ready(struct mei_device *dev)
481{
482 return dev->ops->hw_is_ready(dev);
486} 483}
487 484
485static inline bool mei_hbuf_is_ready(struct mei_device *dev)
486{
487 return dev->ops->hbuf_is_ready(dev);
488}
488 489
489/* 490static inline int mei_hbuf_empty_slots(struct mei_device *dev)
490 * mei interface function prototypes 491{
491 */ 492 return dev->ops->hbuf_free_slots(dev);
492void mei_hcsr_set(struct mei_device *dev); 493}
493void mei_csr_clear_his(struct mei_device *dev); 494
495static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
496{
497 return dev->ops->hbuf_max_len(dev);
498}
494 499
495void mei_enable_interrupts(struct mei_device *dev); 500static inline int mei_write_message(struct mei_device *dev,
496void mei_disable_interrupts(struct mei_device *dev); 501 struct mei_msg_hdr *hdr,
502 unsigned char *buf)
503{
504 return dev->ops->write(dev, hdr, buf);
505}
497 506
498static inline struct mei_msg_hdr *mei_hbm_hdr(u32 *buf, size_t length) 507static inline u32 mei_read_hdr(const struct mei_device *dev)
499{ 508{
500 struct mei_msg_hdr *hdr = (struct mei_msg_hdr *)buf; 509 return dev->ops->read_hdr(dev);
501 hdr->host_addr = 0;
502 hdr->me_addr = 0;
503 hdr->length = length;
504 hdr->msg_complete = 1;
505 hdr->reserved = 0;
506 return hdr;
507} 510}
508 511
512static inline void mei_read_slots(struct mei_device *dev,
513 unsigned char *buf, unsigned long len)
514{
515 dev->ops->read(dev, buf, len);
516}
517
518static inline int mei_count_full_read_slots(struct mei_device *dev)
519{
520 return dev->ops->rdbuf_full_slots(dev);
521}
522
523int mei_register(struct device *dev);
524void mei_deregister(void);
525
526#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d"
527#define MEI_HDR_PRM(hdr) \
528 (hdr)->host_addr, (hdr)->me_addr, \
529 (hdr)->length, (hdr)->msg_complete
530
509#endif 531#endif
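
With mei_hw_ops in place, mei_dev.h now describes a transport-agnostic core: the inline wrappers above dispatch through dev->ops, and a backend keeps its private register state in the hw[] trailer (pci-me.c below reaches it through to_me_hw()). A minimal hypothetical backend sketch; every example_* name is illustrative, and a real backend must fill in all of the callbacks, since the wrappers call them unconditionally:

/* Hypothetical backend state, carried in the mei_device hw[] trailer. */
struct example_hw {
	void __iomem *mem_addr;
};

#define to_example_hw(dev)	((struct example_hw *)((dev)->hw))

static bool example_host_is_ready(struct mei_device *dev)
{
	/* a real backend would test its host-ready bit in a CSR here */
	return true;
}

static const struct mei_hw_ops example_ops = {
	.host_is_ready = example_host_is_ready,
	/* ... the remaining mei_hw_ops callbacks go here ... */
};

/* Allocate core and backend storage in one block, as a backend init would. */
static struct mei_device *example_dev_alloc(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = kzalloc(sizeof(*dev) + sizeof(struct example_hw), GFP_KERNEL);
	if (!dev)
		return NULL;

	mei_device_init(dev);
	dev->ops = &example_ops;
	dev->pdev = pdev;
	return dev;
}
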
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
new file mode 100644
index 000000000000..b40ec0601ab0
--- /dev/null
+++ b/drivers/misc/mei/pci-me.c
@@ -0,0 +1,396 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/kernel.h>
22#include <linux/device.h>
23#include <linux/fs.h>
24#include <linux/errno.h>
25#include <linux/types.h>
26#include <linux/fcntl.h>
27#include <linux/aio.h>
28#include <linux/pci.h>
29#include <linux/poll.h>
30#include <linux/init.h>
31#include <linux/ioctl.h>
32#include <linux/cdev.h>
33#include <linux/sched.h>
34#include <linux/uuid.h>
35#include <linux/compat.h>
36#include <linux/jiffies.h>
37#include <linux/interrupt.h>
38#include <linux/miscdevice.h>
39
40#include <linux/mei.h>
41
42#include "mei_dev.h"
43#include "hw-me.h"
44#include "client.h"
45
46/* AMT device is a singleton on the platform */
47static struct pci_dev *mei_pdev;
48
49/* mei_pci_tbl - PCI Device ID Table */
50static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
51 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
52 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
53 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
54 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
55 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
56 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
57 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
58 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
59 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
60 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
61 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
62 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
63 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
64 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
65 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
66 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
67 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
68 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
69 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
70 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
71 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
72 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
73 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
74 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
75 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
76 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
77 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
78 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
79 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
80 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
81 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
82 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
83 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
84
85 /* required last entry */
86 {0, }
87};
88
89MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
90
91static DEFINE_MUTEX(mei_mutex);
92
93/**
 94 * mei_quirk_probe - probe for devices that don't have a valid ME interface
95 * @pdev: PCI device structure
96 * @ent: entry into pci_device_table
97 *
98 * returns true if ME Interface is valid, false otherwise
99 */
100static bool mei_quirk_probe(struct pci_dev *pdev,
101 const struct pci_device_id *ent)
102{
103 u32 reg;
104 if (ent->device == MEI_DEV_ID_PBG_1) {
105 pci_read_config_dword(pdev, 0x48, &reg);
106 /* make sure that bit 9 is up and bit 10 is down */
107 if ((reg & 0x600) == 0x200) {
108 dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
109 return false;
110 }
111 }
112 return true;
113}
114/**
115 * mei_probe - Device Initialization Routine
116 *
117 * @pdev: PCI device structure
 118 * @ent: entry in mei_pci_tbl
119 *
120 * returns 0 on success, <0 on failure.
121 */
122static int mei_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
123{
124 struct mei_device *dev;
125 struct mei_me_hw *hw;
126 int err;
127
128 mutex_lock(&mei_mutex);
129
130 if (!mei_quirk_probe(pdev, ent)) {
131 err = -ENODEV;
132 goto end;
133 }
134
135 if (mei_pdev) {
136 err = -EEXIST;
137 goto end;
138 }
139 /* enable pci dev */
140 err = pci_enable_device(pdev);
141 if (err) {
142 dev_err(&pdev->dev, "failed to enable pci device.\n");
143 goto end;
144 }
145 /* set PCI host mastering */
146 pci_set_master(pdev);
147 /* pci request regions for mei driver */
148 err = pci_request_regions(pdev, KBUILD_MODNAME);
149 if (err) {
150 dev_err(&pdev->dev, "failed to get pci regions.\n");
151 goto disable_device;
152 }
153 /* allocates and initializes the mei dev structure */
154 dev = mei_me_dev_init(pdev);
155 if (!dev) {
156 err = -ENOMEM;
157 goto release_regions;
158 }
159 hw = to_me_hw(dev);
160 /* mapping IO device memory */
161 hw->mem_addr = pci_iomap(pdev, 0, 0);
162 if (!hw->mem_addr) {
163 dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
164 err = -ENOMEM;
165 goto free_device;
166 }
167 pci_enable_msi(pdev);
168
169 /* request and enable interrupt */
170 if (pci_dev_msi_enabled(pdev))
171 err = request_threaded_irq(pdev->irq,
172 NULL,
173 mei_me_irq_thread_handler,
174 IRQF_ONESHOT, KBUILD_MODNAME, dev);
175 else
176 err = request_threaded_irq(pdev->irq,
177 mei_me_irq_quick_handler,
178 mei_me_irq_thread_handler,
179 IRQF_SHARED, KBUILD_MODNAME, dev);
180
181 if (err) {
182 dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
183 pdev->irq);
184 goto disable_msi;
185 }
186
187 if (mei_hw_init(dev)) {
188 dev_err(&pdev->dev, "init hw failure.\n");
189 err = -ENODEV;
190 goto release_irq;
191 }
192
193 err = mei_register(&pdev->dev);
194 if (err)
195 goto release_irq;
196
197 mei_pdev = pdev;
198 pci_set_drvdata(pdev, dev);
199
200
201 schedule_delayed_work(&dev->timer_work, HZ);
202
203 mutex_unlock(&mei_mutex);
204
205 pr_debug("initialization successful.\n");
206
207 return 0;
208
209release_irq:
210 mei_disable_interrupts(dev);
211 flush_scheduled_work();
212 free_irq(pdev->irq, dev);
213disable_msi:
214 pci_disable_msi(pdev);
215 pci_iounmap(pdev, hw->mem_addr);
216free_device:
217 kfree(dev);
218release_regions:
219 pci_release_regions(pdev);
220disable_device:
221 pci_disable_device(pdev);
222end:
223 mutex_unlock(&mei_mutex);
224 dev_err(&pdev->dev, "initialization failed.\n");
225 return err;
226}
227
228/**
229 * mei_remove - Device Removal Routine
230 *
231 * @pdev: PCI device structure
232 *
233 * mei_remove is called by the PCI subsystem to alert the driver
234 * that it should release a PCI device.
235 */
236static void mei_remove(struct pci_dev *pdev)
237{
238 struct mei_device *dev;
239 struct mei_me_hw *hw;
240
241 if (mei_pdev != pdev)
242 return;
243
244 dev = pci_get_drvdata(pdev);
245 if (!dev)
246 return;
247
248 hw = to_me_hw(dev);
249
250 mutex_lock(&dev->device_lock);
251
252 cancel_delayed_work(&dev->timer_work);
253
254 mei_wd_stop(dev);
255
256 mei_pdev = NULL;
257
258 if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
259 dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
260 mei_cl_disconnect(&dev->iamthif_cl);
261 }
262 if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
263 dev->wd_cl.state = MEI_FILE_DISCONNECTING;
264 mei_cl_disconnect(&dev->wd_cl);
265 }
266
267 /* Unregistering watchdog device */
268 mei_watchdog_unregister(dev);
269
270 /* remove entry if already in list */
271 dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
272
273 if (dev->open_handle_count > 0)
274 dev->open_handle_count--;
275 mei_cl_unlink(&dev->wd_cl);
276
277 if (dev->open_handle_count > 0)
278 dev->open_handle_count--;
279 mei_cl_unlink(&dev->iamthif_cl);
280
281 dev->iamthif_current_cb = NULL;
282 dev->me_clients_num = 0;
283
284 mutex_unlock(&dev->device_lock);
285
286 flush_scheduled_work();
287
288 /* disable interrupts */
289 mei_disable_interrupts(dev);
290
291 free_irq(pdev->irq, dev);
292 pci_disable_msi(pdev);
293 pci_set_drvdata(pdev, NULL);
294
295 if (hw->mem_addr)
296 pci_iounmap(pdev, hw->mem_addr);
297
298 kfree(dev);
299
300 pci_release_regions(pdev);
301 pci_disable_device(pdev);
302
303 mei_deregister();
304
305}
306#ifdef CONFIG_PM
307static int mei_pci_suspend(struct device *device)
308{
309 struct pci_dev *pdev = to_pci_dev(device);
310 struct mei_device *dev = pci_get_drvdata(pdev);
311 int err;
312
313 if (!dev)
314 return -ENODEV;
315 mutex_lock(&dev->device_lock);
316
317 cancel_delayed_work(&dev->timer_work);
318
319 /* Stop watchdog if exists */
320 err = mei_wd_stop(dev);
321 /* Set new mei state */
322 if (dev->dev_state == MEI_DEV_ENABLED ||
323 dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
324 dev->dev_state = MEI_DEV_POWER_DOWN;
325 mei_reset(dev, 0);
326 }
327 mutex_unlock(&dev->device_lock);
328
329 free_irq(pdev->irq, dev);
330 pci_disable_msi(pdev);
331
332 return err;
333}
334
335static int mei_pci_resume(struct device *device)
336{
337 struct pci_dev *pdev = to_pci_dev(device);
338 struct mei_device *dev;
339 int err;
340
341 dev = pci_get_drvdata(pdev);
342 if (!dev)
343 return -ENODEV;
344
345 pci_enable_msi(pdev);
346
347 /* request and enable interrupt */
348 if (pci_dev_msi_enabled(pdev))
349 err = request_threaded_irq(pdev->irq,
350 NULL,
351 mei_me_irq_thread_handler,
352 IRQF_ONESHOT, KBUILD_MODNAME, dev);
353 else
354 err = request_threaded_irq(pdev->irq,
355 mei_me_irq_quick_handler,
356 mei_me_irq_thread_handler,
357 IRQF_SHARED, KBUILD_MODNAME, dev);
358
359 if (err) {
360 dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
361 pdev->irq);
362 return err;
363 }
364
365 mutex_lock(&dev->device_lock);
366 dev->dev_state = MEI_DEV_POWER_UP;
367 mei_reset(dev, 1);
368 mutex_unlock(&dev->device_lock);
369
370 /* Start timer if stopped in suspend */
371 schedule_delayed_work(&dev->timer_work, HZ);
372
373 return err;
374}
375static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
376#define MEI_PM_OPS (&mei_pm_ops)
377#else
378#define MEI_PM_OPS NULL
379#endif /* CONFIG_PM */
380/*
381 * PCI driver structure
382 */
383static struct pci_driver mei_driver = {
384 .name = KBUILD_MODNAME,
385 .id_table = mei_pci_tbl,
386 .probe = mei_probe,
387 .remove = mei_remove,
388 .shutdown = mei_remove,
389 .driver.pm = MEI_PM_OPS,
390};
391
392module_pci_driver(mei_driver);
393
394MODULE_AUTHOR("Intel Corporation");
395MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
396MODULE_LICENSE("GPL v2");
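
For reference, module_pci_driver(mei_driver) above replaces the usual module init/exit boilerplate. A rough sketch of what it amounts to (not the literal macro expansion; the function names mei_init/mei_exit are illustrative):

static int __init mei_init(void)
{
	/* hand the driver to the PCI core; probe() runs for matching devices */
	return pci_register_driver(&mei_driver);
}

static void __exit mei_exit(void)
{
	/* remove() runs for every bound device before the module unloads */
	pci_unregister_driver(&mei_driver);
}

module_init(mei_init);
module_exit(mei_exit);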
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 9299a8c29a6f..2413247fc392 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -21,11 +21,13 @@
21#include <linux/sched.h> 21#include <linux/sched.h>
22#include <linux/watchdog.h> 22#include <linux/watchdog.h>
23 23
24#include "mei_dev.h"
25#include "hw.h"
26#include "interface.h"
27#include <linux/mei.h> 24#include <linux/mei.h>
28 25
26#include "mei_dev.h"
27#include "hbm.h"
28#include "hw-me.h"
29#include "client.h"
30
29static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 }; 31static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
30static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 }; 32static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 };
31 33
@@ -62,30 +64,41 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
62 */ 64 */
63int mei_wd_host_init(struct mei_device *dev) 65int mei_wd_host_init(struct mei_device *dev)
64{ 66{
65 int id; 67 struct mei_cl *cl = &dev->wd_cl;
66 mei_cl_init(&dev->wd_cl, dev); 68 int i;
69 int ret;
70
71 mei_cl_init(cl, dev);
67 72
68 /* look for WD client and connect to it */
69 dev->wd_cl.state = MEI_FILE_DISCONNECTED;
70 dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT; 73 dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
71 dev->wd_state = MEI_WD_IDLE; 74 dev->wd_state = MEI_WD_IDLE;
72 75
73 /* Connect WD ME client to the host client */
74 id = mei_me_cl_link(dev, &dev->wd_cl,
75 &mei_wd_guid, MEI_WD_HOST_CLIENT_ID);
76 76
77 if (id < 0) { 77 /* check for valid client id */
78 i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
79 if (i < 0) {
78 dev_info(&dev->pdev->dev, "wd: failed to find the client\n"); 80 dev_info(&dev->pdev->dev, "wd: failed to find the client\n");
79 return -ENOENT; 81 return -ENOENT;
80 } 82 }
81 83
82 if (mei_connect(dev, &dev->wd_cl)) { 84 cl->me_client_id = dev->me_clients[i].client_id;
85
86 ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
87
88 if (ret < 0) {
89 dev_info(&dev->pdev->dev, "wd: failed link client\n");
90 return -ENOENT;
91 }
92
93 cl->state = MEI_FILE_CONNECTING;
94
95 if (mei_hbm_cl_connect_req(dev, cl)) {
83 dev_err(&dev->pdev->dev, "wd: failed to connect to the client\n"); 96 dev_err(&dev->pdev->dev, "wd: failed to connect to the client\n");
84 dev->wd_cl.state = MEI_FILE_DISCONNECTED; 97 cl->state = MEI_FILE_DISCONNECTED;
85 dev->wd_cl.host_client_id = 0; 98 cl->host_client_id = 0;
86 return -EIO; 99 return -EIO;
87 } 100 }
88 dev->wd_cl.timer_count = MEI_CONNECT_TIMEOUT; 101 cl->timer_count = MEI_CONNECT_TIMEOUT;
89 102
90 return 0; 103 return 0;
91} 104}
@@ -101,22 +114,21 @@ int mei_wd_host_init(struct mei_device *dev)
101 */ 114 */
102int mei_wd_send(struct mei_device *dev) 115int mei_wd_send(struct mei_device *dev)
103{ 116{
104 struct mei_msg_hdr *mei_hdr; 117 struct mei_msg_hdr hdr;
105 118
106 mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0]; 119 hdr.host_addr = dev->wd_cl.host_client_id;
107 mei_hdr->host_addr = dev->wd_cl.host_client_id; 120 hdr.me_addr = dev->wd_cl.me_client_id;
108 mei_hdr->me_addr = dev->wd_cl.me_client_id; 121 hdr.msg_complete = 1;
109 mei_hdr->msg_complete = 1; 122 hdr.reserved = 0;
110 mei_hdr->reserved = 0;
111 123
112 if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE)) 124 if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))
113 mei_hdr->length = MEI_WD_START_MSG_SIZE; 125 hdr.length = MEI_WD_START_MSG_SIZE;
114 else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE)) 126 else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))
115 mei_hdr->length = MEI_WD_STOP_MSG_SIZE; 127 hdr.length = MEI_WD_STOP_MSG_SIZE;
116 else 128 else
117 return -EINVAL; 129 return -EINVAL;
118 130
119 return mei_write_message(dev, mei_hdr, dev->wd_data, mei_hdr->length); 131 return mei_write_message(dev, &hdr, dev->wd_data);
120} 132}
121 133
122/** 134/**
@@ -141,16 +153,16 @@ int mei_wd_stop(struct mei_device *dev)
141 153
142 dev->wd_state = MEI_WD_STOPPING; 154 dev->wd_state = MEI_WD_STOPPING;
143 155
144 ret = mei_flow_ctrl_creds(dev, &dev->wd_cl); 156 ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
145 if (ret < 0) 157 if (ret < 0)
146 goto out; 158 goto out;
147 159
148 if (ret && dev->mei_host_buffer_is_empty) { 160 if (ret && dev->hbuf_is_ready) {
149 ret = 0; 161 ret = 0;
150 dev->mei_host_buffer_is_empty = false; 162 dev->hbuf_is_ready = false;
151 163
152 if (!mei_wd_send(dev)) { 164 if (!mei_wd_send(dev)) {
153 ret = mei_flow_ctrl_reduce(dev, &dev->wd_cl); 165 ret = mei_cl_flow_ctrl_reduce(&dev->wd_cl);
154 if (ret) 166 if (ret)
155 goto out; 167 goto out;
156 } else { 168 } else {
@@ -270,10 +282,9 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
270 dev->wd_state = MEI_WD_RUNNING; 282 dev->wd_state = MEI_WD_RUNNING;
271 283
272 /* Check if we can send the ping to HW*/ 284 /* Check if we can send the ping to HW*/
273 if (dev->mei_host_buffer_is_empty && 285 if (dev->hbuf_is_ready && mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
274 mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
275 286
276 dev->mei_host_buffer_is_empty = false; 287 dev->hbuf_is_ready = false;
277 dev_dbg(&dev->pdev->dev, "wd: sending ping\n"); 288 dev_dbg(&dev->pdev->dev, "wd: sending ping\n");
278 289
279 if (mei_wd_send(dev)) { 290 if (mei_wd_send(dev)) {
@@ -282,9 +293,9 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
282 goto end; 293 goto end;
283 } 294 }
284 295
285 if (mei_flow_ctrl_reduce(dev, &dev->wd_cl)) { 296 if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) {
286 dev_err(&dev->pdev->dev, 297 dev_err(&dev->pdev->dev,
287 "wd: mei_flow_ctrl_reduce() failed.\n"); 298 "wd: mei_cl_flow_ctrl_reduce() failed.\n");
288 ret = -EIO; 299 ret = -EIO;
289 goto end; 300 goto end;
290 } 301 }
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index b90a2241d79c..0a1428016350 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -240,7 +240,8 @@ void st_int_recv(void *disc_data,
240 char *ptr; 240 char *ptr;
241 struct st_proto_s *proto; 241 struct st_proto_s *proto;
242 unsigned short payload_len = 0; 242 unsigned short payload_len = 0;
243 int len = 0, type = 0; 243 int len = 0;
244 unsigned char type = 0;
244 unsigned char *plen; 245 unsigned char *plen;
245 struct st_data_s *st_gdata = (struct st_data_s *)disc_data; 246 struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
246 unsigned long flags; 247 unsigned long flags;
diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig
new file mode 100644
index 000000000000..39c2ecadb273
--- /dev/null
+++ b/drivers/misc/vmw_vmci/Kconfig
@@ -0,0 +1,16 @@
1#
2# VMware VMCI device
3#
4
5config VMWARE_VMCI
6 tristate "VMware VMCI Driver"
7 depends on X86 && PCI
8 help
9 This is VMware's Virtual Machine Communication Interface. It enables
10 high-speed communication between host and guest in a virtual
11 environment via the VMCI virtual device.
12
13 If unsure, say N.
14
15 To compile this driver as a module, choose M here: the
16 module will be called vmw_vmci.
diff --git a/drivers/misc/vmw_vmci/Makefile b/drivers/misc/vmw_vmci/Makefile
new file mode 100644
index 000000000000..4da9893c3942
--- /dev/null
+++ b/drivers/misc/vmw_vmci/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci.o
2vmw_vmci-y += vmci_context.o vmci_datagram.o vmci_doorbell.o \
3 vmci_driver.o vmci_event.o vmci_guest.o vmci_handle_array.o \
4 vmci_host.o vmci_queue_pair.o vmci_resource.o vmci_route.o
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
new file mode 100644
index 000000000000..f866a4baecb5
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -0,0 +1,1214 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#include <linux/vmw_vmci_defs.h>
17#include <linux/vmw_vmci_api.h>
18#include <linux/highmem.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/sched.h>
22#include <linux/slab.h>
23
24#include "vmci_queue_pair.h"
25#include "vmci_datagram.h"
26#include "vmci_doorbell.h"
27#include "vmci_context.h"
28#include "vmci_driver.h"
29#include "vmci_event.h"
30
31/*
32 * List of current VMCI contexts. Contexts can be added by
33 * vmci_ctx_create() and removed via vmci_ctx_destroy().
34 * These, along with context lookup, are protected by the
35 * list structure's lock.
36 */
37static struct {
38 struct list_head head;
39 spinlock_t lock; /* Spinlock for context list operations */
40} ctx_list = {
41 .head = LIST_HEAD_INIT(ctx_list.head),
42 .lock = __SPIN_LOCK_UNLOCKED(ctx_list.lock),
43};
44
45/* Used by contexts that did not set up notify flag pointers */
46static bool ctx_dummy_notify;
47
48static void ctx_signal_notify(struct vmci_ctx *context)
49{
50 *context->notify = true;
51}
52
53static void ctx_clear_notify(struct vmci_ctx *context)
54{
55 *context->notify = false;
56}
57
58/*
59 * If nothing requires the attention of the guest, clears both
60 * notify flag and call.
61 */
62static void ctx_clear_notify_call(struct vmci_ctx *context)
63{
64 if (context->pending_datagrams == 0 &&
65 vmci_handle_arr_get_size(context->pending_doorbell_array) == 0)
66 ctx_clear_notify(context);
67}
68
69/*
70 * Sets the context's notify flag iff datagrams are pending for this
71 * context. Called from vmci_setup_notify().
72 */
73void vmci_ctx_check_signal_notify(struct vmci_ctx *context)
74{
75 spin_lock(&context->lock);
76 if (context->pending_datagrams)
77 ctx_signal_notify(context);
78 spin_unlock(&context->lock);
79}
80
81/*
82 * Allocates and initializes a VMCI context.
83 */
84struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
85 uintptr_t event_hnd,
86 int user_version,
87 const struct cred *cred)
88{
89 struct vmci_ctx *context;
90 int error;
91
92 if (cid == VMCI_INVALID_ID) {
93 pr_devel("Invalid context ID for VMCI context\n");
94 error = -EINVAL;
95 goto err_out;
96 }
97
98 if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) {
99 pr_devel("Invalid flag (flags=0x%x) for VMCI context\n",
100 priv_flags);
101 error = -EINVAL;
102 goto err_out;
103 }
104
105 if (user_version == 0) {
106		pr_devel("Invalid user_version %d\n", user_version);
107 error = -EINVAL;
108 goto err_out;
109 }
110
111 context = kzalloc(sizeof(*context), GFP_KERNEL);
112 if (!context) {
113 pr_warn("Failed to allocate memory for VMCI context\n");
114 error = -EINVAL;
115 goto err_out;
116 }
117
118 kref_init(&context->kref);
119 spin_lock_init(&context->lock);
120 INIT_LIST_HEAD(&context->list_item);
121 INIT_LIST_HEAD(&context->datagram_queue);
122 INIT_LIST_HEAD(&context->notifier_list);
123
124 /* Initialize host-specific VMCI context. */
125 init_waitqueue_head(&context->host_context.wait_queue);
126
127 context->queue_pair_array = vmci_handle_arr_create(0);
128 if (!context->queue_pair_array) {
129 error = -ENOMEM;
130 goto err_free_ctx;
131 }
132
133 context->doorbell_array = vmci_handle_arr_create(0);
134 if (!context->doorbell_array) {
135 error = -ENOMEM;
136 goto err_free_qp_array;
137 }
138
139 context->pending_doorbell_array = vmci_handle_arr_create(0);
140 if (!context->pending_doorbell_array) {
141 error = -ENOMEM;
142 goto err_free_db_array;
143 }
144
145 context->user_version = user_version;
146
147 context->priv_flags = priv_flags;
148
149 if (cred)
150 context->cred = get_cred(cred);
151
152 context->notify = &ctx_dummy_notify;
153 context->notify_page = NULL;
154
155 /*
156	 * If we collide with an existing context we generate a new
157	 * one and use it instead. The VMX will determine if regeneration
158	 * is okay. Since there aren't 4B - 16 VMs running on a given
159	 * host, the loop below will terminate.
160 */
161 spin_lock(&ctx_list.lock);
162
163 while (vmci_ctx_exists(cid)) {
164 /* We reserve the lowest 16 ids for fixed contexts. */
165 cid = max(cid, VMCI_RESERVED_CID_LIMIT - 1) + 1;
166 if (cid == VMCI_INVALID_ID)
167 cid = VMCI_RESERVED_CID_LIMIT;
168 }
169 context->cid = cid;
170
171 list_add_tail_rcu(&context->list_item, &ctx_list.head);
172 spin_unlock(&ctx_list.lock);
173
174 return context;
175
176 err_free_db_array:
177 vmci_handle_arr_destroy(context->doorbell_array);
178 err_free_qp_array:
179 vmci_handle_arr_destroy(context->queue_pair_array);
180 err_free_ctx:
181 kfree(context);
182 err_out:
183 return ERR_PTR(error);
184}
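
Note that vmci_ctx_create() reports failure through ERR_PTR() rather than NULL, so callers are expected to test the result with IS_ERR(). A minimal caller sketch, assuming the usual kernel headers; example_make_ctx, cid and user_version are hypothetical, and the creation reference is later dropped via vmci_ctx_destroy():

static struct vmci_ctx *example_make_ctx(u32 cid, int user_version)
{
	struct vmci_ctx *context;

	context = vmci_ctx_create(cid, VMCI_NO_PRIVILEGE_FLAGS,
				  0 /* event_hnd, unused in this sketch */,
				  user_version, current_cred());
	if (IS_ERR(context))
		return context;	/* propagate the ERR_PTR() value */

	/* ... hand the context to its user; vmci_ctx_destroy() drops it ... */
	return context;
}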
185
186/*
187 * Destroy VMCI context.
188 */
189void vmci_ctx_destroy(struct vmci_ctx *context)
190{
191 spin_lock(&ctx_list.lock);
192 list_del_rcu(&context->list_item);
193 spin_unlock(&ctx_list.lock);
194 synchronize_rcu();
195
196 vmci_ctx_put(context);
197}
198
199/*
200 * Fire notification for all contexts interested in given cid.
201 */
202static int ctx_fire_notification(u32 context_id, u32 priv_flags)
203{
204 u32 i, array_size;
205 struct vmci_ctx *sub_ctx;
206 struct vmci_handle_arr *subscriber_array;
207 struct vmci_handle context_handle =
208 vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
209
210 /*
211 * We create an array to hold the subscribers we find when
212 * scanning through all contexts.
213 */
214 subscriber_array = vmci_handle_arr_create(0);
215 if (subscriber_array == NULL)
216 return VMCI_ERROR_NO_MEM;
217
218 /*
219 * Scan all contexts to find who is interested in being
220 * notified about given contextID.
221 */
222 rcu_read_lock();
223 list_for_each_entry_rcu(sub_ctx, &ctx_list.head, list_item) {
224 struct vmci_handle_list *node;
225
226 /*
227 * We only deliver notifications of the removal of
228		 * contexts if the two contexts are allowed to
229 * interact.
230 */
231 if (vmci_deny_interaction(priv_flags, sub_ctx->priv_flags))
232 continue;
233
234 list_for_each_entry_rcu(node, &sub_ctx->notifier_list, node) {
235 if (!vmci_handle_is_equal(node->handle, context_handle))
236 continue;
237
238 vmci_handle_arr_append_entry(&subscriber_array,
239 vmci_make_handle(sub_ctx->cid,
240 VMCI_EVENT_HANDLER));
241 }
242 }
243 rcu_read_unlock();
244
245 /* Fire event to all subscribers. */
246 array_size = vmci_handle_arr_get_size(subscriber_array);
247 for (i = 0; i < array_size; i++) {
248 int result;
249 struct vmci_event_ctx ev;
250
251 ev.msg.hdr.dst = vmci_handle_arr_get_entry(subscriber_array, i);
252 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
253 VMCI_CONTEXT_RESOURCE_ID);
254 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
255 ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED;
256 ev.payload.context_id = context_id;
257
258 result = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
259 &ev.msg.hdr, false);
260 if (result < VMCI_SUCCESS) {
261 pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n",
262 ev.msg.event_data.event,
263 ev.msg.hdr.dst.context);
264 /* We continue to enqueue on next subscriber. */
265 }
266 }
267 vmci_handle_arr_destroy(subscriber_array);
268
269 return VMCI_SUCCESS;
270}
271
272/*
273 * Returns the current number of pending datagrams. The call may
274 * also serve as a synchronization point for the datagram queue,
275 * as no enqueue operations can occur concurrently.
276 */
277int vmci_ctx_pending_datagrams(u32 cid, u32 *pending)
278{
279 struct vmci_ctx *context;
280
281 context = vmci_ctx_get(cid);
282 if (context == NULL)
283 return VMCI_ERROR_INVALID_ARGS;
284
285 spin_lock(&context->lock);
286 if (pending)
287 *pending = context->pending_datagrams;
288 spin_unlock(&context->lock);
289 vmci_ctx_put(context);
290
291 return VMCI_SUCCESS;
292}
293
294/*
295 * Queues a VMCI datagram for the appropriate target VM context.
296 */
297int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg)
298{
299 struct vmci_datagram_queue_entry *dq_entry;
300 struct vmci_ctx *context;
301 struct vmci_handle dg_src;
302 size_t vmci_dg_size;
303
304 vmci_dg_size = VMCI_DG_SIZE(dg);
305 if (vmci_dg_size > VMCI_MAX_DG_SIZE) {
306 pr_devel("Datagram too large (bytes=%Zu)\n", vmci_dg_size);
307 return VMCI_ERROR_INVALID_ARGS;
308 }
309
310 /* Get the target VM's VMCI context. */
311 context = vmci_ctx_get(cid);
312 if (!context) {
313 pr_devel("Invalid context (ID=0x%x)\n", cid);
314 return VMCI_ERROR_INVALID_ARGS;
315 }
316
317 /* Allocate guest call entry and add it to the target VM's queue. */
318 dq_entry = kmalloc(sizeof(*dq_entry), GFP_KERNEL);
319 if (dq_entry == NULL) {
320 pr_warn("Failed to allocate memory for datagram\n");
321 vmci_ctx_put(context);
322 return VMCI_ERROR_NO_MEM;
323 }
324 dq_entry->dg = dg;
325 dq_entry->dg_size = vmci_dg_size;
326 dg_src = dg->src;
327 INIT_LIST_HEAD(&dq_entry->list_item);
328
329 spin_lock(&context->lock);
330
331 /*
332 * We put a higher limit on datagrams from the hypervisor. If
333 * the pending datagram is not from hypervisor, then we check
334 * if enqueueing it would exceed the
335 * VMCI_MAX_DATAGRAM_QUEUE_SIZE limit on the destination. If
336 * the pending datagram is from hypervisor, we allow it to be
337 * queued at the destination side provided we don't reach the
338 * VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE limit.
339 */
340 if (context->datagram_queue_size + vmci_dg_size >=
341 VMCI_MAX_DATAGRAM_QUEUE_SIZE &&
342 (!vmci_handle_is_equal(dg_src,
343 vmci_make_handle
344 (VMCI_HYPERVISOR_CONTEXT_ID,
345 VMCI_CONTEXT_RESOURCE_ID)) ||
346 context->datagram_queue_size + vmci_dg_size >=
347 VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE)) {
348 spin_unlock(&context->lock);
349 vmci_ctx_put(context);
350 kfree(dq_entry);
351 pr_devel("Context (ID=0x%x) receive queue is full\n", cid);
352 return VMCI_ERROR_NO_RESOURCES;
353 }
354
355 list_add(&dq_entry->list_item, &context->datagram_queue);
356 context->pending_datagrams++;
357 context->datagram_queue_size += vmci_dg_size;
358 ctx_signal_notify(context);
359 wake_up(&context->host_context.wait_queue);
360 spin_unlock(&context->lock);
361 vmci_ctx_put(context);
362
363 return vmci_dg_size;
364}
365
366/*
367 * Verifies whether a context with the specified context ID exists.
368 * FIXME: utility is dubious, as no decision can be reliably made
369 * using this data; contexts can appear and disappear at any time.
370 */
371bool vmci_ctx_exists(u32 cid)
372{
373 struct vmci_ctx *context;
374 bool exists = false;
375
376 rcu_read_lock();
377
378 list_for_each_entry_rcu(context, &ctx_list.head, list_item) {
379 if (context->cid == cid) {
380 exists = true;
381 break;
382 }
383 }
384
385 rcu_read_unlock();
386 return exists;
387}
388
389/*
390 * Retrieves VMCI context corresponding to the given cid.
391 */
392struct vmci_ctx *vmci_ctx_get(u32 cid)
393{
394 struct vmci_ctx *c, *context = NULL;
395
396 if (cid == VMCI_INVALID_ID)
397 return NULL;
398
399 rcu_read_lock();
400 list_for_each_entry_rcu(c, &ctx_list.head, list_item) {
401 if (c->cid == cid) {
402 /*
403 * The context owner drops its own reference to the
404 * context only after removing it from the list and
405 * waiting for RCU grace period to expire. This
406 * means that we are not about to increase the
407 * reference count of something that is in the
408 * process of being destroyed.
409 */
410 context = c;
411 kref_get(&context->kref);
412 break;
413 }
414 }
415 rcu_read_unlock();
416
417 return context;
418}
419
420/*
421 * Deallocates all parts of a context data structure. This
422 * function doesn't lock the context, because it assumes that
423 * the caller was holding the last reference to the context.
424 */
425static void ctx_free_ctx(struct kref *kref)
426{
427 struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref);
428 struct vmci_datagram_queue_entry *dq_entry, *dq_entry_tmp;
429 struct vmci_handle temp_handle;
430 struct vmci_handle_list *notifier, *tmp;
431
432 /*
433 * Fire event to all contexts interested in knowing this
434 * context is dying.
435 */
436 ctx_fire_notification(context->cid, context->priv_flags);
437
438 /*
439 * Cleanup all queue pair resources attached to context. If
440 * the VM dies without cleaning up, this code will make sure
441 * that no resources are leaked.
442 */
443 temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0);
444 while (!vmci_handle_is_equal(temp_handle, VMCI_INVALID_HANDLE)) {
445 if (vmci_qp_broker_detach(temp_handle,
446 context) < VMCI_SUCCESS) {
447 /*
448 * When vmci_qp_broker_detach() succeeds it
449 * removes the handle from the array. If
450 * detach fails, we must remove the handle
451 * ourselves.
452 */
453 vmci_handle_arr_remove_entry(context->queue_pair_array,
454 temp_handle);
455 }
456 temp_handle =
457 vmci_handle_arr_get_entry(context->queue_pair_array, 0);
458 }
459
460 /*
461 * It is fine to destroy this without locking the callQueue, as
462 * this is the only thread having a reference to the context.
463 */
464 list_for_each_entry_safe(dq_entry, dq_entry_tmp,
465 &context->datagram_queue, list_item) {
466 WARN_ON(dq_entry->dg_size != VMCI_DG_SIZE(dq_entry->dg));
467 list_del(&dq_entry->list_item);
468 kfree(dq_entry->dg);
469 kfree(dq_entry);
470 }
471
472 list_for_each_entry_safe(notifier, tmp,
473 &context->notifier_list, node) {
474 list_del(&notifier->node);
475 kfree(notifier);
476 }
477
478 vmci_handle_arr_destroy(context->queue_pair_array);
479 vmci_handle_arr_destroy(context->doorbell_array);
480 vmci_handle_arr_destroy(context->pending_doorbell_array);
481 vmci_ctx_unset_notify(context);
482 if (context->cred)
483 put_cred(context->cred);
484 kfree(context);
485}
486
487/*
488 * Drops reference to VMCI context. If this is the last reference to
489 * the context it will be deallocated. A context is created with
490 * a reference count of one, and on destroy, it is removed from
491 * the context list before its reference count is decremented. Thus,
492 * if we reach zero, we are sure that nobody else is about to increment
493 * it (they need the entry in the context list for that), and so there
494 * is no need for locking.
495 */
496void vmci_ctx_put(struct vmci_ctx *context)
497{
498 kref_put(&context->kref, ctx_free_ctx);
499}
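
The reference rules described above imply the usual get/use/put pattern for lookups; the same pattern appears in vmci_context_get_priv_flags() further down. A minimal sketch with a hypothetical helper example_read_priv_flags():

static int example_read_priv_flags(u32 cid, u32 *flags)
{
	struct vmci_ctx *context;

	context = vmci_ctx_get(cid);	/* takes a reference, or returns NULL */
	if (!context)
		return VMCI_ERROR_NOT_FOUND;

	*flags = context->priv_flags;	/* safe while the reference is held */

	vmci_ctx_put(context);		/* may free the context on the last put */
	return VMCI_SUCCESS;
}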
500
501/*
502 * Dequeues the next datagram and returns it to caller.
503 * The caller passes in a pointer to the max size datagram
504 * it can handle and the datagram is only unqueued if the
505 * size is less than max_size. If it is larger, max_size is set to
506 * the size of the datagram to give the caller a chance to
507 * set up a larger buffer for the guestcall.
508 */
509int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
510 size_t *max_size,
511 struct vmci_datagram **dg)
512{
513 struct vmci_datagram_queue_entry *dq_entry;
514 struct list_head *list_item;
515 int rv;
516
517 /* Dequeue the next datagram entry. */
518 spin_lock(&context->lock);
519 if (context->pending_datagrams == 0) {
520 ctx_clear_notify_call(context);
521 spin_unlock(&context->lock);
522 pr_devel("No datagrams pending\n");
523 return VMCI_ERROR_NO_MORE_DATAGRAMS;
524 }
525
526 list_item = context->datagram_queue.next;
527
528 dq_entry =
529 list_entry(list_item, struct vmci_datagram_queue_entry, list_item);
530
531 /* Check size of caller's buffer. */
532 if (*max_size < dq_entry->dg_size) {
533 *max_size = dq_entry->dg_size;
534 spin_unlock(&context->lock);
535 pr_devel("Caller's buffer should be at least (size=%u bytes)\n",
536 (u32) *max_size);
537 return VMCI_ERROR_NO_MEM;
538 }
539
540 list_del(list_item);
541 context->pending_datagrams--;
542 context->datagram_queue_size -= dq_entry->dg_size;
543 if (context->pending_datagrams == 0) {
544 ctx_clear_notify_call(context);
545 rv = VMCI_SUCCESS;
546 } else {
547 /*
548 * Return the size of the next datagram.
549 */
550 struct vmci_datagram_queue_entry *next_entry;
551
552 list_item = context->datagram_queue.next;
553 next_entry =
554 list_entry(list_item, struct vmci_datagram_queue_entry,
555 list_item);
556
557 /*
558 * The following size_t -> int truncation is fine as
559 * the maximum size of a (routable) datagram is 68KB.
560 */
561 rv = (int)next_entry->dg_size;
562 }
563 spin_unlock(&context->lock);
564
565 /* Caller must free datagram. */
566 *dg = dq_entry->dg;
567 dq_entry->dg = NULL;
568 kfree(dq_entry);
569
570 return rv;
571}
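
A sketch of the retry pattern implied by the comment above: when the pending datagram is larger than *max_size, the call fails with VMCI_ERROR_NO_MEM and leaves the required size in *max_size, so the caller can call again with the updated limit. example_fetch_datagram() and the initial size are hypothetical:

static int example_fetch_datagram(struct vmci_ctx *context)
{
	struct vmci_datagram *dg = NULL;
	size_t max_size = 256;	/* initial guess at the largest expected datagram */
	int rv;

	rv = vmci_ctx_dequeue_datagram(context, &max_size, &dg);
	if (rv == VMCI_ERROR_NO_MEM) {
		/* max_size now holds the size of the datagram at the head. */
		rv = vmci_ctx_dequeue_datagram(context, &max_size, &dg);
	}
	if (rv < VMCI_SUCCESS)
		return rv;

	/* ... process *dg ... */

	kfree(dg);	/* the caller owns and must free the datagram */
	return VMCI_SUCCESS;
}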
572
573/*
574 * Reverts actions set up by vmci_setup_notify(). Unmaps and unlocks the
575 * page mapped/locked by vmci_setup_notify().
576 */
577void vmci_ctx_unset_notify(struct vmci_ctx *context)
578{
579 struct page *notify_page;
580
581 spin_lock(&context->lock);
582
583 notify_page = context->notify_page;
584 context->notify = &ctx_dummy_notify;
585 context->notify_page = NULL;
586
587 spin_unlock(&context->lock);
588
589 if (notify_page) {
590 kunmap(notify_page);
591 put_page(notify_page);
592 }
593}
594
595/*
596 * Add remote_cid to the list of contexts the current context wants
597 * notifications from/about.
598 */
599int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
600{
601 struct vmci_ctx *context;
602 struct vmci_handle_list *notifier, *n;
603 int result;
604 bool exists = false;
605
606 context = vmci_ctx_get(context_id);
607 if (!context)
608 return VMCI_ERROR_NOT_FOUND;
609
610 if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(remote_cid)) {
611 pr_devel("Context removed notifications for other VMs not supported (src=0x%x, remote=0x%x)\n",
612 context_id, remote_cid);
613 result = VMCI_ERROR_DST_UNREACHABLE;
614 goto out;
615 }
616
617 if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) {
618 result = VMCI_ERROR_NO_ACCESS;
619 goto out;
620 }
621
622 notifier = kmalloc(sizeof(struct vmci_handle_list), GFP_KERNEL);
623 if (!notifier) {
624 result = VMCI_ERROR_NO_MEM;
625 goto out;
626 }
627
628 INIT_LIST_HEAD(&notifier->node);
629 notifier->handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
630
631 spin_lock(&context->lock);
632
633 list_for_each_entry(n, &context->notifier_list, node) {
634 if (vmci_handle_is_equal(n->handle, notifier->handle)) {
635 exists = true;
636 break;
637 }
638 }
639
640 if (exists) {
641 kfree(notifier);
642 result = VMCI_ERROR_ALREADY_EXISTS;
643 } else {
644 list_add_tail_rcu(&notifier->node, &context->notifier_list);
645 context->n_notifiers++;
646 result = VMCI_SUCCESS;
647 }
648
649 spin_unlock(&context->lock);
650
651 out:
652 vmci_ctx_put(context);
653 return result;
654}
655
656/*
657 * Remove remote_cid from current context's list of contexts it is
658 * interested in getting notifications from/about.
659 */
660int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
661{
662 struct vmci_ctx *context;
663 struct vmci_handle_list *notifier, *tmp;
664 struct vmci_handle handle;
665 bool found = false;
666
667 context = vmci_ctx_get(context_id);
668 if (!context)
669 return VMCI_ERROR_NOT_FOUND;
670
671 handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
672
673 spin_lock(&context->lock);
674 list_for_each_entry_safe(notifier, tmp,
675 &context->notifier_list, node) {
676 if (vmci_handle_is_equal(notifier->handle, handle)) {
677 list_del_rcu(&notifier->node);
678 context->n_notifiers--;
679 found = true;
680 break;
681 }
682 }
683 spin_unlock(&context->lock);
684
685 if (found) {
686 synchronize_rcu();
687 kfree(notifier);
688 }
689
690 vmci_ctx_put(context);
691
692 return found ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
693}
694
695static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context,
696 u32 *buf_size, void **pbuf)
697{
698 u32 *notifiers;
699 size_t data_size;
700 struct vmci_handle_list *entry;
701 int i = 0;
702
703 if (context->n_notifiers == 0) {
704 *buf_size = 0;
705 *pbuf = NULL;
706 return VMCI_SUCCESS;
707 }
708
709 data_size = context->n_notifiers * sizeof(*notifiers);
710 if (*buf_size < data_size) {
711 *buf_size = data_size;
712 return VMCI_ERROR_MORE_DATA;
713 }
714
715 notifiers = kmalloc(data_size, GFP_ATOMIC); /* FIXME: want GFP_KERNEL */
716 if (!notifiers)
717 return VMCI_ERROR_NO_MEM;
718
719 list_for_each_entry(entry, &context->notifier_list, node)
720 notifiers[i++] = entry->handle.context;
721
722 *buf_size = data_size;
723 *pbuf = notifiers;
724 return VMCI_SUCCESS;
725}
726
727static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
728 u32 *buf_size, void **pbuf)
729{
730 struct dbell_cpt_state *dbells;
731 size_t n_doorbells;
732 int i;
733
734 n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
735 if (n_doorbells > 0) {
736 size_t data_size = n_doorbells * sizeof(*dbells);
737 if (*buf_size < data_size) {
738 *buf_size = data_size;
739 return VMCI_ERROR_MORE_DATA;
740 }
741
742 dbells = kmalloc(data_size, GFP_ATOMIC);
743 if (!dbells)
744 return VMCI_ERROR_NO_MEM;
745
746 for (i = 0; i < n_doorbells; i++)
747 dbells[i].handle = vmci_handle_arr_get_entry(
748 context->doorbell_array, i);
749
750 *buf_size = data_size;
751 *pbuf = dbells;
752 } else {
753 *buf_size = 0;
754 *pbuf = NULL;
755 }
756
757 return VMCI_SUCCESS;
758}
759
760/*
761 * Get current context's checkpoint state of given type.
762 */
763int vmci_ctx_get_chkpt_state(u32 context_id,
764 u32 cpt_type,
765 u32 *buf_size,
766 void **pbuf)
767{
768 struct vmci_ctx *context;
769 int result;
770
771 context = vmci_ctx_get(context_id);
772 if (!context)
773 return VMCI_ERROR_NOT_FOUND;
774
775 spin_lock(&context->lock);
776
777 switch (cpt_type) {
778 case VMCI_NOTIFICATION_CPT_STATE:
779 result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf);
780 break;
781
782 case VMCI_WELLKNOWN_CPT_STATE:
783 /*
784 * For compatibility with VMX'en with VM to VM communication, we
785 * always return zero wellknown handles.
786 */
787
788 *buf_size = 0;
789 *pbuf = NULL;
790 result = VMCI_SUCCESS;
791 break;
792
793 case VMCI_DOORBELL_CPT_STATE:
794 result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf);
795 break;
796
797 default:
798 pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
799 result = VMCI_ERROR_INVALID_ARGS;
800 break;
801 }
802
803 spin_unlock(&context->lock);
804 vmci_ctx_put(context);
805
806 return result;
807}
808
809/*
810 * Set current context's checkpoint state of given type.
811 */
812int vmci_ctx_set_chkpt_state(u32 context_id,
813 u32 cpt_type,
814 u32 buf_size,
815 void *cpt_buf)
816{
817 u32 i;
818 u32 current_id;
819 int result = VMCI_SUCCESS;
820 u32 num_ids = buf_size / sizeof(u32);
821
822 if (cpt_type == VMCI_WELLKNOWN_CPT_STATE && num_ids > 0) {
823 /*
824 * We would end up here if VMX with VM to VM communication
825 * attempts to restore a checkpoint with wellknown handles.
826 */
827 pr_warn("Attempt to restore checkpoint with obsolete wellknown handles\n");
828 return VMCI_ERROR_OBSOLETE;
829 }
830
831 if (cpt_type != VMCI_NOTIFICATION_CPT_STATE) {
832 pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
833 return VMCI_ERROR_INVALID_ARGS;
834 }
835
836 for (i = 0; i < num_ids && result == VMCI_SUCCESS; i++) {
837 current_id = ((u32 *)cpt_buf)[i];
838 result = vmci_ctx_add_notification(context_id, current_id);
839 if (result != VMCI_SUCCESS)
840 break;
841 }
842 if (result != VMCI_SUCCESS)
843 pr_devel("Failed to set cpt state (type=%d) (error=%d)\n",
844 cpt_type, result);
845
846 return result;
847}
848
849/*
850 * Retrieves the specified context's pending notifications in the
851 * form of a handle array. The handle arrays returned are the
852 * actual data - not a copy - and should not be modified by the
853 * caller. They must be released using
854 * vmci_ctx_rcv_notifications_release.
855 */
856int vmci_ctx_rcv_notifications_get(u32 context_id,
857 struct vmci_handle_arr **db_handle_array,
858 struct vmci_handle_arr **qp_handle_array)
859{
860 struct vmci_ctx *context;
861 int result = VMCI_SUCCESS;
862
863 context = vmci_ctx_get(context_id);
864 if (context == NULL)
865 return VMCI_ERROR_NOT_FOUND;
866
867 spin_lock(&context->lock);
868
869 *db_handle_array = context->pending_doorbell_array;
870 context->pending_doorbell_array = vmci_handle_arr_create(0);
871 if (!context->pending_doorbell_array) {
872 context->pending_doorbell_array = *db_handle_array;
873 *db_handle_array = NULL;
874 result = VMCI_ERROR_NO_MEM;
875 }
876 *qp_handle_array = NULL;
877
878 spin_unlock(&context->lock);
879 vmci_ctx_put(context);
880
881 return result;
882}
883
884/*
885 * Releases handle arrays with pending notifications previously
886 * retrieved using vmci_ctx_rcv_notifications_get. If the
887 * notifications were not successfully handed over to the guest,
888 * success must be false.
889 */
890void vmci_ctx_rcv_notifications_release(u32 context_id,
891 struct vmci_handle_arr *db_handle_array,
892 struct vmci_handle_arr *qp_handle_array,
893 bool success)
894{
895 struct vmci_ctx *context = vmci_ctx_get(context_id);
896
897 spin_lock(&context->lock);
898 if (!success) {
899 struct vmci_handle handle;
900
901 /*
902 * New notifications may have been added while we were not
903 * holding the context lock, so we transfer any new pending
904 * doorbell notifications to the old array, and reinstate the
905 * old array.
906 */
907
908 handle = vmci_handle_arr_remove_tail(
909 context->pending_doorbell_array);
910 while (!vmci_handle_is_invalid(handle)) {
911 if (!vmci_handle_arr_has_entry(db_handle_array,
912 handle)) {
913 vmci_handle_arr_append_entry(
914 &db_handle_array, handle);
915 }
916 handle = vmci_handle_arr_remove_tail(
917 context->pending_doorbell_array);
918 }
919 vmci_handle_arr_destroy(context->pending_doorbell_array);
920 context->pending_doorbell_array = db_handle_array;
921 db_handle_array = NULL;
922 } else {
923 ctx_clear_notify_call(context);
924 }
925 spin_unlock(&context->lock);
926 vmci_ctx_put(context);
927
928 if (db_handle_array)
929 vmci_handle_arr_destroy(db_handle_array);
930
931 if (qp_handle_array)
932 vmci_handle_arr_destroy(qp_handle_array);
933}
934
935/*
936 * Registers that a new doorbell handle has been allocated by the
937 * context. Only registered doorbell handles can be notified.
938 */
939int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle)
940{
941 struct vmci_ctx *context;
942 int result;
943
944 if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
945 return VMCI_ERROR_INVALID_ARGS;
946
947 context = vmci_ctx_get(context_id);
948 if (context == NULL)
949 return VMCI_ERROR_NOT_FOUND;
950
951 spin_lock(&context->lock);
952 if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) {
953 vmci_handle_arr_append_entry(&context->doorbell_array, handle);
954 result = VMCI_SUCCESS;
955 } else {
956 result = VMCI_ERROR_DUPLICATE_ENTRY;
957 }
958
959 spin_unlock(&context->lock);
960 vmci_ctx_put(context);
961
962 return result;
963}
964
965/*
966 * Unregisters a doorbell handle that was previously registered
967 * with vmci_ctx_dbell_create.
968 */
969int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle)
970{
971 struct vmci_ctx *context;
972 struct vmci_handle removed_handle;
973
974 if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
975 return VMCI_ERROR_INVALID_ARGS;
976
977 context = vmci_ctx_get(context_id);
978 if (context == NULL)
979 return VMCI_ERROR_NOT_FOUND;
980
981 spin_lock(&context->lock);
982 removed_handle =
983 vmci_handle_arr_remove_entry(context->doorbell_array, handle);
984 vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle);
985 spin_unlock(&context->lock);
986
987 vmci_ctx_put(context);
988
989 return vmci_handle_is_invalid(removed_handle) ?
990 VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
991}
992
993/*
994 * Unregisters all doorbell handles that were previously
995 * registered with vmci_ctx_dbell_create.
996 */
997int vmci_ctx_dbell_destroy_all(u32 context_id)
998{
999 struct vmci_ctx *context;
1000 struct vmci_handle handle;
1001
1002 if (context_id == VMCI_INVALID_ID)
1003 return VMCI_ERROR_INVALID_ARGS;
1004
1005 context = vmci_ctx_get(context_id);
1006 if (context == NULL)
1007 return VMCI_ERROR_NOT_FOUND;
1008
1009 spin_lock(&context->lock);
1010 do {
1011 struct vmci_handle_arr *arr = context->doorbell_array;
1012 handle = vmci_handle_arr_remove_tail(arr);
1013 } while (!vmci_handle_is_invalid(handle));
1014 do {
1015 struct vmci_handle_arr *arr = context->pending_doorbell_array;
1016 handle = vmci_handle_arr_remove_tail(arr);
1017 } while (!vmci_handle_is_invalid(handle));
1018 spin_unlock(&context->lock);
1019
1020 vmci_ctx_put(context);
1021
1022 return VMCI_SUCCESS;
1023}
1024
1025/*
1026 * Registers a notification of a doorbell handle initiated by the
1027 * specified source context. The notification of doorbells are
1028 * subject to the same isolation rules as datagram delivery. To
1029 * allow host side senders of notifications a finer granularity
1030 * of sender rights than those assigned to the sending context
1031 * itself, the host context is required to specify a different
1032 * set of privilege flags that will override the privileges of
1033 * the source context.
1034 */
1035int vmci_ctx_notify_dbell(u32 src_cid,
1036 struct vmci_handle handle,
1037 u32 src_priv_flags)
1038{
1039 struct vmci_ctx *dst_context;
1040 int result;
1041
1042 if (vmci_handle_is_invalid(handle))
1043 return VMCI_ERROR_INVALID_ARGS;
1044
1045 /* Get the target VM's VMCI context. */
1046 dst_context = vmci_ctx_get(handle.context);
1047 if (!dst_context) {
1048 pr_devel("Invalid context (ID=0x%x)\n", handle.context);
1049 return VMCI_ERROR_NOT_FOUND;
1050 }
1051
1052 if (src_cid != handle.context) {
1053 u32 dst_priv_flags;
1054
1055 if (VMCI_CONTEXT_IS_VM(src_cid) &&
1056 VMCI_CONTEXT_IS_VM(handle.context)) {
1057 pr_devel("Doorbell notification from VM to VM not supported (src=0x%x, dst=0x%x)\n",
1058 src_cid, handle.context);
1059 result = VMCI_ERROR_DST_UNREACHABLE;
1060 goto out;
1061 }
1062
1063 result = vmci_dbell_get_priv_flags(handle, &dst_priv_flags);
1064 if (result < VMCI_SUCCESS) {
1065 pr_warn("Failed to get privilege flags for destination (handle=0x%x:0x%x)\n",
1066 handle.context, handle.resource);
1067 goto out;
1068 }
1069
1070 if (src_cid != VMCI_HOST_CONTEXT_ID ||
1071 src_priv_flags == VMCI_NO_PRIVILEGE_FLAGS) {
1072 src_priv_flags = vmci_context_get_priv_flags(src_cid);
1073 }
1074
1075 if (vmci_deny_interaction(src_priv_flags, dst_priv_flags)) {
1076 result = VMCI_ERROR_NO_ACCESS;
1077 goto out;
1078 }
1079 }
1080
1081 if (handle.context == VMCI_HOST_CONTEXT_ID) {
1082 result = vmci_dbell_host_context_notify(src_cid, handle);
1083 } else {
1084 spin_lock(&dst_context->lock);
1085
1086 if (!vmci_handle_arr_has_entry(dst_context->doorbell_array,
1087 handle)) {
1088 result = VMCI_ERROR_NOT_FOUND;
1089 } else {
1090 if (!vmci_handle_arr_has_entry(
1091 dst_context->pending_doorbell_array,
1092 handle)) {
1093 vmci_handle_arr_append_entry(
1094 &dst_context->pending_doorbell_array,
1095 handle);
1096
1097 ctx_signal_notify(dst_context);
1098 wake_up(&dst_context->host_context.wait_queue);
1099
1100 }
1101 result = VMCI_SUCCESS;
1102 }
1103 spin_unlock(&dst_context->lock);
1104 }
1105
1106 out:
1107 vmci_ctx_put(dst_context);
1108
1109 return result;
1110}
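
As described above, a host-side sender may pass explicit privilege flags that override those of the source context; with VMCI_NO_PRIVILEGE_FLAGS the source context's own privileges are looked up instead. A hypothetical host-side caller (example_ring_guest_doorbell() is illustrative):

static int example_ring_guest_doorbell(struct vmci_handle handle)
{
	/* Notify as the host, explicitly acting with trusted privileges. */
	return vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID, handle,
				     VMCI_PRIVILEGE_FLAG_TRUSTED);
}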
1111
1112bool vmci_ctx_supports_host_qp(struct vmci_ctx *context)
1113{
1114 return context && context->user_version >= VMCI_VERSION_HOSTQP;
1115}
1116
1117/*
1118 * Registers that a new queue pair handle has been allocated by
1119 * the context.
1120 */
1121int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle)
1122{
1123 int result;
1124
1125 if (context == NULL || vmci_handle_is_invalid(handle))
1126 return VMCI_ERROR_INVALID_ARGS;
1127
1128 if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) {
1129 vmci_handle_arr_append_entry(&context->queue_pair_array,
1130 handle);
1131 result = VMCI_SUCCESS;
1132 } else {
1133 result = VMCI_ERROR_DUPLICATE_ENTRY;
1134 }
1135
1136 return result;
1137}
1138
1139/*
1140 * Unregisters a queue pair handle that was previously registered
1141 * with vmci_ctx_qp_create.
1142 */
1143int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle)
1144{
1145 struct vmci_handle hndl;
1146
1147 if (context == NULL || vmci_handle_is_invalid(handle))
1148 return VMCI_ERROR_INVALID_ARGS;
1149
1150 hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle);
1151
1152 return vmci_handle_is_invalid(hndl) ?
1153 VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
1154}
1155
1156/*
1157 * Determines whether a given queue pair handle is registered
1158 * with the given context.
1159 */
1160bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle)
1161{
1162 if (context == NULL || vmci_handle_is_invalid(handle))
1163 return false;
1164
1165 return vmci_handle_arr_has_entry(context->queue_pair_array, handle);
1166}
1167
1168/*
1169 * vmci_context_get_priv_flags() - Retrieve privilege flags.
1170 * @context_id: The context ID of the VMCI context.
1171 *
1172 * Retrieves privilege flags of the given VMCI context ID.
1173 */
1174u32 vmci_context_get_priv_flags(u32 context_id)
1175{
1176 if (vmci_host_code_active()) {
1177 u32 flags;
1178 struct vmci_ctx *context;
1179
1180 context = vmci_ctx_get(context_id);
1181 if (!context)
1182 return VMCI_LEAST_PRIVILEGE_FLAGS;
1183
1184 flags = context->priv_flags;
1185 vmci_ctx_put(context);
1186 return flags;
1187 }
1188 return VMCI_NO_PRIVILEGE_FLAGS;
1189}
1190EXPORT_SYMBOL_GPL(vmci_context_get_priv_flags);
1191
1192/*
1193 * vmci_is_context_owner() - Determines if the user is the context owner
1194 * @context_id: The context ID of the VMCI context.
1195 * @uid: The host user id (real kernel value).
1196 *
1197 * Determines whether a given UID is the owner of given VMCI context.
1198 */
1199bool vmci_is_context_owner(u32 context_id, kuid_t uid)
1200{
1201 bool is_owner = false;
1202
1203 if (vmci_host_code_active()) {
1204 struct vmci_ctx *context = vmci_ctx_get(context_id);
1205 if (context) {
1206 if (context->cred)
1207 is_owner = uid_eq(context->cred->uid, uid);
1208 vmci_ctx_put(context);
1209 }
1210 }
1211
1212 return is_owner;
1213}
1214EXPORT_SYMBOL_GPL(vmci_is_context_owner);
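
A hypothetical use of this helper from host-side code acting on behalf of the calling user, where current_uid() supplies the caller's kuid_t (example_check_owner() is illustrative):

static int example_check_owner(u32 cid)
{
	if (!vmci_is_context_owner(cid, current_uid()))
		return -EPERM;	/* only the owning user may touch this context */
	return 0;
}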
diff --git a/drivers/misc/vmw_vmci/vmci_context.h b/drivers/misc/vmw_vmci/vmci_context.h
new file mode 100644
index 000000000000..24a88e68a1e6
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_context.h
@@ -0,0 +1,182 @@
1/*
2 * VMware VMCI driver (vmciContext.h)
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#ifndef _VMCI_CONTEXT_H_
17#define _VMCI_CONTEXT_H_
18
19#include <linux/vmw_vmci_defs.h>
20#include <linux/atomic.h>
21#include <linux/kref.h>
22#include <linux/types.h>
23#include <linux/wait.h>
24
25#include "vmci_handle_array.h"
26#include "vmci_datagram.h"
27
28/* Used to determine what checkpoint state to get and set. */
29enum {
30 VMCI_NOTIFICATION_CPT_STATE = 1,
31 VMCI_WELLKNOWN_CPT_STATE = 2,
32 VMCI_DG_OUT_STATE = 3,
33 VMCI_DG_IN_STATE = 4,
34 VMCI_DG_IN_SIZE_STATE = 5,
35 VMCI_DOORBELL_CPT_STATE = 6,
36};
37
38/* Host specific struct used for signalling */
39struct vmci_host {
40 wait_queue_head_t wait_queue;
41};
42
43struct vmci_handle_list {
44 struct list_head node;
45 struct vmci_handle handle;
46};
47
48struct vmci_ctx {
49 struct list_head list_item; /* For global VMCI list. */
50 u32 cid;
51 struct kref kref;
52 struct list_head datagram_queue; /* Head of per VM queue. */
53 u32 pending_datagrams;
54 size_t datagram_queue_size; /* Size of datagram queue in bytes. */
55
56 /*
57 * Version of the code that created
58 * this context; e.g., VMX.
59 */
60 int user_version;
61 spinlock_t lock; /* Locks callQueue and handle_arrays. */
62
63 /*
64 * queue_pairs attached to. The array of
65 * handles for queue pairs is accessed
66 * from the code for QP API, and there
67 * it is protected by the QP lock. It
68 * is also accessed from the context
69 * clean up path, which does not
70 * require a lock. VMCILock is not
71 * used to protect the QP array field.
72 */
73 struct vmci_handle_arr *queue_pair_array;
74
75 /* Doorbells created by context. */
76 struct vmci_handle_arr *doorbell_array;
77
78 /* Doorbells pending for context. */
79 struct vmci_handle_arr *pending_doorbell_array;
80
81 /* Contexts current context is subscribing to. */
82 struct list_head notifier_list;
83 unsigned int n_notifiers;
84
85 struct vmci_host host_context;
86 u32 priv_flags;
87
88 const struct cred *cred;
89 bool *notify; /* Notify flag pointer - hosted only. */
90 struct page *notify_page; /* Page backing the notify UVA. */
91};
92
93/* VMCINotifyAddRemoveInfo: Used to add/remove remote context notifications. */
94struct vmci_ctx_info {
95 u32 remote_cid;
96 int result;
97};
98
99/* VMCICptBufInfo: Used to set/get current context's checkpoint state. */
100struct vmci_ctx_chkpt_buf_info {
101 u64 cpt_buf;
102 u32 cpt_type;
103 u32 buf_size;
104 s32 result;
105 u32 _pad;
106};
107
108/*
109 * VMCINotificationReceiveInfo: Used to receive pending notifications
110 * for doorbells and queue pairs.
111 */
112struct vmci_ctx_notify_recv_info {
113 u64 db_handle_buf_uva;
114 u64 db_handle_buf_size;
115 u64 qp_handle_buf_uva;
116 u64 qp_handle_buf_size;
117 s32 result;
118 u32 _pad;
119};
120
121/*
122 * Utility function that checks whether two entities are allowed
123 * to interact. If one of them is restricted, the other one must
124 * be trusted.
125 */
126static inline bool vmci_deny_interaction(u32 part_one, u32 part_two)
127{
128 return ((part_one & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
129 !(part_two & VMCI_PRIVILEGE_FLAG_TRUSTED)) ||
130 ((part_two & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
131 !(part_one & VMCI_PRIVILEGE_FLAG_TRUSTED));
132}
133
134struct vmci_ctx *vmci_ctx_create(u32 cid, u32 flags,
135 uintptr_t event_hnd, int version,
136 const struct cred *cred);
137void vmci_ctx_destroy(struct vmci_ctx *context);
138
139bool vmci_ctx_supports_host_qp(struct vmci_ctx *context);
140int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg);
141int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
142 size_t *max_size, struct vmci_datagram **dg);
143int vmci_ctx_pending_datagrams(u32 cid, u32 *pending);
144struct vmci_ctx *vmci_ctx_get(u32 cid);
145void vmci_ctx_put(struct vmci_ctx *context);
146bool vmci_ctx_exists(u32 cid);
147
148int vmci_ctx_add_notification(u32 context_id, u32 remote_cid);
149int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid);
150int vmci_ctx_get_chkpt_state(u32 context_id, u32 cpt_type,
151 u32 *num_cids, void **cpt_buf_ptr);
152int vmci_ctx_set_chkpt_state(u32 context_id, u32 cpt_type,
153 u32 num_cids, void *cpt_buf);
154
155int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle);
156int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle);
157bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle);
158
159void vmci_ctx_check_signal_notify(struct vmci_ctx *context);
160void vmci_ctx_unset_notify(struct vmci_ctx *context);
161
162int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle);
163int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle);
164int vmci_ctx_dbell_destroy_all(u32 context_id);
165int vmci_ctx_notify_dbell(u32 cid, struct vmci_handle handle,
166 u32 src_priv_flags);
167
168int vmci_ctx_rcv_notifications_get(u32 context_id, struct vmci_handle_arr
169 **db_handle_array, struct vmci_handle_arr
170 **qp_handle_array);
171void vmci_ctx_rcv_notifications_release(u32 context_id, struct vmci_handle_arr
172 *db_handle_array, struct vmci_handle_arr
173 *qp_handle_array, bool success);
174
175static inline u32 vmci_ctx_get_id(struct vmci_ctx *context)
176{
177 if (!context)
178 return VMCI_INVALID_ID;
179 return context->cid;
180}
181
182#endif /* _VMCI_CONTEXT_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
new file mode 100644
index 000000000000..ed5c433cd493
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_datagram.c
@@ -0,0 +1,500 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#include <linux/vmw_vmci_defs.h>
17#include <linux/vmw_vmci_api.h>
18#include <linux/module.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/bug.h>
22
23#include "vmci_datagram.h"
24#include "vmci_resource.h"
25#include "vmci_context.h"
26#include "vmci_driver.h"
27#include "vmci_event.h"
28#include "vmci_route.h"
29
30/*
31 * struct datagram_entry describes the datagram entity. It is used for datagram
32 * entities created only on the host.
33 */
34struct datagram_entry {
35 struct vmci_resource resource;
36 u32 flags;
37 bool run_delayed;
38 vmci_datagram_recv_cb recv_cb;
39 void *client_data;
40 u32 priv_flags;
41};
42
43struct delayed_datagram_info {
44 struct datagram_entry *entry;
45 struct vmci_datagram msg;
46 struct work_struct work;
47 bool in_dg_host_queue;
48};
49
50/* Number of in-flight host->host datagrams */
51static atomic_t delayed_dg_host_queue_size = ATOMIC_INIT(0);
52
53/*
54 * Create a datagram entry given a handle pointer.
55 */
56static int dg_create_handle(u32 resource_id,
57 u32 flags,
58 u32 priv_flags,
59 vmci_datagram_recv_cb recv_cb,
60 void *client_data, struct vmci_handle *out_handle)
61{
62 int result;
63 u32 context_id;
64 struct vmci_handle handle;
65 struct datagram_entry *entry;
66
67 if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0)
68 return VMCI_ERROR_INVALID_ARGS;
69
70 if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) {
71 context_id = VMCI_INVALID_ID;
72 } else {
73 context_id = vmci_get_context_id();
74 if (context_id == VMCI_INVALID_ID)
75 return VMCI_ERROR_NO_RESOURCES;
76 }
77
78 handle = vmci_make_handle(context_id, resource_id);
79
80 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
81 if (!entry) {
82 pr_warn("Failed allocating memory for datagram entry\n");
83 return VMCI_ERROR_NO_MEM;
84 }
85
86 entry->run_delayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? true : false;
87 entry->flags = flags;
88 entry->recv_cb = recv_cb;
89 entry->client_data = client_data;
90 entry->priv_flags = priv_flags;
91
92 /* Make datagram resource live. */
93 result = vmci_resource_add(&entry->resource,
94 VMCI_RESOURCE_TYPE_DATAGRAM,
95 handle);
96 if (result != VMCI_SUCCESS) {
97 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
98 handle.context, handle.resource, result);
99 kfree(entry);
100 return result;
101 }
102
103 *out_handle = vmci_resource_handle(&entry->resource);
104 return VMCI_SUCCESS;
105}
106
107/*
108 * Internal utility function with the same purpose as
109 * vmci_datagram_get_priv_flags that also takes a context_id.
110 */
111static int vmci_datagram_get_priv_flags(u32 context_id,
112 struct vmci_handle handle,
113 u32 *priv_flags)
114{
115 if (context_id == VMCI_INVALID_ID)
116 return VMCI_ERROR_INVALID_ARGS;
117
118 if (context_id == VMCI_HOST_CONTEXT_ID) {
119 struct datagram_entry *src_entry;
120 struct vmci_resource *resource;
121
122 resource = vmci_resource_by_handle(handle,
123 VMCI_RESOURCE_TYPE_DATAGRAM);
124 if (!resource)
125 return VMCI_ERROR_INVALID_ARGS;
126
127 src_entry = container_of(resource, struct datagram_entry,
128 resource);
129 *priv_flags = src_entry->priv_flags;
130 vmci_resource_put(resource);
131 } else if (context_id == VMCI_HYPERVISOR_CONTEXT_ID)
132 *priv_flags = VMCI_MAX_PRIVILEGE_FLAGS;
133 else
134 *priv_flags = vmci_context_get_priv_flags(context_id);
135
136 return VMCI_SUCCESS;
137}
138
139/*
140 * Calls the specified callback in a delayed context.
141 */
142static void dg_delayed_dispatch(struct work_struct *work)
143{
144 struct delayed_datagram_info *dg_info =
145 container_of(work, struct delayed_datagram_info, work);
146
147 dg_info->entry->recv_cb(dg_info->entry->client_data, &dg_info->msg);
148
149 vmci_resource_put(&dg_info->entry->resource);
150
151 if (dg_info->in_dg_host_queue)
152 atomic_dec(&delayed_dg_host_queue_size);
153
154 kfree(dg_info);
155}
156
157/*
158 * Dispatch datagram as a host, to the host, or other vm context. This
159 * function cannot dispatch to hypervisor context handlers. This should
160 * have been handled before we get here by vmci_datagram_dispatch.
161 * Returns number of bytes sent on success, error code otherwise.
162 */
163static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
164{
165 int retval;
166 size_t dg_size;
167 u32 src_priv_flags;
168
169 dg_size = VMCI_DG_SIZE(dg);
170
171 /* Host cannot send to the hypervisor. */
172 if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID)
173 return VMCI_ERROR_DST_UNREACHABLE;
174
175 /* Check that source handle matches sending context. */
176 if (dg->src.context != context_id) {
177 pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n",
178 context_id, dg->src.context, dg->src.resource);
179 return VMCI_ERROR_NO_ACCESS;
180 }
181
182 /* Get hold of privileges of sending endpoint. */
183 retval = vmci_datagram_get_priv_flags(context_id, dg->src,
184 &src_priv_flags);
185 if (retval != VMCI_SUCCESS) {
186 pr_warn("Couldn't get privileges (handle=0x%x:0x%x)\n",
187 dg->src.context, dg->src.resource);
188 return retval;
189 }
190
191 /* Determine if we should route to host or guest destination. */
192 if (dg->dst.context == VMCI_HOST_CONTEXT_ID) {
193 /* Route to host datagram entry. */
194 struct datagram_entry *dst_entry;
195 struct vmci_resource *resource;
196
197 if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
198 dg->dst.resource == VMCI_EVENT_HANDLER) {
199 return vmci_event_dispatch(dg);
200 }
201
202 resource = vmci_resource_by_handle(dg->dst,
203 VMCI_RESOURCE_TYPE_DATAGRAM);
204 if (!resource) {
205 pr_devel("Sending to invalid destination (handle=0x%x:0x%x)\n",
206 dg->dst.context, dg->dst.resource);
207 return VMCI_ERROR_INVALID_RESOURCE;
208 }
209 dst_entry = container_of(resource, struct datagram_entry,
210 resource);
211 if (vmci_deny_interaction(src_priv_flags,
212 dst_entry->priv_flags)) {
213 vmci_resource_put(resource);
214 return VMCI_ERROR_NO_ACCESS;
215 }
216
217 /*
218 * If a VMCI datagram destined for the host is also sent by the
219 * host, we always run it delayed. This ensures that no locks
220 * are held when the datagram callback runs.
221 */
222 if (dst_entry->run_delayed ||
223 dg->src.context == VMCI_HOST_CONTEXT_ID) {
224 struct delayed_datagram_info *dg_info;
225
226 if (atomic_add_return(1, &delayed_dg_host_queue_size)
227 == VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) {
228 atomic_dec(&delayed_dg_host_queue_size);
229 vmci_resource_put(resource);
230 return VMCI_ERROR_NO_MEM;
231 }
232
233 dg_info = kmalloc(sizeof(*dg_info) +
234 (size_t) dg->payload_size, GFP_ATOMIC);
235 if (!dg_info) {
236 atomic_dec(&delayed_dg_host_queue_size);
237 vmci_resource_put(resource);
238 return VMCI_ERROR_NO_MEM;
239 }
240
241 dg_info->in_dg_host_queue = true;
242 dg_info->entry = dst_entry;
243 memcpy(&dg_info->msg, dg, dg_size);
244
245 INIT_WORK(&dg_info->work, dg_delayed_dispatch);
246 schedule_work(&dg_info->work);
247 retval = VMCI_SUCCESS;
248
249 } else {
250 retval = dst_entry->recv_cb(dst_entry->client_data, dg);
251 vmci_resource_put(resource);
252 if (retval < VMCI_SUCCESS)
253 return retval;
254 }
255 } else {
256 /* Route to destination VM context. */
257 struct vmci_datagram *new_dg;
258
259 if (context_id != dg->dst.context) {
260 if (vmci_deny_interaction(src_priv_flags,
261 vmci_context_get_priv_flags
262 (dg->dst.context))) {
263 return VMCI_ERROR_NO_ACCESS;
264 } else if (VMCI_CONTEXT_IS_VM(context_id)) {
265 /*
266 * If the sending context is a VM, it
267 * cannot reach another VM.
268 */
269
270 pr_devel("Datagram communication between VMs not supported (src=0x%x, dst=0x%x)\n",
271 context_id, dg->dst.context);
272 return VMCI_ERROR_DST_UNREACHABLE;
273 }
274 }
275
276 /* We make a copy to enqueue. */
277 new_dg = kmalloc(dg_size, GFP_KERNEL);
278 if (new_dg == NULL)
279 return VMCI_ERROR_NO_MEM;
280
281 memcpy(new_dg, dg, dg_size);
282 retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg);
283 if (retval < VMCI_SUCCESS) {
284 kfree(new_dg);
285 return retval;
286 }
287 }
288
289 /*
290 * We currently truncate the size to signed 32 bits. This doesn't
291 * matter for this handler as it only supports 4Kb messages.
292 */
293 return (int)dg_size;
294}
295
296/*
297 * Dispatch datagram as a guest, down through the VMX and potentially to
298 * the host.
299 * Returns number of bytes sent on success, error code otherwise.
300 */
301static int dg_dispatch_as_guest(struct vmci_datagram *dg)
302{
303 int retval;
304 struct vmci_resource *resource;
305
306 resource = vmci_resource_by_handle(dg->src,
307 VMCI_RESOURCE_TYPE_DATAGRAM);
308 if (!resource)
309 return VMCI_ERROR_NO_HANDLE;
310
311 retval = vmci_send_datagram(dg);
312 vmci_resource_put(resource);
313 return retval;
314}
315
316/*
317 * Dispatch datagram. This will determine the routing for the datagram
318 * and dispatch it accordingly.
319 * Returns number of bytes sent on success, error code otherwise.
320 */
321int vmci_datagram_dispatch(u32 context_id,
322 struct vmci_datagram *dg, bool from_guest)
323{
324 int retval;
325 enum vmci_route route;
326
327 BUILD_BUG_ON(sizeof(struct vmci_datagram) != 24);
328
329 if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) {
330 pr_devel("Payload (size=%llu bytes) too big to send\n",
331 (unsigned long long)dg->payload_size);
332 return VMCI_ERROR_INVALID_ARGS;
333 }
334
335 retval = vmci_route(&dg->src, &dg->dst, from_guest, &route);
336 if (retval < VMCI_SUCCESS) {
337 pr_devel("Failed to route datagram (src=0x%x, dst=0x%x, err=%d)\n",
338 dg->src.context, dg->dst.context, retval);
339 return retval;
340 }
341
342 if (VMCI_ROUTE_AS_HOST == route) {
343 if (VMCI_INVALID_ID == context_id)
344 context_id = VMCI_HOST_CONTEXT_ID;
345 return dg_dispatch_as_host(context_id, dg);
346 }
347
348 if (VMCI_ROUTE_AS_GUEST == route)
349 return dg_dispatch_as_guest(dg);
350
351 pr_warn("Unknown route (%d) for datagram\n", route);
352 return VMCI_ERROR_DST_UNREACHABLE;
353}
354
355/*
356 * Invoke the handler for the given datagram. This is intended to be
357 * called only when acting as a guest and receiving a datagram from the
358 * virtual device.
359 */
360int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
361{
362 struct vmci_resource *resource;
363 struct datagram_entry *dst_entry;
364
365 resource = vmci_resource_by_handle(dg->dst,
366 VMCI_RESOURCE_TYPE_DATAGRAM);
367 if (!resource) {
368 pr_devel("destination (handle=0x%x:0x%x) doesn't exist\n",
369 dg->dst.context, dg->dst.resource);
370 return VMCI_ERROR_NO_HANDLE;
371 }
372
373 dst_entry = container_of(resource, struct datagram_entry, resource);
374 if (dst_entry->run_delayed) {
375 struct delayed_datagram_info *dg_info;
376
377 dg_info = kmalloc(sizeof(*dg_info) + (size_t)dg->payload_size,
378 GFP_ATOMIC);
379 if (!dg_info) {
380 vmci_resource_put(resource);
381 return VMCI_ERROR_NO_MEM;
382 }
383
384 dg_info->in_dg_host_queue = false;
385 dg_info->entry = dst_entry;
386 memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
387
388 INIT_WORK(&dg_info->work, dg_delayed_dispatch);
389 schedule_work(&dg_info->work);
390 } else {
391 dst_entry->recv_cb(dst_entry->client_data, dg);
392 vmci_resource_put(resource);
393 }
394
395 return VMCI_SUCCESS;
396}
397
398/*
399 * vmci_datagram_create_handle_priv() - Create host context datagram endpoint
400 * @resource_id: The resource ID.
401 * @flags: Datagram Flags.
402 * @priv_flags: Privilege Flags.
403 * @recv_cb: Callback when receiving datagrams.
404 * @client_data: Opaque pointer passed back to @recv_cb.
405 * @out_handle: vmci_handle that is populated as a result of this function.
406 *
407 * Creates a host context datagram endpoint and returns a handle to it.
408 */
409int vmci_datagram_create_handle_priv(u32 resource_id,
410 u32 flags,
411 u32 priv_flags,
412 vmci_datagram_recv_cb recv_cb,
413 void *client_data,
414 struct vmci_handle *out_handle)
415{
416 if (out_handle == NULL)
417 return VMCI_ERROR_INVALID_ARGS;
418
419 if (recv_cb == NULL) {
420 pr_devel("Client callback needed when creating datagram\n");
421 return VMCI_ERROR_INVALID_ARGS;
422 }
423
424 if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
425 return VMCI_ERROR_INVALID_ARGS;
426
427 return dg_create_handle(resource_id, flags, priv_flags, recv_cb,
428 client_data, out_handle);
429}
430EXPORT_SYMBOL_GPL(vmci_datagram_create_handle_priv);
431
432/*
433 * vmci_datagram_create_handle() - Create host context datagram endpoint
434 * @resource_id: Resource ID.
435 * @flags: Datagram Flags.
436 * @recv_cb: Callback when receiving datagrams.
437 * @client_data: Opaque pointer passed back to @recv_cb.
438 * @out_handle: vmci_handle that is populated as a result of this function.
439 *
440 * Creates a host context datagram endpoint and returns a handle to
441 * it. Same as vmci_datagram_create_handle_priv without the privilege
442 * flags argument.
443 */
444int vmci_datagram_create_handle(u32 resource_id,
445 u32 flags,
446 vmci_datagram_recv_cb recv_cb,
447 void *client_data,
448 struct vmci_handle *out_handle)
449{
450 return vmci_datagram_create_handle_priv(
451 resource_id, flags,
452 VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
453 recv_cb, client_data,
454 out_handle);
455}
456EXPORT_SYMBOL_GPL(vmci_datagram_create_handle);
457
458/*
459 * vmci_datagram_destroy_handle() - Destroys datagram handle
460 * @handle: vmci_handle to be destroyed and reaped.
461 *
462 * Use this function to destroy any datagram handles created by
463 * vmci_datagram_create_handle() or vmci_datagram_create_handle_priv().
464 */
465int vmci_datagram_destroy_handle(struct vmci_handle handle)
466{
467 struct datagram_entry *entry;
468 struct vmci_resource *resource;
469
470 resource = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DATAGRAM);
471 if (!resource) {
472 pr_devel("Failed to destroy datagram (handle=0x%x:0x%x)\n",
473 handle.context, handle.resource);
474 return VMCI_ERROR_NOT_FOUND;
475 }
476
477 entry = container_of(resource, struct datagram_entry, resource);
478
479 vmci_resource_put(&entry->resource);
480 vmci_resource_remove(&entry->resource);
481 kfree(entry);
482
483 return VMCI_SUCCESS;
484}
485EXPORT_SYMBOL_GPL(vmci_datagram_destroy_handle);
486
487/*
488 * vmci_datagram_send() - Send a datagram
489 * @msg: The datagram to send.
490 *
491 * Sends the provided datagram on its merry way.
492 */
493int vmci_datagram_send(struct vmci_datagram *msg)
494{
495 if (msg == NULL)
496 return VMCI_ERROR_INVALID_ARGS;
497
498 return vmci_datagram_dispatch(VMCI_INVALID_ID, msg, false);
499}
500EXPORT_SYMBOL_GPL(vmci_datagram_send);
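For orientation, a minimal usage sketch of the datagram API exported above follows; it is not part of this patch, and the resource ID, the self-addressed send, and the payload size are illustrative assumptions. vmci_datagram_create_handle() is the unprivileged wrapper shown above; the _priv variant differs only in taking explicit privilege flags.

/* Illustrative sketch: host-side datagram endpoint round trip. */
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#define EXAMPLE_RESOURCE_ID 42	/* made up for this example */

/* Invoked for every datagram addressed to our handle. */
static int example_recv_cb(void *client_data, struct vmci_datagram *dg)
{
	pr_info("example: %llu payload bytes from 0x%x:0x%x\n",
		(unsigned long long)dg->payload_size,
		dg->src.context, dg->src.resource);
	return VMCI_SUCCESS;
}

static int example_datagram_roundtrip(void)
{
	struct vmci_handle handle;
	struct vmci_datagram *dg;
	size_t payload_size = 4;
	int result;

	result = vmci_datagram_create_handle(EXAMPLE_RESOURCE_ID, 0,
					     example_recv_cb, NULL, &handle);
	if (result != VMCI_SUCCESS)
		return result;

	dg = kzalloc(VMCI_DG_HEADERSIZE + payload_size, GFP_KERNEL);
	if (!dg) {
		vmci_datagram_destroy_handle(handle);
		return VMCI_ERROR_NO_MEM;
	}

	dg->dst = handle;	/* send to ourselves for the demo */
	dg->src = handle;
	dg->payload_size = payload_size;

	result = vmci_datagram_send(dg);	/* >= 0 means bytes sent */

	kfree(dg);
	vmci_datagram_destroy_handle(handle);
	return result < VMCI_SUCCESS ? result : VMCI_SUCCESS;
}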
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.h b/drivers/misc/vmw_vmci/vmci_datagram.h
new file mode 100644
index 000000000000..eb4aab7f64ec
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_datagram.h
@@ -0,0 +1,52 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#ifndef _VMCI_DATAGRAM_H_
17#define _VMCI_DATAGRAM_H_
18
19#include <linux/types.h>
20#include <linux/list.h>
21
22#include "vmci_context.h"
23
24#define VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE 256
25
26/*
27 * The struct vmci_datagram_queue_entry is a queue header for the in-kernel VMCI
28 * datagram queues. It is allocated in non-paged memory, as the
29 * content is accessed while holding a spinlock. The pending datagram
30 * itself may be allocated from paged memory. We shadow the size of
31 * the datagram in the non-paged queue entry as this size is used
32 * while holding the same spinlock as above.
33 */
34struct vmci_datagram_queue_entry {
35 struct list_head list_item; /* For queuing. */
36 size_t dg_size; /* Size of datagram. */
37 struct vmci_datagram *dg; /* Pending datagram. */
38};
39
40/* VMCIDatagramSendRecvInfo */
41struct vmci_datagram_snd_rcv_info {
42 u64 addr;
43 u32 len;
44 s32 result;
45};
46
47/* Datagram API for non-public use. */
48int vmci_datagram_dispatch(u32 context_id, struct vmci_datagram *dg,
49 bool from_guest);
50int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg);
51
52#endif /* _VMCI_DATAGRAM_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
new file mode 100644
index 000000000000..c3e8397f62ed
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -0,0 +1,604 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#include <linux/vmw_vmci_defs.h>
17#include <linux/vmw_vmci_api.h>
18#include <linux/completion.h>
19#include <linux/hash.h>
20#include <linux/kernel.h>
21#include <linux/list.h>
22#include <linux/module.h>
23#include <linux/sched.h>
24#include <linux/slab.h>
25
26#include "vmci_datagram.h"
27#include "vmci_doorbell.h"
28#include "vmci_resource.h"
29#include "vmci_driver.h"
30#include "vmci_route.h"
31
32
33#define VMCI_DOORBELL_INDEX_BITS 6
34#define VMCI_DOORBELL_INDEX_TABLE_SIZE (1 << VMCI_DOORBELL_INDEX_BITS)
35#define VMCI_DOORBELL_HASH(_idx) hash_32(_idx, VMCI_DOORBELL_INDEX_BITS)
36
37/*
38 * DoorbellEntry describes a doorbell notification handle allocated by the
39 * host.
40 */
41struct dbell_entry {
42 struct vmci_resource resource;
43 struct hlist_node node;
44 struct work_struct work;
45 vmci_callback notify_cb;
46 void *client_data;
47 u32 idx;
48 u32 priv_flags;
49 bool run_delayed;
50 atomic_t active; /* Only used by guest personality */
51};
52
53/* The VMCI index table keeps track of currently registered doorbells. */
54struct dbell_index_table {
55 spinlock_t lock; /* Index table lock */
56 struct hlist_head entries[VMCI_DOORBELL_INDEX_TABLE_SIZE];
57};
58
59static struct dbell_index_table vmci_doorbell_it = {
60 .lock = __SPIN_LOCK_UNLOCKED(vmci_doorbell_it.lock),
61};
62
63/*
64 * The max_notify_idx is one larger than the currently known bitmap index in
65 * use, and is used to determine how much of the bitmap needs to be scanned.
66 */
67static u32 max_notify_idx;
68
69/*
70 * The notify_idx_count is used for determining whether there are free entries
71 * within the bitmap (if notify_idx_count + 1 < max_notify_idx).
72 */
73static u32 notify_idx_count;
74
75/*
76 * The last_notify_idx_reserved is used to track the last index handed out - in
77 * the case where multiple handles share a notification index, we hand out
78 * indexes round robin based on last_notify_idx_reserved.
79 */
80static u32 last_notify_idx_reserved;
81
82/* This is a one-entry cache used by the index allocation. */
83static u32 last_notify_idx_released = PAGE_SIZE;
84
85
86/*
87 * Utility function that retrieves the privilege flags associated
88 * with a given doorbell handle. For guest endpoints, the
89 * privileges are determined by the context ID, but for host
90 * endpoints privileges are associated with the complete
91 * handle. Hypervisor endpoints are not yet supported.
92 */
93int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags)
94{
95 if (priv_flags == NULL || handle.context == VMCI_INVALID_ID)
96 return VMCI_ERROR_INVALID_ARGS;
97
98 if (handle.context == VMCI_HOST_CONTEXT_ID) {
99 struct dbell_entry *entry;
100 struct vmci_resource *resource;
101
102 resource = vmci_resource_by_handle(handle,
103 VMCI_RESOURCE_TYPE_DOORBELL);
104 if (!resource)
105 return VMCI_ERROR_NOT_FOUND;
106
107 entry = container_of(resource, struct dbell_entry, resource);
108 *priv_flags = entry->priv_flags;
109 vmci_resource_put(resource);
110 } else if (handle.context == VMCI_HYPERVISOR_CONTEXT_ID) {
111 /*
112 * Hypervisor endpoints for notifications are not
113 * supported (yet).
114 */
115 return VMCI_ERROR_INVALID_ARGS;
116 } else {
117 *priv_flags = vmci_context_get_priv_flags(handle.context);
118 }
119
120 return VMCI_SUCCESS;
121}
122
123/*
124 * Find doorbell entry by bitmap index.
125 */
126static struct dbell_entry *dbell_index_table_find(u32 idx)
127{
128 u32 bucket = VMCI_DOORBELL_HASH(idx);
129 struct dbell_entry *dbell;
130 struct hlist_node *node;
131
132 hlist_for_each_entry(dbell, node, &vmci_doorbell_it.entries[bucket],
133 node) {
134 if (idx == dbell->idx)
135 return dbell;
136 }
137
138 return NULL;
139}
140
141/*
142 * Add the given entry to the index table. This will take a reference to the
143 * entry's resource so that the entry is not deleted before it is removed from
144 * the table.
145 */
146static void dbell_index_table_add(struct dbell_entry *entry)
147{
148 u32 bucket;
149 u32 new_notify_idx;
150
151 vmci_resource_get(&entry->resource);
152
153 spin_lock_bh(&vmci_doorbell_it.lock);
154
155 /*
156 * Below we try to allocate an index in the notification
157 * bitmap with "not too much" sharing between resources. If we
158	 * use less than the full bitmap, we either add to the end if
159 * there are no unused flags within the currently used area,
160 * or we search for unused ones. If we use the full bitmap, we
161 * allocate the index round robin.
162 */
163 if (max_notify_idx < PAGE_SIZE || notify_idx_count < PAGE_SIZE) {
164 if (last_notify_idx_released < max_notify_idx &&
165 !dbell_index_table_find(last_notify_idx_released)) {
166 new_notify_idx = last_notify_idx_released;
167 last_notify_idx_released = PAGE_SIZE;
168 } else {
169 bool reused = false;
170 new_notify_idx = last_notify_idx_reserved;
171 if (notify_idx_count + 1 < max_notify_idx) {
172 do {
173 if (!dbell_index_table_find
174 (new_notify_idx)) {
175 reused = true;
176 break;
177 }
178 new_notify_idx = (new_notify_idx + 1) %
179 max_notify_idx;
180 } while (new_notify_idx !=
181 last_notify_idx_released);
182 }
183 if (!reused) {
184 new_notify_idx = max_notify_idx;
185 max_notify_idx++;
186 }
187 }
188 } else {
189 new_notify_idx = (last_notify_idx_reserved + 1) % PAGE_SIZE;
190 }
191
192 last_notify_idx_reserved = new_notify_idx;
193 notify_idx_count++;
194
195 entry->idx = new_notify_idx;
196 bucket = VMCI_DOORBELL_HASH(entry->idx);
197 hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);
198
199 spin_unlock_bh(&vmci_doorbell_it.lock);
200}
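Since the allocation logic above packs three strategies into one block, here is a standalone plain-C model of the same heuristic, not part of this patch: reuse the most recently released index, otherwise scan the in-use range for a free slot, otherwise grow the range; once the whole bitmap is in use, hand out indexes round robin and accept sharing. Locking, the hash table (modelled by a toy used[] array), and the exact scan bound are simplified here.

/* Illustrative model only; BITMAP_SLOTS stands in for PAGE_SIZE. */
#include <stdbool.h>
#include <stdint.h>

#define BITMAP_SLOTS 4096u

static bool used[BITMAP_SLOTS];			/* toy stand-in for the hash table */
static uint32_t max_idx;			/* one past the highest index in use */
static uint32_t count;				/* number of live registrations */
static uint32_t last_reserved;
static uint32_t last_released = BITMAP_SLOTS;	/* BITMAP_SLOTS means "none" */

static uint32_t alloc_notify_idx(void)
{
	uint32_t idx;

	if (max_idx >= BITMAP_SLOTS && count >= BITMAP_SLOTS) {
		/* Bitmap fully populated: plain round robin, sharing is OK. */
		idx = (last_reserved + 1) % BITMAP_SLOTS;
	} else if (last_released < max_idx && !used[last_released]) {
		/* Fast path: reuse the most recently released index. */
		idx = last_released;
		last_released = BITMAP_SLOTS;
	} else {
		/* Scan the in-use range round robin for a free slot ... */
		bool found = false;

		if (count + 1 < max_idx) {
			idx = last_reserved;
			do {
				if (!used[idx]) {
					found = true;
					break;
				}
				idx = (idx + 1) % max_idx;
			} while (idx != last_reserved);
		}
		/* ... and grow the range if none was free. */
		if (!found)
			idx = max_idx++;
	}

	used[idx] = true;
	last_reserved = idx;
	count++;
	return idx;
}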
201
202/*
203 * Remove the given entry from the index table. This drops the reference to
204 * the entry's resource that was taken when the entry was added.
205 */
206static void dbell_index_table_remove(struct dbell_entry *entry)
207{
208 spin_lock_bh(&vmci_doorbell_it.lock);
209
210 hlist_del_init(&entry->node);
211
212 notify_idx_count--;
213 if (entry->idx == max_notify_idx - 1) {
214 /*
215 * If we delete an entry with the maximum known
216 * notification index, we take the opportunity to
217 * prune the current max. As there might be other
218 * unused indices immediately below, we lower the
219 * maximum until we hit an index in use.
220 */
221 while (max_notify_idx > 0 &&
222 !dbell_index_table_find(max_notify_idx - 1))
223 max_notify_idx--;
224 }
225
226 last_notify_idx_released = entry->idx;
227
228 spin_unlock_bh(&vmci_doorbell_it.lock);
229
230 vmci_resource_put(&entry->resource);
231}
232
233/*
234 * Creates a link between the given doorbell handle and the given
235 * index in the bitmap in the device backend. A notification state
236 * is created in the hypervisor.
237 */
238static int dbell_link(struct vmci_handle handle, u32 notify_idx)
239{
240 struct vmci_doorbell_link_msg link_msg;
241
242 link_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
243 VMCI_DOORBELL_LINK);
244 link_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
245 link_msg.hdr.payload_size = sizeof(link_msg) - VMCI_DG_HEADERSIZE;
246 link_msg.handle = handle;
247 link_msg.notify_idx = notify_idx;
248
249 return vmci_send_datagram(&link_msg.hdr);
250}
251
252/*
253 * Unlinks the given doorbell handle from an index in the bitmap in
254 * the device backend. The notification state is destroyed in the hypervisor.
255 */
256static int dbell_unlink(struct vmci_handle handle)
257{
258 struct vmci_doorbell_unlink_msg unlink_msg;
259
260 unlink_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
261 VMCI_DOORBELL_UNLINK);
262 unlink_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
263 unlink_msg.hdr.payload_size = sizeof(unlink_msg) - VMCI_DG_HEADERSIZE;
264 unlink_msg.handle = handle;
265
266 return vmci_send_datagram(&unlink_msg.hdr);
267}
268
269/*
270 * Notify another guest or the host. We send a datagram down to the
271 * host via the hypervisor with the notification info.
272 */
273static int dbell_notify_as_guest(struct vmci_handle handle, u32 priv_flags)
274{
275 struct vmci_doorbell_notify_msg notify_msg;
276
277 notify_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
278 VMCI_DOORBELL_NOTIFY);
279 notify_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
280 notify_msg.hdr.payload_size = sizeof(notify_msg) - VMCI_DG_HEADERSIZE;
281 notify_msg.handle = handle;
282
283 return vmci_send_datagram(&notify_msg.hdr);
284}
285
286/*
287 * Calls the specified callback in a delayed context.
288 */
289static void dbell_delayed_dispatch(struct work_struct *work)
290{
291 struct dbell_entry *entry = container_of(work,
292 struct dbell_entry, work);
293
294 entry->notify_cb(entry->client_data);
295 vmci_resource_put(&entry->resource);
296}
297
298/*
299 * Dispatches a doorbell notification to the host context.
300 */
301int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
302{
303 struct dbell_entry *entry;
304 struct vmci_resource *resource;
305
306 if (vmci_handle_is_invalid(handle)) {
307 pr_devel("Notifying an invalid doorbell (handle=0x%x:0x%x)\n",
308 handle.context, handle.resource);
309 return VMCI_ERROR_INVALID_ARGS;
310 }
311
312 resource = vmci_resource_by_handle(handle,
313 VMCI_RESOURCE_TYPE_DOORBELL);
314 if (!resource) {
315 pr_devel("Notifying an unknown doorbell (handle=0x%x:0x%x)\n",
316 handle.context, handle.resource);
317 return VMCI_ERROR_NOT_FOUND;
318 }
319
320 entry = container_of(resource, struct dbell_entry, resource);
321 if (entry->run_delayed) {
322 schedule_work(&entry->work);
323 } else {
324 entry->notify_cb(entry->client_data);
325 vmci_resource_put(resource);
326 }
327
328 return VMCI_SUCCESS;
329}
330
331/*
332 * Register the notification bitmap with the host.
333 */
334bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
335{
336 int result;
337 struct vmci_notify_bm_set_msg bitmap_set_msg;
338
339 bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
340 VMCI_SET_NOTIFY_BITMAP);
341 bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
342 bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) -
343 VMCI_DG_HEADERSIZE;
344 bitmap_set_msg.bitmap_ppn = bitmap_ppn;
345
346 result = vmci_send_datagram(&bitmap_set_msg.hdr);
347 if (result != VMCI_SUCCESS) {
348 pr_devel("Failed to register (PPN=%u) as notification bitmap (error=%d)\n",
349 bitmap_ppn, result);
350 return false;
351 }
352 return true;
353}
354
355/*
356 * Executes or schedules the handlers for a given notify index.
357 */
358static void dbell_fire_entries(u32 notify_idx)
359{
360 u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
361 struct dbell_entry *dbell;
362 struct hlist_node *node;
363
364 spin_lock_bh(&vmci_doorbell_it.lock);
365
366 hlist_for_each_entry(dbell, node,
367 &vmci_doorbell_it.entries[bucket], node) {
368 if (dbell->idx == notify_idx &&
369 atomic_read(&dbell->active) == 1) {
370 if (dbell->run_delayed) {
371 vmci_resource_get(&dbell->resource);
372 schedule_work(&dbell->work);
373 } else {
374 dbell->notify_cb(dbell->client_data);
375 }
376 }
377 }
378
379 spin_unlock_bh(&vmci_doorbell_it.lock);
380}
381
382/*
383 * Scans the notification bitmap, collects pending notifications,
384 * resets the bitmap and invokes appropriate callbacks.
385 */
386void vmci_dbell_scan_notification_entries(u8 *bitmap)
387{
388 u32 idx;
389
390 for (idx = 0; idx < max_notify_idx; idx++) {
391 if (bitmap[idx] & 0x1) {
392 bitmap[idx] &= ~1;
393 dbell_fire_entries(idx);
394 }
395 }
396}
397
398/*
399 * vmci_doorbell_create() - Creates a doorbell
400 * @handle: A handle used to track the resource. Can be invalid.
401 * @flags: Flag that determines context of callback.
402 * @priv_flags: Privileges flags.
403 * @notify_cb: The callback to be invoked when the doorbell fires.
404 * @client_data: A parameter to be passed to the callback.
405 *
406 * Creates a doorbell with the given callback. If the handle is
407 * VMCI_INVALID_HANDLE, a free handle will be assigned, if
408 * possible. The callback can be run immediately (potentially with
409 * locks held - the default) or delayed (in a kernel thread) by
410 * specifying the flag VMCI_FLAG_DELAYED_CB. If delayed execution
411 * is selected, a given callback may not be run if the kernel is
412 * unable to allocate memory for the delayed execution (highly
413 * unlikely).
414 */
415int vmci_doorbell_create(struct vmci_handle *handle,
416 u32 flags,
417 u32 priv_flags,
418 vmci_callback notify_cb, void *client_data)
419{
420 struct dbell_entry *entry;
421 struct vmci_handle new_handle;
422 int result;
423
424 if (!handle || !notify_cb || flags & ~VMCI_FLAG_DELAYED_CB ||
425 priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
426 return VMCI_ERROR_INVALID_ARGS;
427
428 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
429 if (entry == NULL) {
430 pr_warn("Failed allocating memory for datagram entry\n");
431 return VMCI_ERROR_NO_MEM;
432 }
433
434 if (vmci_handle_is_invalid(*handle)) {
435 u32 context_id = vmci_get_context_id();
436
437 /* Let resource code allocate a free ID for us */
438 new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
439 } else {
440 bool valid_context = false;
441
442 /*
443 * Validate the handle. We must do both of the checks below
444 * because we can be acting as both a host and a guest at the
445 * same time. We always allow the host context ID, since the
446 * host functionality is in practice always there with the
447 * unified driver.
448 */
449 if (handle->context == VMCI_HOST_CONTEXT_ID ||
450 (vmci_guest_code_active() &&
451 vmci_get_context_id() == handle->context)) {
452 valid_context = true;
453 }
454
455 if (!valid_context || handle->resource == VMCI_INVALID_ID) {
456 pr_devel("Invalid argument (handle=0x%x:0x%x)\n",
457 handle->context, handle->resource);
458 result = VMCI_ERROR_INVALID_ARGS;
459 goto free_mem;
460 }
461
462 new_handle = *handle;
463 }
464
465 entry->idx = 0;
466 INIT_HLIST_NODE(&entry->node);
467 entry->priv_flags = priv_flags;
468 INIT_WORK(&entry->work, dbell_delayed_dispatch);
469 entry->run_delayed = flags & VMCI_FLAG_DELAYED_CB;
470 entry->notify_cb = notify_cb;
471 entry->client_data = client_data;
472 atomic_set(&entry->active, 0);
473
474 result = vmci_resource_add(&entry->resource,
475 VMCI_RESOURCE_TYPE_DOORBELL,
476 new_handle);
477 if (result != VMCI_SUCCESS) {
478 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
479 new_handle.context, new_handle.resource, result);
480 goto free_mem;
481 }
482
483 new_handle = vmci_resource_handle(&entry->resource);
484 if (vmci_guest_code_active()) {
485 dbell_index_table_add(entry);
486 result = dbell_link(new_handle, entry->idx);
487 if (VMCI_SUCCESS != result)
488 goto destroy_resource;
489
490 atomic_set(&entry->active, 1);
491 }
492
493 *handle = new_handle;
494
495 return result;
496
497 destroy_resource:
498 dbell_index_table_remove(entry);
499 vmci_resource_remove(&entry->resource);
500 free_mem:
501 kfree(entry);
502 return result;
503}
504EXPORT_SYMBOL_GPL(vmci_doorbell_create);
505
506/*
507 * vmci_doorbell_destroy() - Destroy a doorbell.
508 * @handle: The handle tracking the resource.
509 *
510 * Destroys a doorbell previously created with vmci_doorbell_create(). This
511 * operation may block waiting for a callback to finish.
512 */
513int vmci_doorbell_destroy(struct vmci_handle handle)
514{
515 struct dbell_entry *entry;
516 struct vmci_resource *resource;
517
518 if (vmci_handle_is_invalid(handle))
519 return VMCI_ERROR_INVALID_ARGS;
520
521 resource = vmci_resource_by_handle(handle,
522 VMCI_RESOURCE_TYPE_DOORBELL);
523 if (!resource) {
524 pr_devel("Failed to destroy doorbell (handle=0x%x:0x%x)\n",
525 handle.context, handle.resource);
526 return VMCI_ERROR_NOT_FOUND;
527 }
528
529 entry = container_of(resource, struct dbell_entry, resource);
530
531 if (vmci_guest_code_active()) {
532 int result;
533
534 dbell_index_table_remove(entry);
535
536 result = dbell_unlink(handle);
537 if (VMCI_SUCCESS != result) {
538
539 /*
540 * The only reason this should fail would be
541 * an inconsistency between guest and
542 * hypervisor state, where the guest believes
543 * it has an active registration whereas the
544 * hypervisor doesn't. One case where this may
545 * happen is if a doorbell is unregistered
546 * following a hibernation at a time where the
547 * doorbell state hasn't been restored on the
548 * hypervisor side yet. Since the handle has
549 * now been removed in the guest, we just
550 * print a warning and return success.
551 */
552 pr_devel("Unlink of doorbell (handle=0x%x:0x%x) unknown by hypervisor (error=%d)\n",
553 handle.context, handle.resource, result);
554 }
555 }
556
557 /*
558 * Now remove the resource from the table. It might still be in use
559 * after this, in a callback or still on the delayed work queue.
560 */
561 vmci_resource_put(&entry->resource);
562 vmci_resource_remove(&entry->resource);
563
564 kfree(entry);
565
566 return VMCI_SUCCESS;
567}
568EXPORT_SYMBOL_GPL(vmci_doorbell_destroy);
569
570/*
571 * vmci_doorbell_notify() - Ring the doorbell (and hide in the bushes).
572 * @dst: The handle identifying the doorbell resource.
573 * @priv_flags: Privilege flags.
574 *
575 * Generates a notification on the doorbell identified by the
576 * handle. For host side generation of notifications, the caller
577 * can specify what the privilege of the calling side is.
578 */
579int vmci_doorbell_notify(struct vmci_handle dst, u32 priv_flags)
580{
581 int retval;
582 enum vmci_route route;
583 struct vmci_handle src;
584
585 if (vmci_handle_is_invalid(dst) ||
586 (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS))
587 return VMCI_ERROR_INVALID_ARGS;
588
589 src = VMCI_INVALID_HANDLE;
590 retval = vmci_route(&src, &dst, false, &route);
591 if (retval < VMCI_SUCCESS)
592 return retval;
593
594 if (VMCI_ROUTE_AS_HOST == route)
595 return vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID,
596 dst, priv_flags);
597
598 if (VMCI_ROUTE_AS_GUEST == route)
599 return dbell_notify_as_guest(dst, priv_flags);
600
601 pr_warn("Unknown route (%d) for doorbell\n", route);
602 return VMCI_ERROR_DST_UNREACHABLE;
603}
604EXPORT_SYMBOL_GPL(vmci_doorbell_notify);
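As a usage illustration for the doorbell API exported in this file, a short hedged sketch follows; it is not part of this patch, and the choice of a delayed callback, the callback body, and ringing our own doorbell are assumptions made for the example.

/* Illustrative sketch: create, ring and destroy a doorbell. */
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/kernel.h>

static void example_doorbell_cb(void *client_data)
{
	/* Runs from a workqueue because VMCI_FLAG_DELAYED_CB is set below. */
	pr_info("example: doorbell rang, client_data=%p\n", client_data);
}

static int example_doorbell_roundtrip(void)
{
	struct vmci_handle handle = VMCI_INVALID_HANDLE;
	int result;

	/* Let the resource code pick a free handle for us. */
	result = vmci_doorbell_create(&handle, VMCI_FLAG_DELAYED_CB,
				      VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
				      example_doorbell_cb, NULL);
	if (result != VMCI_SUCCESS)
		return result;

	/* Ring our own doorbell; vmci_route() decides host vs. guest delivery. */
	result = vmci_doorbell_notify(handle,
				      VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS);
	if (result != VMCI_SUCCESS)
		pr_warn("example: notify failed: %d\n", result);

	return vmci_doorbell_destroy(handle);
}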
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.h b/drivers/misc/vmw_vmci/vmci_doorbell.h
new file mode 100644
index 000000000000..e4c0b17486a5
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.h
@@ -0,0 +1,51 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#ifndef VMCI_DOORBELL_H
17#define VMCI_DOORBELL_H
18
19#include <linux/vmw_vmci_defs.h>
20#include <linux/types.h>
21
22#include "vmci_driver.h"
23
24/*
25 * VMCINotifyResourceInfo: Used to create and destroy doorbells, and
26 * generate a notification for a doorbell or queue pair.
27 */
28struct vmci_dbell_notify_resource_info {
29 struct vmci_handle handle;
30 u16 resource;
31 u16 action;
32 s32 result;
33};
34
35/*
36 * Structure used for checkpointing the doorbell mappings. It is
37 * written to the checkpoint as is, so changing this structure will
38 * break checkpoint compatibility.
39 */
40struct dbell_cpt_state {
41 struct vmci_handle handle;
42 u64 bitmap_idx;
43};
44
45int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle);
46int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags);
47
48bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn);
49void vmci_dbell_scan_notification_entries(u8 *bitmap);
50
51#endif /* VMCI_DOORBELL_H */
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
new file mode 100644
index 000000000000..7b3fce2da6c3
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -0,0 +1,117 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#include <linux/vmw_vmci_defs.h>
17#include <linux/vmw_vmci_api.h>
18#include <linux/atomic.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/init.h>
22
23#include "vmci_driver.h"
24#include "vmci_event.h"
25
26static bool vmci_disable_host;
27module_param_named(disable_host, vmci_disable_host, bool, 0);
28MODULE_PARM_DESC(disable_host,
29 "Disable driver host personality (default=enabled)");
30
31static bool vmci_disable_guest;
32module_param_named(disable_guest, vmci_disable_guest, bool, 0);
33MODULE_PARM_DESC(disable_guest,
34 "Disable driver guest personality (default=enabled)");
35
36static bool vmci_guest_personality_initialized;
37static bool vmci_host_personality_initialized;
38
39/*
40 * vmci_get_context_id() - Gets the current context ID.
41 *
42 * Returns the current context ID: the guest context ID when the guest
43 * personality is active, else the host context ID, else VMCI_INVALID_ID.
44 */
45u32 vmci_get_context_id(void)
46{
47 if (vmci_guest_code_active())
48 return vmci_get_vm_context_id();
49 else if (vmci_host_code_active())
50 return VMCI_HOST_CONTEXT_ID;
51
52 return VMCI_INVALID_ID;
53}
54EXPORT_SYMBOL_GPL(vmci_get_context_id);
55
56static int __init vmci_drv_init(void)
57{
58 int vmci_err;
59 int error;
60
61 vmci_err = vmci_event_init();
62 if (vmci_err < VMCI_SUCCESS) {
63 pr_err("Failed to initialize VMCIEvent (result=%d)\n",
64 vmci_err);
65 return -EINVAL;
66 }
67
68 if (!vmci_disable_guest) {
69 error = vmci_guest_init();
70 if (error) {
71 pr_warn("Failed to initialize guest personality (err=%d)\n",
72 error);
73 } else {
74 vmci_guest_personality_initialized = true;
75 pr_info("Guest personality initialized and is %s\n",
76 vmci_guest_code_active() ?
77 "active" : "inactive");
78 }
79 }
80
81 if (!vmci_disable_host) {
82 error = vmci_host_init();
83 if (error) {
84 pr_warn("Unable to initialize host personality (err=%d)\n",
85 error);
86 } else {
87 vmci_host_personality_initialized = true;
88 pr_info("Initialized host personality\n");
89 }
90 }
91
92 if (!vmci_guest_personality_initialized &&
93 !vmci_host_personality_initialized) {
94 vmci_event_exit();
95 return -ENODEV;
96 }
97
98 return 0;
99}
100module_init(vmci_drv_init);
101
102static void __exit vmci_drv_exit(void)
103{
104 if (vmci_guest_personality_initialized)
105 vmci_guest_exit();
106
107 if (vmci_host_personality_initialized)
108 vmci_host_exit();
109
110 vmci_event_exit();
111}
112module_exit(vmci_drv_exit);
113
114MODULE_AUTHOR("VMware, Inc.");
115MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
116MODULE_VERSION("1.0.0.0-k");
117MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
new file mode 100644
index 000000000000..f69156a1f30c
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -0,0 +1,50 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#ifndef _VMCI_DRIVER_H_
17#define _VMCI_DRIVER_H_
18
19#include <linux/vmw_vmci_defs.h>
20#include <linux/wait.h>
21
22#include "vmci_queue_pair.h"
23#include "vmci_context.h"
24
25enum vmci_obj_type {
26 VMCIOBJ_VMX_VM = 10,
27 VMCIOBJ_CONTEXT,
28 VMCIOBJ_SOCKET,
29 VMCIOBJ_NOT_SET,
30};
31
32/* For storing VMCI structures in file handles. */
33struct vmci_obj {
34 void *ptr;
35 enum vmci_obj_type type;
36};
37
38u32 vmci_get_context_id(void);
39int vmci_send_datagram(struct vmci_datagram *dg);
40
41int vmci_host_init(void);
42void vmci_host_exit(void);
43bool vmci_host_code_active(void);
44
45int vmci_guest_init(void);
46void vmci_guest_exit(void);
47bool vmci_guest_code_active(void);
48u32 vmci_get_vm_context_id(void);
49
50#endif /* _VMCI_DRIVER_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c
new file mode 100644
index 000000000000..8449516d6ac6
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_event.c
@@ -0,0 +1,224 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#include <linux/vmw_vmci_defs.h>
17#include <linux/vmw_vmci_api.h>
18#include <linux/list.h>
19#include <linux/module.h>
20#include <linux/sched.h>
21#include <linux/slab.h>
22
23#include "vmci_driver.h"
24#include "vmci_event.h"
25
26#define EVENT_MAGIC 0xEABE0000
27#define VMCI_EVENT_MAX_ATTEMPTS 10
28
29struct vmci_subscription {
30 u32 id;
31 u32 event;
32 vmci_event_cb callback;
33 void *callback_data;
34 struct list_head node; /* on one of subscriber lists */
35};
36
37static struct list_head subscriber_array[VMCI_EVENT_MAX];
38static DEFINE_MUTEX(subscriber_mutex);
39
40int __init vmci_event_init(void)
41{
42 int i;
43
44 for (i = 0; i < VMCI_EVENT_MAX; i++)
45 INIT_LIST_HEAD(&subscriber_array[i]);
46
47 return VMCI_SUCCESS;
48}
49
50void vmci_event_exit(void)
51{
52 int e;
53
54 /* We free all memory at exit. */
55 for (e = 0; e < VMCI_EVENT_MAX; e++) {
56 struct vmci_subscription *cur, *p2;
57 list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
58
59 /*
60 * We should never get here because all events
61 * should have been unregistered before we try
62 * to unload the driver module.
63 */
64 pr_warn("Unexpected free events occurring\n");
65 list_del(&cur->node);
66 kfree(cur);
67 }
68 }
69}
70
71/*
72 * Find entry. Assumes subscriber_mutex is held.
73 */
74static struct vmci_subscription *event_find(u32 sub_id)
75{
76 int e;
77
78 for (e = 0; e < VMCI_EVENT_MAX; e++) {
79 struct vmci_subscription *cur;
80 list_for_each_entry(cur, &subscriber_array[e], node) {
81 if (cur->id == sub_id)
82 return cur;
83 }
84 }
85 return NULL;
86}
87
88/*
89 * Actually delivers the events to the subscribers.
90 * The callback function for each subscriber is invoked.
91 */
92static void event_deliver(struct vmci_event_msg *event_msg)
93{
94 struct vmci_subscription *cur;
95 struct list_head *subscriber_list;
96
97 rcu_read_lock();
98 subscriber_list = &subscriber_array[event_msg->event_data.event];
99 list_for_each_entry_rcu(cur, subscriber_list, node) {
100 cur->callback(cur->id, &event_msg->event_data,
101 cur->callback_data);
102 }
103 rcu_read_unlock();
104}
105
106/*
107 * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
108 * subscribers for given event.
109 */
110int vmci_event_dispatch(struct vmci_datagram *msg)
111{
112 struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;
113
114 if (msg->payload_size < sizeof(u32) ||
115 msg->payload_size > sizeof(struct vmci_event_data_max))
116 return VMCI_ERROR_INVALID_ARGS;
117
118 if (!VMCI_EVENT_VALID(event_msg->event_data.event))
119 return VMCI_ERROR_EVENT_UNKNOWN;
120
121 event_deliver(event_msg);
122 return VMCI_SUCCESS;
123}
124
125/*
126 * vmci_event_subscribe() - Subscribe to a given event.
127 * @event: The event to subscribe to.
128 * @callback: The callback to invoke upon the event.
129 * @callback_data: Data to pass to the callback.
130 * @subscription_id: ID used to track subscription. Used with
131 * vmci_event_unsubscribe()
132 *
133 * Subscribes to the provided event. The callback specified will be
134 * fired from RCU critical section and therefore must not sleep.
135 */
136int vmci_event_subscribe(u32 event,
137 vmci_event_cb callback,
138 void *callback_data,
139 u32 *new_subscription_id)
140{
141 struct vmci_subscription *sub;
142 int attempts;
143 int retval;
144 bool have_new_id = false;
145
146 if (!new_subscription_id) {
147 pr_devel("%s: Invalid subscription (NULL)\n", __func__);
148 return VMCI_ERROR_INVALID_ARGS;
149 }
150
151 if (!VMCI_EVENT_VALID(event) || !callback) {
152 pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
153 __func__, event, callback, callback_data);
154 return VMCI_ERROR_INVALID_ARGS;
155 }
156
157 sub = kzalloc(sizeof(*sub), GFP_KERNEL);
158 if (!sub)
159 return VMCI_ERROR_NO_MEM;
160
161 sub->id = VMCI_EVENT_MAX;
162 sub->event = event;
163 sub->callback = callback;
164 sub->callback_data = callback_data;
165 INIT_LIST_HEAD(&sub->node);
166
167 mutex_lock(&subscriber_mutex);
168
169 /* Creation of a new event is always allowed. */
170 for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
171 static u32 subscription_id;
172 /*
173		 * We try to get an ID a couple of times before
174 * claiming we are out of resources.
175 */
176
177 /* Test for duplicate id. */
178 if (!event_find(++subscription_id)) {
179 sub->id = subscription_id;
180 have_new_id = true;
181 break;
182 }
183 }
184
185 if (have_new_id) {
186 list_add_rcu(&sub->node, &subscriber_array[event]);
187 retval = VMCI_SUCCESS;
188 } else {
189 retval = VMCI_ERROR_NO_RESOURCES;
190 }
191
192 mutex_unlock(&subscriber_mutex);
193
194 *new_subscription_id = sub->id;
195 return retval;
196}
197EXPORT_SYMBOL_GPL(vmci_event_subscribe);
198
199/*
200 * vmci_event_unsubscribe() - unsubscribe from an event.
201 * @sub_id: A subscription ID as provided by vmci_event_subscribe()
202 *
203 * Unsubscribes from the given event. Removes the subscription from the
204 * list and frees it once any in-flight callbacks have finished.
205 */
206int vmci_event_unsubscribe(u32 sub_id)
207{
208 struct vmci_subscription *s;
209
210 mutex_lock(&subscriber_mutex);
211 s = event_find(sub_id);
212 if (s)
213 list_del_rcu(&s->node);
214 mutex_unlock(&subscriber_mutex);
215
216 if (!s)
217 return VMCI_ERROR_NOT_FOUND;
218
219 synchronize_rcu();
220 kfree(s);
221
222 return VMCI_SUCCESS;
223}
224EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
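A short sketch of a client of the event API above, following the same pattern vmci_guest.c uses later in this series; it is not part of this patch, and the callback body and the module-scope subscription ID are illustrative assumptions.

/* Illustrative sketch: watch for context-ID updates. */
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/kernel.h>

static u32 example_sub_id = VMCI_INVALID_ID;

/* Runs inside an RCU read-side critical section, so it must not sleep. */
static void example_event_cb(u32 sub_id,
			     const struct vmci_event_data *event_data,
			     void *client_data)
{
	pr_info("example: event %u fired for subscription 0x%x\n",
		event_data->event, sub_id);
}

static int example_watch_ctx_updates(void)
{
	return vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
				    example_event_cb, NULL,
				    &example_sub_id);
}

static void example_unwatch_ctx_updates(void)
{
	if (example_sub_id != VMCI_INVALID_ID)
		vmci_event_unsubscribe(example_sub_id);
}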
diff --git a/drivers/misc/vmw_vmci/vmci_event.h b/drivers/misc/vmw_vmci/vmci_event.h
new file mode 100644
index 000000000000..7df9b1c0a96c
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_event.h
@@ -0,0 +1,25 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#ifndef __VMCI_EVENT_H__
17#define __VMCI_EVENT_H__
18
19#include <linux/vmw_vmci_api.h>
20
21int vmci_event_init(void);
22void vmci_event_exit(void);
23int vmci_event_dispatch(struct vmci_datagram *msg);
24
25#endif /*__VMCI_EVENT_H__ */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
new file mode 100644
index 000000000000..60c01999f489
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -0,0 +1,759 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#include <linux/vmw_vmci_defs.h>
17#include <linux/vmw_vmci_api.h>
18#include <linux/moduleparam.h>
19#include <linux/interrupt.h>
20#include <linux/highmem.h>
21#include <linux/kernel.h>
22#include <linux/mm.h>
23#include <linux/module.h>
24#include <linux/sched.h>
25#include <linux/slab.h>
26#include <linux/init.h>
27#include <linux/pci.h>
28#include <linux/smp.h>
29#include <linux/io.h>
30#include <linux/vmalloc.h>
31
32#include "vmci_datagram.h"
33#include "vmci_doorbell.h"
34#include "vmci_context.h"
35#include "vmci_driver.h"
36#include "vmci_event.h"
37
38#define PCI_VENDOR_ID_VMWARE 0x15AD
39#define PCI_DEVICE_ID_VMWARE_VMCI 0x0740
40
41#define VMCI_UTIL_NUM_RESOURCES 1
42
43static bool vmci_disable_msi;
44module_param_named(disable_msi, vmci_disable_msi, bool, 0);
45MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
46
47static bool vmci_disable_msix;
48module_param_named(disable_msix, vmci_disable_msix, bool, 0);
49MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
50
51static u32 ctx_update_sub_id = VMCI_INVALID_ID;
52static u32 vm_context_id = VMCI_INVALID_ID;
53
54struct vmci_guest_device {
55 struct device *dev; /* PCI device we are attached to */
56 void __iomem *iobase;
57
58 unsigned int irq;
59 unsigned int intr_type;
60 bool exclusive_vectors;
61 struct msix_entry msix_entries[VMCI_MAX_INTRS];
62
63 struct tasklet_struct datagram_tasklet;
64 struct tasklet_struct bm_tasklet;
65
66 void *data_buffer;
67 void *notification_bitmap;
68};
69
70/* vmci_dev singleton device and supporting data */
71static struct vmci_guest_device *vmci_dev_g;
72static DEFINE_SPINLOCK(vmci_dev_spinlock);
73
74static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);
75
76bool vmci_guest_code_active(void)
77{
78 return atomic_read(&vmci_num_guest_devices) != 0;
79}
80
81u32 vmci_get_vm_context_id(void)
82{
83 if (vm_context_id == VMCI_INVALID_ID) {
84 struct vmci_datagram get_cid_msg;
85 get_cid_msg.dst =
86 vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
87 VMCI_GET_CONTEXT_ID);
88 get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
89 get_cid_msg.payload_size = 0;
90 vm_context_id = vmci_send_datagram(&get_cid_msg);
91 }
92 return vm_context_id;
93}
94
95/*
96 * VM to hypervisor call mechanism. We use the standard VMware naming
97 * convention since shared code is calling this function as well.
98 */
99int vmci_send_datagram(struct vmci_datagram *dg)
100{
101 unsigned long flags;
102 int result;
103
104 /* Check args. */
105 if (dg == NULL)
106 return VMCI_ERROR_INVALID_ARGS;
107
108 /*
109 * Need to acquire spinlock on the device because the datagram
110 * data may be spread over multiple pages and the monitor may
111 * interleave device user rpc calls from multiple
112 * VCPUs. Acquiring the spinlock precludes that
113 * possibility. Disabling interrupts to avoid incoming
114	 * datagrams during a "rep out" and possibly re-entering
115	 * this function.
116 */
117 spin_lock_irqsave(&vmci_dev_spinlock, flags);
118
119 if (vmci_dev_g) {
120 iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
121 dg, VMCI_DG_SIZE(dg));
122 result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
123 } else {
124 result = VMCI_ERROR_UNAVAILABLE;
125 }
126
127 spin_unlock_irqrestore(&vmci_dev_spinlock, flags);
128
129 return result;
130}
131EXPORT_SYMBOL_GPL(vmci_send_datagram);
132
133/*
134 * Gets called with the new context ID when the context ID is updated,
135 * e.g. after the VM is resumed.
136 */
137static void vmci_guest_cid_update(u32 sub_id,
138 const struct vmci_event_data *event_data,
139 void *client_data)
140{
141 const struct vmci_event_payld_ctx *ev_payload =
142 vmci_event_data_const_payload(event_data);
143
144 if (sub_id != ctx_update_sub_id) {
145 pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
146 return;
147 }
148
149 if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
150 pr_devel("Invalid event data\n");
151 return;
152 }
153
154 pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
155 vm_context_id, ev_payload->context_id, event_data->event);
156
157 vm_context_id = ev_payload->context_id;
158}
159
160/*
161 * Verify that the host supports the hypercalls we need. If it does not,
162 * try to find fallback hypercalls and use those instead. Returns
163 * true if required hypercalls (or fallback hypercalls) are
164 * supported by the host, false otherwise.
165 */
166static bool vmci_check_host_caps(struct pci_dev *pdev)
167{
168 bool result;
169 struct vmci_resource_query_msg *msg;
170 u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
171 VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
172 struct vmci_datagram *check_msg;
173
174 check_msg = kmalloc(msg_size, GFP_KERNEL);
175 if (!check_msg) {
176 dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
177 return false;
178 }
179
180 check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
181 VMCI_RESOURCES_QUERY);
182 check_msg->src = VMCI_ANON_SRC_HANDLE;
183 check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
184 msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);
185
186 msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
187 msg->resources[0] = VMCI_GET_CONTEXT_ID;
188
189	/* Checks that hypercalls are supported */
190 result = vmci_send_datagram(check_msg) == 0x01;
191 kfree(check_msg);
192
193 dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
194 __func__, result ? "PASSED" : "FAILED");
195
196 /* We need the vector. There are no fallbacks. */
197 return result;
198}
199
200/*
201 * Reads datagrams from the data in port and dispatches them. We
202 * always start reading datagrams into only the first page of the
203 * datagram buffer. If the datagrams don't fit into one page, we
204 * use the maximum datagram buffer size for the remainder of the
205 * invocation. This is a simple heuristic for not penalizing
206 * small datagrams.
207 *
208 * This function assumes that it has exclusive access to the data
209 * in port for the duration of the call.
210 */
211static void vmci_dispatch_dgs(unsigned long data)
212{
213 struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
214 u8 *dg_in_buffer = vmci_dev->data_buffer;
215 struct vmci_datagram *dg;
216 size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
217 size_t current_dg_in_buffer_size = PAGE_SIZE;
218 size_t remaining_bytes;
219
220 BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);
221
222 ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
223 vmci_dev->data_buffer, current_dg_in_buffer_size);
224 dg = (struct vmci_datagram *)dg_in_buffer;
225 remaining_bytes = current_dg_in_buffer_size;
226
227 while (dg->dst.resource != VMCI_INVALID_ID ||
228 remaining_bytes > PAGE_SIZE) {
229 unsigned dg_in_size;
230
231 /*
232 * When the input buffer spans multiple pages, a datagram can
233 * start on any page boundary in the buffer.
234 */
235 if (dg->dst.resource == VMCI_INVALID_ID) {
236 dg = (struct vmci_datagram *)roundup(
237 (uintptr_t)dg + 1, PAGE_SIZE);
238 remaining_bytes =
239 (size_t)(dg_in_buffer +
240 current_dg_in_buffer_size -
241 (u8 *)dg);
242 continue;
243 }
244
245 dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);
246
247 if (dg_in_size <= dg_in_buffer_size) {
248 int result;
249
250 /*
251 * If the remaining bytes in the datagram
252			 * buffer don't contain the complete
253			 * datagram, we first make sure we have enough
254			 * room for it and then we read the remainder
255 * of the datagram and possibly any following
256 * datagrams.
257 */
258 if (dg_in_size > remaining_bytes) {
259 if (remaining_bytes !=
260 current_dg_in_buffer_size) {
261
262 /*
263 * We move the partial
264 * datagram to the front and
265					 * read the remainder of the
266 * datagram and possibly
267 * following calls into the
268 * following bytes.
269 */
270 memmove(dg_in_buffer, dg_in_buffer +
271 current_dg_in_buffer_size -
272 remaining_bytes,
273 remaining_bytes);
274 dg = (struct vmci_datagram *)
275 dg_in_buffer;
276 }
277
278 if (current_dg_in_buffer_size !=
279 dg_in_buffer_size)
280 current_dg_in_buffer_size =
281 dg_in_buffer_size;
282
283 ioread8_rep(vmci_dev->iobase +
284 VMCI_DATA_IN_ADDR,
285 vmci_dev->data_buffer +
286 remaining_bytes,
287 current_dg_in_buffer_size -
288 remaining_bytes);
289 }
290
291 /*
292 * We special case event datagrams from the
293 * hypervisor.
294 */
295 if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
296 dg->dst.resource == VMCI_EVENT_HANDLER) {
297 result = vmci_event_dispatch(dg);
298 } else {
299 result = vmci_datagram_invoke_guest_handler(dg);
300 }
301 if (result < VMCI_SUCCESS)
302 dev_dbg(vmci_dev->dev,
303 "Datagram with resource (ID=0x%x) failed (err=%d)\n",
304 dg->dst.resource, result);
305
306 /* On to the next datagram. */
307 dg = (struct vmci_datagram *)((u8 *)dg +
308 dg_in_size);
309 } else {
310 size_t bytes_to_skip;
311
312 /*
313 * Datagram doesn't fit in datagram buffer of maximal
314 * size. We drop it.
315 */
316 dev_dbg(vmci_dev->dev,
317 "Failed to receive datagram (size=%u bytes)\n",
318 dg_in_size);
319
320 bytes_to_skip = dg_in_size - remaining_bytes;
321 if (current_dg_in_buffer_size != dg_in_buffer_size)
322 current_dg_in_buffer_size = dg_in_buffer_size;
323
324 for (;;) {
325 ioread8_rep(vmci_dev->iobase +
326 VMCI_DATA_IN_ADDR,
327 vmci_dev->data_buffer,
328 current_dg_in_buffer_size);
329 if (bytes_to_skip <= current_dg_in_buffer_size)
330 break;
331
332 bytes_to_skip -= current_dg_in_buffer_size;
333 }
334 dg = (struct vmci_datagram *)(dg_in_buffer +
335 bytes_to_skip);
336 }
337
338 remaining_bytes =
339 (size_t) (dg_in_buffer + current_dg_in_buffer_size -
340 (u8 *)dg);
341
342 if (remaining_bytes < VMCI_DG_HEADERSIZE) {
343 /* Get the next batch of datagrams. */
344
345 ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
346 vmci_dev->data_buffer,
347 current_dg_in_buffer_size);
348 dg = (struct vmci_datagram *)dg_in_buffer;
349 remaining_bytes = current_dg_in_buffer_size;
350 }
351 }
352}
353
354/*
355 * Scans the notification bitmap for raised flags, clears them
356 * and handles the notifications.
357 */
358static void vmci_process_bitmap(unsigned long data)
359{
360 struct vmci_guest_device *dev = (struct vmci_guest_device *)data;
361
362 if (!dev->notification_bitmap) {
363 dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
364 return;
365 }
366
367 vmci_dbell_scan_notification_entries(dev->notification_bitmap);
368}
369
370/*
371 * Enable MSI-X. Try exclusive vectors first, then shared vectors.
372 */
373static int vmci_enable_msix(struct pci_dev *pdev,
374 struct vmci_guest_device *vmci_dev)
375{
376 int i;
377 int result;
378
379 for (i = 0; i < VMCI_MAX_INTRS; ++i) {
380 vmci_dev->msix_entries[i].entry = i;
381 vmci_dev->msix_entries[i].vector = i;
382 }
383
384 result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS);
385 if (result == 0)
386 vmci_dev->exclusive_vectors = true;
387 else if (result > 0)
388 result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1);
389
390 return result;
391}
392
393/*
394 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
395 * interrupt (vector VMCI_INTR_DATAGRAM).
396 */
397static irqreturn_t vmci_interrupt(int irq, void *_dev)
398{
399 struct vmci_guest_device *dev = _dev;
400
401 /*
402 * If we are using MSI-X with exclusive vectors then we simply schedule
403 * the datagram tasklet, since we know the interrupt was meant for us.
404 * Otherwise we must read the ICR to determine what to do.
405 */
406
407 if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
408 tasklet_schedule(&dev->datagram_tasklet);
409 } else {
410 unsigned int icr;
411
412 /* Acknowledge interrupt and determine what needs doing. */
413 icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
414 if (icr == 0 || icr == ~0)
415 return IRQ_NONE;
416
417 if (icr & VMCI_ICR_DATAGRAM) {
418 tasklet_schedule(&dev->datagram_tasklet);
419 icr &= ~VMCI_ICR_DATAGRAM;
420 }
421
422 if (icr & VMCI_ICR_NOTIFICATION) {
423 tasklet_schedule(&dev->bm_tasklet);
424 icr &= ~VMCI_ICR_NOTIFICATION;
425 }
426
427 if (icr != 0)
428 dev_warn(dev->dev,
429 "Ignoring unknown interrupt cause (%d)\n",
430 icr);
431 }
432
433 return IRQ_HANDLED;
434}
435
436/*
437 * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
438 * which is for the notification bitmap. Will only get called if we are
439 * using MSI-X with exclusive vectors.
440 */
441static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
442{
443 struct vmci_guest_device *dev = _dev;
444
445 /* For MSI-X we can just assume it was meant for us. */
446 tasklet_schedule(&dev->bm_tasklet);
447
448 return IRQ_HANDLED;
449}
450
451/*
452 * Most of the initialization at module load time is done here.
453 */
454static int vmci_guest_probe_device(struct pci_dev *pdev,
455 const struct pci_device_id *id)
456{
457 struct vmci_guest_device *vmci_dev;
458 void __iomem *iobase;
459 unsigned int capabilities;
460 unsigned long cmd;
461 int vmci_err;
462 int error;
463
464 dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");
465
466 error = pcim_enable_device(pdev);
467 if (error) {
468 dev_err(&pdev->dev,
469 "Failed to enable VMCI device: %d\n", error);
470 return error;
471 }
472
473 error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
474 if (error) {
475 dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
476 return error;
477 }
478
479 iobase = pcim_iomap_table(pdev)[0];
480
481 dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
482 (unsigned long)iobase, pdev->irq);
483
484 vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
485 if (!vmci_dev) {
486 dev_err(&pdev->dev,
487 "Can't allocate memory for VMCI device\n");
488 return -ENOMEM;
489 }
490
491 vmci_dev->dev = &pdev->dev;
492 vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
493 vmci_dev->exclusive_vectors = false;
494 vmci_dev->iobase = iobase;
495
496 tasklet_init(&vmci_dev->datagram_tasklet,
497 vmci_dispatch_dgs, (unsigned long)vmci_dev);
498 tasklet_init(&vmci_dev->bm_tasklet,
499 vmci_process_bitmap, (unsigned long)vmci_dev);
500
501 vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
502 if (!vmci_dev->data_buffer) {
503 dev_err(&pdev->dev,
504 "Can't allocate memory for datagram buffer\n");
505 return -ENOMEM;
506 }
507
508 pci_set_master(pdev); /* To enable queue_pair functionality. */
509
510 /*
511 * Verify that the VMCI Device supports the capabilities that
512 * we need. If the device is missing capabilities that we would
513 * like to use, check for fallback capabilities and use those
514 * instead (so we can run a new VM on old hosts). Fail the load if
515 * a required capability is missing and there is no fallback.
516 *
517 * Right now, we need datagrams. There are no fallbacks.
518 */
519 capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
520 if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
521 dev_err(&pdev->dev, "Device does not support datagrams\n");
522 error = -ENXIO;
523 goto err_free_data_buffer;
524 }
525
526 /*
527 * If the hardware supports notifications, we will use that as
528 * well.
529 */
530 if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
531 vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE);
532 if (!vmci_dev->notification_bitmap) {
533 dev_warn(&pdev->dev,
534 "Unable to allocate notification bitmap\n");
535 } else {
536 memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
537 capabilities |= VMCI_CAPS_NOTIFICATIONS;
538 }
539 }
540
541 dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);
542
543 /* Let the host know which capabilities we intend to use. */
544 iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);
545
546 /* Set up global device so that we can start sending datagrams */
547 spin_lock_irq(&vmci_dev_spinlock);
548 vmci_dev_g = vmci_dev;
549 spin_unlock_irq(&vmci_dev_spinlock);
550
551 /*
552 * Register notification bitmap with device if that capability is
553 * used.
554 */
555 if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
556 struct page *page =
557 vmalloc_to_page(vmci_dev->notification_bitmap);
558 unsigned long bitmap_ppn = page_to_pfn(page);
559 if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
560 dev_warn(&pdev->dev,
561 "VMCI device unable to register notification bitmap with PPN 0x%x\n",
562 (u32) bitmap_ppn);
563 goto err_remove_vmci_dev_g;
564 }
565 }
566
567 /* Check host capabilities. */
568 if (!vmci_check_host_caps(pdev))
569 goto err_remove_bitmap;
570
571 /* Enable device. */
572
573 /*
574 * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
575 * update the internal context id when needed.
576 */
577 vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
578 vmci_guest_cid_update, NULL,
579 &ctx_update_sub_id);
580 if (vmci_err < VMCI_SUCCESS)
581 dev_warn(&pdev->dev,
582 "Failed to subscribe to event (type=%d): %d\n",
583 VMCI_EVENT_CTX_ID_UPDATE, vmci_err);
584
585 /*
586 * Enable interrupts. Try MSI-X first, then MSI, and then fallback on
587 * legacy interrupts.
588 */
589 if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
590 vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
591 vmci_dev->irq = vmci_dev->msix_entries[0].vector;
592 } else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
593 vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
594 vmci_dev->irq = pdev->irq;
595 } else {
596 vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
597 vmci_dev->irq = pdev->irq;
598 }
599
600 /*
601 * Request IRQ for legacy or MSI interrupts, or for first
602 * MSI-X vector.
603 */
604 error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
605 KBUILD_MODNAME, vmci_dev);
606 if (error) {
607 dev_err(&pdev->dev, "Irq %u in use: %d\n",
608 vmci_dev->irq, error);
609 goto err_disable_msi;
610 }
611
612 /*
613 * For MSI-X with exclusive vectors we need to request an
614 * interrupt for each vector so that we get a separate
615 * interrupt handler routine. This allows us to distinguish
616 * between the vectors.
617 */
618 if (vmci_dev->exclusive_vectors) {
619 error = request_irq(vmci_dev->msix_entries[1].vector,
620 vmci_interrupt_bm, 0, KBUILD_MODNAME,
621 vmci_dev);
622 if (error) {
623 dev_err(&pdev->dev,
624 "Failed to allocate irq %u: %d\n",
625 vmci_dev->msix_entries[1].vector, error);
626 goto err_free_irq;
627 }
628 }
629
630 dev_dbg(&pdev->dev, "Registered device\n");
631
632 atomic_inc(&vmci_num_guest_devices);
633
634 /* Enable specific interrupt bits. */
635 cmd = VMCI_IMR_DATAGRAM;
636 if (capabilities & VMCI_CAPS_NOTIFICATIONS)
637 cmd |= VMCI_IMR_NOTIFICATION;
638 iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);
639
640 /* Enable interrupts. */
641 iowrite32(VMCI_CONTROL_INT_ENABLE,
642 vmci_dev->iobase + VMCI_CONTROL_ADDR);
643
644 pci_set_drvdata(pdev, vmci_dev);
645 return 0;
646
647err_free_irq:
648	free_irq(vmci_dev->irq, vmci_dev);
649 tasklet_kill(&vmci_dev->datagram_tasklet);
650 tasklet_kill(&vmci_dev->bm_tasklet);
651
652err_disable_msi:
653 if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
654 pci_disable_msix(pdev);
655 else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
656 pci_disable_msi(pdev);
657
658 vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
659 if (vmci_err < VMCI_SUCCESS)
660 dev_warn(&pdev->dev,
661 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
662 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
663
664err_remove_bitmap:
665 if (vmci_dev->notification_bitmap) {
666 iowrite32(VMCI_CONTROL_RESET,
667 vmci_dev->iobase + VMCI_CONTROL_ADDR);
668 vfree(vmci_dev->notification_bitmap);
669 }
670
671err_remove_vmci_dev_g:
672 spin_lock_irq(&vmci_dev_spinlock);
673 vmci_dev_g = NULL;
674 spin_unlock_irq(&vmci_dev_spinlock);
675
676err_free_data_buffer:
677 vfree(vmci_dev->data_buffer);
678
679 /* The rest are managed resources and will be freed by PCI core */
680 return error;
681}
682
683static void vmci_guest_remove_device(struct pci_dev *pdev)
684{
685 struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
686 int vmci_err;
687
688 dev_dbg(&pdev->dev, "Removing device\n");
689
690 atomic_dec(&vmci_num_guest_devices);
691
692 vmci_qp_guest_endpoints_exit();
693
694 vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
695 if (vmci_err < VMCI_SUCCESS)
696 dev_warn(&pdev->dev,
697 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
698 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
699
700 spin_lock_irq(&vmci_dev_spinlock);
701 vmci_dev_g = NULL;
702 spin_unlock_irq(&vmci_dev_spinlock);
703
704 dev_dbg(&pdev->dev, "Resetting vmci device\n");
705 iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);
706
707 /*
708 * Free IRQ and then disable MSI/MSI-X as appropriate. For
709 * MSI-X, we might have multiple vectors, each with its own
710 * IRQ, which we must free too.
711 */
712 free_irq(vmci_dev->irq, vmci_dev);
713 if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
714 if (vmci_dev->exclusive_vectors)
715 free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
716 pci_disable_msix(pdev);
717 } else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
718 pci_disable_msi(pdev);
719 }
720
721 tasklet_kill(&vmci_dev->datagram_tasklet);
722 tasklet_kill(&vmci_dev->bm_tasklet);
723
724 if (vmci_dev->notification_bitmap) {
725 /*
726 * The device reset above cleared the bitmap state of the
727 * device, so we can safely free it here.
728 */
729
730 vfree(vmci_dev->notification_bitmap);
731 }
732
733 vfree(vmci_dev->data_buffer);
734
735 /* The rest are managed resources and will be freed by PCI core */
736}
737
738static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = {
739 { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
740 { 0 },
741};
742MODULE_DEVICE_TABLE(pci, vmci_ids);
743
744static struct pci_driver vmci_guest_driver = {
745 .name = KBUILD_MODNAME,
746 .id_table = vmci_ids,
747 .probe = vmci_guest_probe_device,
748 .remove = vmci_guest_remove_device,
749};
750
751int __init vmci_guest_init(void)
752{
753 return pci_register_driver(&vmci_guest_driver);
754}
755
756void __exit vmci_guest_exit(void)
757{
758 pci_unregister_driver(&vmci_guest_driver);
759}
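As an aside, the guest driver above relies on the standard two-level interrupt pattern: the hard IRQ handlers only read and acknowledge the ICR and defer the real work to the datagram and bitmap tasklets, with tasklet_init() in probe paired against tasklet_kill() in the error and remove paths. A minimal, self-contained sketch of that pattern in isolation (my_dev, my_work and my_irq are illustrative names, not part of this patch):

#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct work_tasklet;
	/* ... device state ... */
};

/* Runs later in softirq context, outside the hard IRQ handler. */
static void my_work(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	/* heavy lifting goes here */
	(void)dev;
}

static irqreturn_t my_irq(int irq, void *_dev)
{
	struct my_dev *dev = _dev;

	/* acknowledge the hardware if needed, then defer the real work */
	tasklet_schedule(&dev->work_tasklet);
	return IRQ_HANDLED;
}

/*
 * probe():  tasklet_init(&dev->work_tasklet, my_work, (unsigned long)dev);
 * remove(): tasklet_kill(&dev->work_tasklet);
 */

Calling tasklet_kill() in the teardown paths guarantees the deferred handler is no longer running before the device state it touches is freed.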
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c
new file mode 100644
index 000000000000..344973a0fb0a
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.c
@@ -0,0 +1,142 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#include <linux/slab.h>
17#include "vmci_handle_array.h"
18
19static size_t handle_arr_calc_size(size_t capacity)
20{
21 return sizeof(struct vmci_handle_arr) +
22 capacity * sizeof(struct vmci_handle);
23}
24
25struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity)
26{
27 struct vmci_handle_arr *array;
28
29 if (capacity == 0)
30 capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE;
31
32 array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
33 if (!array)
34 return NULL;
35
36 array->capacity = capacity;
37 array->size = 0;
38
39 return array;
40}
41
42void vmci_handle_arr_destroy(struct vmci_handle_arr *array)
43{
44 kfree(array);
45}
46
47void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
48 struct vmci_handle handle)
49{
50 struct vmci_handle_arr *array = *array_ptr;
51
52 if (unlikely(array->size >= array->capacity)) {
53 /* reallocate. */
54 struct vmci_handle_arr *new_array;
55 size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT;
56 size_t new_size = handle_arr_calc_size(new_capacity);
57
58 new_array = krealloc(array, new_size, GFP_ATOMIC);
59 if (!new_array)
60 return;
61
62 new_array->capacity = new_capacity;
63 *array_ptr = array = new_array;
64 }
65
66 array->entries[array->size] = handle;
67 array->size++;
68}
69
70/*
71 * Returns the removed handle, or VMCI_INVALID_HANDLE if the entry was not found.
72 */
73struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
74 struct vmci_handle entry_handle)
75{
76 struct vmci_handle handle = VMCI_INVALID_HANDLE;
77 size_t i;
78
79 for (i = 0; i < array->size; i++) {
80 if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
81 handle = array->entries[i];
82 array->size--;
83 array->entries[i] = array->entries[array->size];
84 array->entries[array->size] = VMCI_INVALID_HANDLE;
85 break;
86 }
87 }
88
89 return handle;
90}
91
92/*
93 * Returns the removed handle, or VMCI_INVALID_HANDLE if the array was empty.
94 */
95struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array)
96{
97 struct vmci_handle handle = VMCI_INVALID_HANDLE;
98
99 if (array->size) {
100 array->size--;
101 handle = array->entries[array->size];
102 array->entries[array->size] = VMCI_INVALID_HANDLE;
103 }
104
105 return handle;
106}
107
108/*
109 * Returns the handle at the given index, or VMCI_INVALID_HANDLE if the index is invalid.
110 */
111struct vmci_handle
112vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
113{
114 if (unlikely(index >= array->size))
115 return VMCI_INVALID_HANDLE;
116
117 return array->entries[index];
118}
119
120bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
121 struct vmci_handle entry_handle)
122{
123 size_t i;
124
125 for (i = 0; i < array->size; i++)
126 if (vmci_handle_is_equal(array->entries[i], entry_handle))
127 return true;
128
129 return false;
130}
131
132/*
133 * NULL if the array is empty. Otherwise, a pointer to the array
134 * of VMCI handles in the handle array.
135 */
136struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array)
137{
138 if (array->size)
139 return array->entries;
140
141 return NULL;
142}
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h
new file mode 100644
index 000000000000..b5f3a7f98cf1
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.h
@@ -0,0 +1,52 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#ifndef _VMCI_HANDLE_ARRAY_H_
17#define _VMCI_HANDLE_ARRAY_H_
18
19#include <linux/vmw_vmci_defs.h>
20#include <linux/types.h>
21
22#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4
23#define VMCI_ARR_CAP_MULT 2 /* Array capacity multiplier */
24
25struct vmci_handle_arr {
26 size_t capacity;
27 size_t size;
28 struct vmci_handle entries[];
29};
30
31struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity);
32void vmci_handle_arr_destroy(struct vmci_handle_arr *array);
33void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
34 struct vmci_handle handle);
35struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
36 struct vmci_handle
37 entry_handle);
38struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array);
39struct vmci_handle
40vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index);
41bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
42 struct vmci_handle entry_handle);
43struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array);
44
45static inline size_t vmci_handle_arr_get_size(
46 const struct vmci_handle_arr *array)
47{
48 return array->size;
49}
50
51
52#endif /* _VMCI_HANDLE_ARRAY_H_ */
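A short usage sketch of the handle-array API declared above, assuming linux/vmw_vmci_defs.h provides struct vmci_handle and vmci_make_handle(); the demo function and the IDs it uses are illustrative, not part of the patch:

#include <linux/vmw_vmci_defs.h>
#include "vmci_handle_array.h"

static void handle_array_demo(void)
{
	struct vmci_handle h = vmci_make_handle(42, 7);	/* arbitrary ids */
	struct vmci_handle_arr *arr;

	arr = vmci_handle_arr_create(0);	/* 0 selects the default capacity */
	if (!arr)
		return;

	/* Append may krealloc(), so the array pointer is passed by reference. */
	vmci_handle_arr_append_entry(&arr, h);

	if (vmci_handle_arr_has_entry(arr, h))
		vmci_handle_arr_remove_entry(arr, h);

	vmci_handle_arr_destroy(arr);
}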
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
new file mode 100644
index 000000000000..d4722b3dc8ec
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -0,0 +1,1043 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#include <linux/vmw_vmci_defs.h>
17#include <linux/vmw_vmci_api.h>
18#include <linux/moduleparam.h>
19#include <linux/miscdevice.h>
20#include <linux/interrupt.h>
21#include <linux/highmem.h>
22#include <linux/atomic.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/mutex.h>
26#include <linux/sched.h>
27#include <linux/slab.h>
28#include <linux/file.h>
29#include <linux/init.h>
30#include <linux/poll.h>
31#include <linux/pci.h>
32#include <linux/smp.h>
33#include <linux/fs.h>
34#include <linux/io.h>
35
36#include "vmci_handle_array.h"
37#include "vmci_queue_pair.h"
38#include "vmci_datagram.h"
39#include "vmci_doorbell.h"
40#include "vmci_resource.h"
41#include "vmci_context.h"
42#include "vmci_driver.h"
43#include "vmci_event.h"
44
45#define VMCI_UTIL_NUM_RESOURCES 1
46
47enum {
48 VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
49 VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
50};
51
52enum {
53 VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
54 VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
55 VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
56};
57
58/*
59 * VMCI driver initialization. This block can also be used to
60 * pass initial group membership etc.
61 */
62struct vmci_init_blk {
63 u32 cid;
64 u32 flags;
65};
66
67/* VMCIqueue_pairAllocInfo_VMToVM */
68struct vmci_qp_alloc_info_vmvm {
69 struct vmci_handle handle;
70 u32 peer;
71 u32 flags;
72 u64 produce_size;
73 u64 consume_size;
74 u64 produce_page_file; /* User VA. */
75 u64 consume_page_file; /* User VA. */
76 u64 produce_page_file_size; /* Size of the file name array. */
77 u64 consume_page_file_size; /* Size of the file name array. */
78 s32 result;
79 u32 _pad;
80};
81
82/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
83struct vmci_set_notify_info {
84 u64 notify_uva;
85 s32 result;
86 u32 _pad;
87};
88
89/*
90 * Per-instance host state
91 */
92struct vmci_host_dev {
93 struct vmci_ctx *context;
94 int user_version;
95 enum vmci_obj_type ct_type;
96 struct mutex lock; /* Mutex lock for vmci context access */
97};
98
99static struct vmci_ctx *host_context;
100static bool vmci_host_device_initialized;
101static atomic_t vmci_host_active_users = ATOMIC_INIT(0);
102
103/*
104 * Determines whether the VMCI host personality is
105 * available. Since the core functionality of the host driver is
106 * always present, all guests could possibly use the host
107 * personality. However, to minimize the deviation from the
108 * pre-unified driver state of affairs, we only consider the host
109 * device active if there is no active guest device or if there
110 * are VMX'en with active VMCI contexts using the host device.
111 */
112bool vmci_host_code_active(void)
113{
114 return vmci_host_device_initialized &&
115 (!vmci_guest_code_active() ||
116 atomic_read(&vmci_host_active_users) > 0);
117}
118
119/*
120 * Called on open of /dev/vmci.
121 */
122static int vmci_host_open(struct inode *inode, struct file *filp)
123{
124 struct vmci_host_dev *vmci_host_dev;
125
126 vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
127 if (vmci_host_dev == NULL)
128 return -ENOMEM;
129
130 vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
131 mutex_init(&vmci_host_dev->lock);
132 filp->private_data = vmci_host_dev;
133
134 return 0;
135}
136
137/*
138 * Called on close of /dev/vmci, most often when the process
139 * exits.
140 */
141static int vmci_host_close(struct inode *inode, struct file *filp)
142{
143 struct vmci_host_dev *vmci_host_dev = filp->private_data;
144
145 if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
146 vmci_ctx_destroy(vmci_host_dev->context);
147 vmci_host_dev->context = NULL;
148
149 /*
150 * The number of active contexts is used to track whether any
151 * VMX'en are using the host personality. It is incremented when
152 * a context is created through the IOCTL_VMCI_INIT_CONTEXT
153 * ioctl.
154 */
155 atomic_dec(&vmci_host_active_users);
156 }
157 vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
158
159 kfree(vmci_host_dev);
160 filp->private_data = NULL;
161 return 0;
162}
163
164/*
165 * This is used to wake up the VMX when a VMCI call arrives, or
166 * to wake up select() or poll() at the next clock tick.
167 */
168static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
169{
170 struct vmci_host_dev *vmci_host_dev = filp->private_data;
171 struct vmci_ctx *context = vmci_host_dev->context;
172 unsigned int mask = 0;
173
174 if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
175 /* Check for VMCI calls to this VM context. */
176 if (wait)
177 poll_wait(filp, &context->host_context.wait_queue,
178 wait);
179
180 spin_lock(&context->lock);
181 if (context->pending_datagrams > 0 ||
182 vmci_handle_arr_get_size(
183 context->pending_doorbell_array) > 0) {
184 mask = POLLIN;
185 }
186 spin_unlock(&context->lock);
187 }
188 return mask;
189}
190
191/*
192 * Copies the handles of a handle array into a user buffer, and
193 * returns the new length in user_buf_size. If the copy to the
194 * user buffer fails, the function still returns VMCI_SUCCESS,
195 * but retval != 0.
196 */
197static int drv_cp_harray_to_user(void __user *user_buf_uva,
198 u64 *user_buf_size,
199 struct vmci_handle_arr *handle_array,
200 int *retval)
201{
202 u32 array_size = 0;
203 struct vmci_handle *handles;
204
205 if (handle_array)
206 array_size = vmci_handle_arr_get_size(handle_array);
207
208 if (array_size * sizeof(*handles) > *user_buf_size)
209 return VMCI_ERROR_MORE_DATA;
210
211 *user_buf_size = array_size * sizeof(*handles);
212 if (*user_buf_size)
213 *retval = copy_to_user(user_buf_uva,
214 vmci_handle_arr_get_handles
215 (handle_array), *user_buf_size);
216
217 return VMCI_SUCCESS;
218}
219
220/*
221 * Sets up a given context for notify to work: maps the notify
222 * boolean at the given user VA into kernel space.
223 */
224static int vmci_host_setup_notify(struct vmci_ctx *context,
225 unsigned long uva)
226{
227 struct page *page;
228 int retval;
229
230 if (context->notify_page) {
231 pr_devel("%s: Notify mechanism is already set up\n", __func__);
232 return VMCI_ERROR_DUPLICATE_ENTRY;
233 }
234
235 /*
236 * We are using 'bool' internally, but let's make sure we are explicit
237 * about the size.
238 */
239 BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
240 if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8)))
241 return VMCI_ERROR_GENERIC;
242
243 /*
244 * Lock physical page backing a given user VA.
245 */
246 down_read(&current->mm->mmap_sem);
247 retval = get_user_pages(current, current->mm,
248 PAGE_ALIGN(uva),
249 1, 1, 0, &page, NULL);
250 up_read(&current->mm->mmap_sem);
251 if (retval != 1)
252 return VMCI_ERROR_GENERIC;
253
254 /*
255 * Map the locked page and set up notify pointer.
256 */
257 context->notify = kmap(page) + (uva & (PAGE_SIZE - 1));
258 vmci_ctx_check_signal_notify(context);
259
260 return VMCI_SUCCESS;
261}
262
263static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
264 unsigned int cmd, void __user *uptr)
265{
266 if (cmd == IOCTL_VMCI_VERSION2) {
267 int __user *vptr = uptr;
268 if (get_user(vmci_host_dev->user_version, vptr))
269 return -EFAULT;
270 }
271
272 /*
273 * The basic logic here is:
274 *
275 * If the user sends in a version of 0, tell it our version.
276 * If the user didn't send in a version, tell it our version.
277 * If the user sent in an old version, tell it -its- version.
278 * If the user sent in a newer version, tell it our version.
279 *
280 * The rationale behind telling the caller its version is that
281 * Workstation 6.5 required that the VMX and the VMCI kernel module were
282 * kept in version sync. All new VMX users will be programmed to
283 * handle the VMCI kernel module version.
284 */
285
286 if (vmci_host_dev->user_version > 0 &&
287 vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
288 return vmci_host_dev->user_version;
289 }
290
291 return VMCI_VERSION;
292}
293
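The version handshake above is driven from user space via the IOCTL_VMCI_VERSION and IOCTL_VMCI_VERSION2 ioctls on /dev/vmci. A hedged user-space sketch of a caller follows; MY_VMX_VERSION is a made-up value, and it is assumed the IOCTL_VMCI_VERSION2 number is available from the public VMCI headers:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define MY_VMX_VERSION 0x50000		/* hypothetical caller version */

static int negotiate_vmci_version(void)
{
	int version = MY_VMX_VERSION;
	int drv_version;
	int fd = open("/dev/vmci", O_RDWR);

	if (fd < 0)
		return -1;

	/* The driver records our version and returns the one to speak. */
	drv_version = ioctl(fd, IOCTL_VMCI_VERSION2, &version);
	close(fd);

	return drv_version;
}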
294#define vmci_ioctl_err(fmt, ...) \
295 pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)
296
297static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
298 const char *ioctl_name,
299 void __user *uptr)
300{
301 struct vmci_init_blk init_block;
302 const struct cred *cred;
303 int retval;
304
305 if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
306 vmci_ioctl_err("error reading init block\n");
307 return -EFAULT;
308 }
309
310 mutex_lock(&vmci_host_dev->lock);
311
312 if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
313 vmci_ioctl_err("received VMCI init on initialized handle\n");
314 retval = -EINVAL;
315 goto out;
316 }
317
318 if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
319 vmci_ioctl_err("unsupported VMCI restriction flag\n");
320 retval = -EINVAL;
321 goto out;
322 }
323
324 cred = get_current_cred();
325 vmci_host_dev->context = vmci_ctx_create(init_block.cid,
326 init_block.flags, 0,
327 vmci_host_dev->user_version,
328 cred);
329 put_cred(cred);
330 if (IS_ERR(vmci_host_dev->context)) {
331 retval = PTR_ERR(vmci_host_dev->context);
332 vmci_ioctl_err("error initializing context\n");
333 goto out;
334 }
335
336 /*
337 * Copy cid to userlevel, we do this to allow the VMX
338 * to enforce its policy on cid generation.
339 */
340 init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
341 if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
342 vmci_ctx_destroy(vmci_host_dev->context);
343 vmci_host_dev->context = NULL;
344 vmci_ioctl_err("error writing init block\n");
345 retval = -EFAULT;
346 goto out;
347 }
348
349 vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
350 atomic_inc(&vmci_host_active_users);
351
352 retval = 0;
353
354out:
355 mutex_unlock(&vmci_host_dev->lock);
356 return retval;
357}
358
359static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
360 const char *ioctl_name,
361 void __user *uptr)
362{
363 struct vmci_datagram_snd_rcv_info send_info;
364 struct vmci_datagram *dg = NULL;
365 u32 cid;
366
367 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
368 vmci_ioctl_err("only valid for contexts\n");
369 return -EINVAL;
370 }
371
372 if (copy_from_user(&send_info, uptr, sizeof(send_info)))
373 return -EFAULT;
374
375 if (send_info.len > VMCI_MAX_DG_SIZE) {
376 vmci_ioctl_err("datagram is too big (size=%d)\n",
377 send_info.len);
378 return -EINVAL;
379 }
380
381 if (send_info.len < sizeof(*dg)) {
382 vmci_ioctl_err("datagram is too small (size=%d)\n",
383 send_info.len);
384 return -EINVAL;
385 }
386
387 dg = kmalloc(send_info.len, GFP_KERNEL);
388 if (!dg) {
389 vmci_ioctl_err(
390 "cannot allocate memory to dispatch datagram\n");
391 return -ENOMEM;
392 }
393
394 if (copy_from_user(dg, (void __user *)(uintptr_t)send_info.addr,
395 send_info.len)) {
396 vmci_ioctl_err("error getting datagram\n");
397 kfree(dg);
398 return -EFAULT;
399 }
400
401 pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
402 dg->dst.context, dg->dst.resource,
403 dg->src.context, dg->src.resource,
404 (unsigned long long)dg->payload_size);
405
406 /* Get source context id. */
407 cid = vmci_ctx_get_id(vmci_host_dev->context);
408 send_info.result = vmci_datagram_dispatch(cid, dg, true);
409 kfree(dg);
410
411 return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
412}
413
414static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
415 const char *ioctl_name,
416 void __user *uptr)
417{
418 struct vmci_datagram_snd_rcv_info recv_info;
419 struct vmci_datagram *dg = NULL;
420 int retval;
421 size_t size;
422
423 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
424 vmci_ioctl_err("only valid for contexts\n");
425 return -EINVAL;
426 }
427
428 if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
429 return -EFAULT;
430
431 size = recv_info.len;
432 recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
433 &size, &dg);
434
435 if (recv_info.result >= VMCI_SUCCESS) {
436 void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
437 retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
438 kfree(dg);
439 if (retval != 0)
440 return -EFAULT;
441 }
442
443 return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
444}
445
446static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
447 const char *ioctl_name,
448 void __user *uptr)
449{
450 struct vmci_handle handle;
451 int vmci_status;
452 int __user *retptr;
453 u32 cid;
454
455 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
456 vmci_ioctl_err("only valid for contexts\n");
457 return -EINVAL;
458 }
459
460 cid = vmci_ctx_get_id(vmci_host_dev->context);
461
462 if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
463 struct vmci_qp_alloc_info_vmvm alloc_info;
464 struct vmci_qp_alloc_info_vmvm __user *info = uptr;
465
466 if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
467 return -EFAULT;
468
469 handle = alloc_info.handle;
470 retptr = &info->result;
471
472 vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
473 alloc_info.peer,
474 alloc_info.flags,
475 VMCI_NO_PRIVILEGE_FLAGS,
476 alloc_info.produce_size,
477 alloc_info.consume_size,
478 NULL,
479 vmci_host_dev->context);
480
481 if (vmci_status == VMCI_SUCCESS)
482 vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
483 } else {
484 struct vmci_qp_alloc_info alloc_info;
485 struct vmci_qp_alloc_info __user *info = uptr;
486 struct vmci_qp_page_store page_store;
487
488 if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
489 return -EFAULT;
490
491 handle = alloc_info.handle;
492 retptr = &info->result;
493
494 page_store.pages = alloc_info.ppn_va;
495 page_store.len = alloc_info.num_ppns;
496
497 vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
498 alloc_info.peer,
499 alloc_info.flags,
500 VMCI_NO_PRIVILEGE_FLAGS,
501 alloc_info.produce_size,
502 alloc_info.consume_size,
503 &page_store,
504 vmci_host_dev->context);
505 }
506
507 if (put_user(vmci_status, retptr)) {
508 if (vmci_status >= VMCI_SUCCESS) {
509 vmci_status = vmci_qp_broker_detach(handle,
510 vmci_host_dev->context);
511 }
512 return -EFAULT;
513 }
514
515 return 0;
516}
517
518static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
519 const char *ioctl_name,
520 void __user *uptr)
521{
522 struct vmci_qp_set_va_info set_va_info;
523 struct vmci_qp_set_va_info __user *info = uptr;
524 s32 result;
525
526 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
527 vmci_ioctl_err("only valid for contexts\n");
528 return -EINVAL;
529 }
530
531 if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
532 vmci_ioctl_err("is not allowed\n");
533 return -EINVAL;
534 }
535
536 if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
537 return -EFAULT;
538
539 if (set_va_info.va) {
540 /*
541 * VMX is passing down a new VA for the queue
542 * pair mapping.
543 */
544 result = vmci_qp_broker_map(set_va_info.handle,
545 vmci_host_dev->context,
546 set_va_info.va);
547 } else {
548 /*
549 * The queue pair is about to be unmapped by
550 * the VMX.
551 */
552 result = vmci_qp_broker_unmap(set_va_info.handle,
553 vmci_host_dev->context, 0);
554 }
555
556 return put_user(result, &info->result) ? -EFAULT : 0;
557}
558
559static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
560 const char *ioctl_name,
561 void __user *uptr)
562{
563 struct vmci_qp_page_file_info page_file_info;
564 struct vmci_qp_page_file_info __user *info = uptr;
565 s32 result;
566
567 if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
568 vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
569 vmci_ioctl_err("not supported on this VMX (version=%d)\n",
570 vmci_host_dev->user_version);
571 return -EINVAL;
572 }
573
574 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
575 vmci_ioctl_err("only valid for contexts\n");
576 return -EINVAL;
577 }
578
579 if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
580 return -EFAULT;
581
582 /*
583 * Communicate success pre-emptively to the caller. Note that the
584 * basic premise is that it is incumbent upon the caller not to look at
585 * the info.result field until after the ioctl() returns. And then,
586 * only if the ioctl() result indicates no error. We send up the
587 * SUCCESS status before calling SetPageStore() because failing
588 * to copy up the result code means unwinding the SetPageStore().
589 *
590 * It turns out the logic to unwind a SetPageStore() opens a can of
591 * worms. For example, if a host had created the queue_pair and a
592 * guest attaches and SetPageStore() is successful but writing success
593 * fails, then ... the host has to be stopped from writing (anymore)
594 * data into the queue_pair. That means an additional test in the
595 * VMCI_Enqueue() code path. Ugh.
596 */
597
598 if (put_user(VMCI_SUCCESS, &info->result)) {
599 /*
600 * In this case, we can't write a result field of the
601 * caller's info block. So, we don't even try to
602 * SetPageStore().
603 */
604 return -EFAULT;
605 }
606
607 result = vmci_qp_broker_set_page_store(page_file_info.handle,
608 page_file_info.produce_va,
609 page_file_info.consume_va,
610 vmci_host_dev->context);
611 if (result < VMCI_SUCCESS) {
612 if (put_user(result, &info->result)) {
613 /*
614 * Note that in this case the SetPageStore()
615 * call failed but we were unable to
616 * communicate that to the caller (because the
617 * copy_to_user() call failed). So, if we
618 * simply return an error (in this case
619 * -EFAULT) then the caller will know that the
620 * SetPageStore failed even though we couldn't
621 * put the result code in the result field and
622 * indicate exactly why it failed.
623 *
624 * That says nothing about the issue where we
625 * were once able to write to the caller's info
626 * memory and now can't. Something more
627 * serious is probably going on than the fact
628 * that SetPageStore() didn't work.
629 */
630 return -EFAULT;
631 }
632 }
633
634 return 0;
635}
636
637static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
638 const char *ioctl_name,
639 void __user *uptr)
640{
641 struct vmci_qp_dtch_info detach_info;
642 struct vmci_qp_dtch_info __user *info = uptr;
643 s32 result;
644
645 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
646 vmci_ioctl_err("only valid for contexts\n");
647 return -EINVAL;
648 }
649
650 if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
651 return -EFAULT;
652
653 result = vmci_qp_broker_detach(detach_info.handle,
654 vmci_host_dev->context);
655 if (result == VMCI_SUCCESS &&
656 vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
657 result = VMCI_SUCCESS_LAST_DETACH;
658 }
659
660 return put_user(result, &info->result) ? -EFAULT : 0;
661}
662
663static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
664 const char *ioctl_name,
665 void __user *uptr)
666{
667 struct vmci_ctx_info ar_info;
668 struct vmci_ctx_info __user *info = uptr;
669 s32 result;
670 u32 cid;
671
672 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
673 vmci_ioctl_err("only valid for contexts\n");
674 return -EINVAL;
675 }
676
677 if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
678 return -EFAULT;
679
680 cid = vmci_ctx_get_id(vmci_host_dev->context);
681 result = vmci_ctx_add_notification(cid, ar_info.remote_cid);
682
683 return put_user(result, &info->result) ? -EFAULT : 0;
684}
685
686static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
687 const char *ioctl_name,
688 void __user *uptr)
689{
690 struct vmci_ctx_info ar_info;
691 struct vmci_ctx_info __user *info = uptr;
692 u32 cid;
693 int result;
694
695 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
696 vmci_ioctl_err("only valid for contexts\n");
697 return -EINVAL;
698 }
699
700 if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
701 return -EFAULT;
702
703 cid = vmci_ctx_get_id(vmci_host_dev->context);
704 result = vmci_ctx_remove_notification(cid,
705 ar_info.remote_cid);
706
707 return put_user(result, &info->result) ? -EFAULT : 0;
708}
709
710static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
711 const char *ioctl_name,
712 void __user *uptr)
713{
714 struct vmci_ctx_chkpt_buf_info get_info;
715 u32 cid;
716 void *cpt_buf;
717 int retval;
718
719 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
720 vmci_ioctl_err("only valid for contexts\n");
721 return -EINVAL;
722 }
723
724 if (copy_from_user(&get_info, uptr, sizeof(get_info)))
725 return -EFAULT;
726
727 cid = vmci_ctx_get_id(vmci_host_dev->context);
728 get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
729 &get_info.buf_size, &cpt_buf);
730 if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
731 void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
732 retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
733 kfree(cpt_buf);
734
735 if (retval)
736 return -EFAULT;
737 }
738
739 return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
740}
741
742static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
743 const char *ioctl_name,
744 void __user *uptr)
745{
746 struct vmci_ctx_chkpt_buf_info set_info;
747 u32 cid;
748 void *cpt_buf;
749 int retval;
750
751 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
752 vmci_ioctl_err("only valid for contexts\n");
753 return -EINVAL;
754 }
755
756 if (copy_from_user(&set_info, uptr, sizeof(set_info)))
757 return -EFAULT;
758
759 cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL);
760 if (!cpt_buf) {
761 vmci_ioctl_err(
762 "cannot allocate memory to set cpt state (type=%d)\n",
763 set_info.cpt_type);
764 return -ENOMEM;
765 }
766
767 if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf,
768 set_info.buf_size)) {
769 retval = -EFAULT;
770 goto out;
771 }
772
773 cid = vmci_ctx_get_id(vmci_host_dev->context);
774 set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
775 set_info.buf_size, cpt_buf);
776
777 retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;
778
779out:
780 kfree(cpt_buf);
781 return retval;
782}
783
784static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
785 const char *ioctl_name,
786 void __user *uptr)
787{
788 u32 __user *u32ptr = uptr;
789
790 return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
791}
792
793static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
794 const char *ioctl_name,
795 void __user *uptr)
796{
797 struct vmci_set_notify_info notify_info;
798
799 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
800 vmci_ioctl_err("only valid for contexts\n");
801 return -EINVAL;
802 }
803
804 if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
805 return -EFAULT;
806
807 if (notify_info.notify_uva) {
808 notify_info.result =
809 vmci_host_setup_notify(vmci_host_dev->context,
810 notify_info.notify_uva);
811 } else {
812 vmci_ctx_unset_notify(vmci_host_dev->context);
813 notify_info.result = VMCI_SUCCESS;
814 }
815
816 return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
817 -EFAULT : 0;
818}
819
820static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
821 const char *ioctl_name,
822 void __user *uptr)
823{
824 struct vmci_dbell_notify_resource_info info;
825 u32 cid;
826
827 if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
828 vmci_ioctl_err("invalid for current VMX versions\n");
829 return -EINVAL;
830 }
831
832 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
833 vmci_ioctl_err("only valid for contexts\n");
834 return -EINVAL;
835 }
836
837 if (copy_from_user(&info, uptr, sizeof(info)))
838 return -EFAULT;
839
840 cid = vmci_ctx_get_id(vmci_host_dev->context);
841
842 switch (info.action) {
843 case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
844 if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
845 u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
846 info.result = vmci_ctx_notify_dbell(cid, info.handle,
847 flags);
848 } else {
849 info.result = VMCI_ERROR_UNAVAILABLE;
850 }
851 break;
852
853 case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
854 info.result = vmci_ctx_dbell_create(cid, info.handle);
855 break;
856
857 case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
858 info.result = vmci_ctx_dbell_destroy(cid, info.handle);
859 break;
860
861 default:
862 vmci_ioctl_err("got unknown action (action=%d)\n",
863 info.action);
864 info.result = VMCI_ERROR_INVALID_ARGS;
865 }
866
867 return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
868}
869
870static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
871 const char *ioctl_name,
872 void __user *uptr)
873{
874 struct vmci_ctx_notify_recv_info info;
875 struct vmci_handle_arr *db_handle_array;
876 struct vmci_handle_arr *qp_handle_array;
877 void __user *ubuf;
878 u32 cid;
879 int retval = 0;
880
881 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
882 vmci_ioctl_err("only valid for contexts\n");
883 return -EINVAL;
884 }
885
886 if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
887 vmci_ioctl_err("not supported for the current vmx version\n");
888 return -EINVAL;
889 }
890
891 if (copy_from_user(&info, uptr, sizeof(info)))
892 return -EFAULT;
893
894 if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
895 (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
896 return -EINVAL;
897 }
898
899 cid = vmci_ctx_get_id(vmci_host_dev->context);
900
901 info.result = vmci_ctx_rcv_notifications_get(cid,
902 &db_handle_array, &qp_handle_array);
903 if (info.result != VMCI_SUCCESS)
904 return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
905
906 ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
907 info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
908 db_handle_array, &retval);
909 if (info.result == VMCI_SUCCESS && !retval) {
910 ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
911 info.result = drv_cp_harray_to_user(ubuf,
912 &info.qp_handle_buf_size,
913 qp_handle_array, &retval);
914 }
915
916 if (!retval && copy_to_user(uptr, &info, sizeof(info)))
917 retval = -EFAULT;
918
919 vmci_ctx_rcv_notifications_release(cid,
920 db_handle_array, qp_handle_array,
921 info.result == VMCI_SUCCESS && !retval);
922
923 return retval;
924}
925
926static long vmci_host_unlocked_ioctl(struct file *filp,
927 unsigned int iocmd, unsigned long ioarg)
928{
929#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do { \
930 char *name = __stringify(IOCTL_VMCI_ ## ioctl_name); \
931 return vmci_host_do_ ## ioctl_fn( \
932 vmci_host_dev, name, uptr); \
933 } while (0)
934
935 struct vmci_host_dev *vmci_host_dev = filp->private_data;
936 void __user *uptr = (void __user *)ioarg;
937
938 switch (iocmd) {
939 case IOCTL_VMCI_INIT_CONTEXT:
940 VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
941 case IOCTL_VMCI_DATAGRAM_SEND:
942 VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
943 case IOCTL_VMCI_DATAGRAM_RECEIVE:
944 VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
945 case IOCTL_VMCI_QUEUEPAIR_ALLOC:
946 VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
947 case IOCTL_VMCI_QUEUEPAIR_SETVA:
948 VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
949 case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
950 VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
951 case IOCTL_VMCI_QUEUEPAIR_DETACH:
952 VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
953 case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
954 VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
955 case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
956 VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
957 case IOCTL_VMCI_CTX_GET_CPT_STATE:
958 VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
959 case IOCTL_VMCI_CTX_SET_CPT_STATE:
960 VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
961 case IOCTL_VMCI_GET_CONTEXT_ID:
962 VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
963 case IOCTL_VMCI_SET_NOTIFY:
964 VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
965 case IOCTL_VMCI_NOTIFY_RESOURCE:
966 VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
967 case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
968 VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);
969
970 case IOCTL_VMCI_VERSION:
971 case IOCTL_VMCI_VERSION2:
972 return vmci_host_get_version(vmci_host_dev, iocmd, uptr);
973
974 default:
975 pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
976 return -EINVAL;
977 }
978
979#undef VMCI_DO_IOCTL
980}
981
982static const struct file_operations vmuser_fops = {
983 .owner = THIS_MODULE,
984 .open = vmci_host_open,
985 .release = vmci_host_close,
986 .poll = vmci_host_poll,
987 .unlocked_ioctl = vmci_host_unlocked_ioctl,
988 .compat_ioctl = vmci_host_unlocked_ioctl,
989};
990
991static struct miscdevice vmci_host_miscdev = {
992 .name = "vmci",
993 .minor = MISC_DYNAMIC_MINOR,
994 .fops = &vmuser_fops,
995};
996
997int __init vmci_host_init(void)
998{
999 int error;
1000
1001 host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
1002 VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
1003 -1, VMCI_VERSION, NULL);
1004 if (IS_ERR(host_context)) {
1005 error = PTR_ERR(host_context);
1006		pr_warn("Failed to initialize VMCIContext (error=%d)\n",
1007 error);
1008 return error;
1009 }
1010
1011 error = misc_register(&vmci_host_miscdev);
1012 if (error) {
1013 pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
1014 vmci_host_miscdev.name,
1015 MISC_MAJOR, vmci_host_miscdev.minor,
1016 error);
1017 pr_warn("Unable to initialize host personality\n");
1018 vmci_ctx_destroy(host_context);
1019 return error;
1020 }
1021
1022 pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
1023 vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);
1024
1025 vmci_host_device_initialized = true;
1026 return 0;
1027}
1028
1029void __exit vmci_host_exit(void)
1030{
1031 int error;
1032
1033 vmci_host_device_initialized = false;
1034
1035 error = misc_deregister(&vmci_host_miscdev);
1036 if (error)
1037 pr_warn("Error unregistering character device: %d\n", error);
1038
1039 vmci_ctx_destroy(host_context);
1040 vmci_qp_broker_exit();
1041
1042 pr_debug("VMCI host driver module unloaded\n");
1043}
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
new file mode 100644
index 000000000000..d94245dbd765
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -0,0 +1,3425 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#include <linux/vmw_vmci_defs.h>
17#include <linux/vmw_vmci_api.h>
18#include <linux/highmem.h>
19#include <linux/kernel.h>
20#include <linux/mm.h>
21#include <linux/module.h>
22#include <linux/mutex.h>
23#include <linux/pagemap.h>
24#include <linux/sched.h>
25#include <linux/slab.h>
26#include <linux/socket.h>
27#include <linux/wait.h>
28#include <linux/vmalloc.h>
29
30#include "vmci_handle_array.h"
31#include "vmci_queue_pair.h"
32#include "vmci_datagram.h"
33#include "vmci_resource.h"
34#include "vmci_context.h"
35#include "vmci_driver.h"
36#include "vmci_event.h"
37#include "vmci_route.h"
38
39/*
40 * In the following, we will distinguish between two kinds of VMX processes -
41 * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
42 * VMCI page files in the VMX to support VM to VM communication, and the
43 * newer ones that use the guest memory directly. We will refer to the
44 * older VMX versions as old-style VMX'en, and the newer ones as
45 * new-style VMX'en.
46 *
47 * The state transition diagram is as follows (the VMCIQPB_ prefix has been
48 * removed for readability) - see below for more details on the transitions:
49 *
50 * -------------- NEW -------------
51 * | |
52 * \_/ \_/
53 * CREATED_NO_MEM <-----------------> CREATED_MEM
54 * | | |
55 * | o-----------------------o |
56 * | | |
57 * \_/ \_/ \_/
58 * ATTACHED_NO_MEM <----------------> ATTACHED_MEM
59 * | | |
60 * | o----------------------o |
61 * | | |
62 * \_/ \_/ \_/
63 * SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
64 * | |
65 * | |
66 * -------------> gone <-------------
67 *
68 * In more detail. When a VMCI queue pair is first created, it will be in the
69 * VMCIQPB_NEW state. It will then move into one of the following states:
70 *
71 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
72 *
73 * - the create was performed by a host endpoint, in which case there is
74 * no backing memory yet.
75 *
76 * - the create was initiated by an old-style VMX, that uses
77 * vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
78 * a later point in time. This state can be distinguished from the one
79 * above by the context ID of the creator. A host side is not allowed to
80 * attach until the page store has been set.
81 *
82 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
83 * is created by a VMX using the queue pair device backend that
84 * sets the UVAs of the queue pair immediately and stores the
85 * information for later attachers. At this point, it is ready for
86 * the host side to attach to it.
87 *
88 * Once the queue pair is in one of the created states (with the exception of
89 * the case mentioned for older VMX'en above), it is possible to attach to the
90 * queue pair. Again we have two new states possible:
91 *
92 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
93 * paths:
94 *
95 * - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
96 * pair, and attaches to a queue pair previously created by the host side.
97 *
98 * - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
99 * already created by a guest.
100 *
101 * - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
102 * vmci_qp_broker_set_page_store (see below).
103 *
104 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
105 * VMCIQPB_CREATED_NO_MEM due to a host side create, an old-style VMX will
106 * bring the queue pair into this state. Once vmci_qp_broker_set_page_store
107 * is called to register the user memory, the VMCIQPB_ATTACHED_MEM state
108 * will be entered.
109 *
110 * From the attached queue pair, the queue pair can enter the shutdown states
111 * when either side of the queue pair detaches. If the guest side detaches
112 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
113 * the content of the queue pair will no longer be available. If the host
114 * side detaches first, the queue pair will either enter the
115 * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
116 * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
117 * (e.g., the host detaches while a guest is stunned).
118 *
119 * New-style VMX'en will also unmap guest memory, if the guest is
120 * quiesced, e.g., during a snapshot operation. In that case, the guest
121 * memory will no longer be available, and the queue pair will transition from
122 * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
123 * in which case the queue pair will transition from the *_NO_MEM state at that
124 * point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
125 * since the peer may have either attached or detached in the meantime. The
126 * values are laid out such that ++ on a state will move from a *_NO_MEM to a
127 * *_MEM state, and vice versa.
128 */
129
130/*
131 * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these
132 * types are passed around to enqueue and dequeue routines. Note that
133 * often the functions passed are simply wrappers around memcpy
134 * itself.
135 *
136 * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
137 * there's an unused last parameter for the hosted side. In
138 * ESX, that parameter holds a buffer type.
139 */
140typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
141 u64 queue_offset, const void *src,
142 size_t src_offset, size_t size);
143typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
144 const struct vmci_queue *queue,
145 u64 queue_offset, size_t size);
146
147/* The Kernel specific component of the struct vmci_queue structure. */
148struct vmci_queue_kern_if {
149 struct page **page;
150 struct page **header_page;
151 void *va;
152 struct mutex __mutex; /* Protects the queue. */
153 struct mutex *mutex; /* Shared by producer and consumer queues. */
154 bool host;
155 size_t num_pages;
156 bool mapped;
157};
158
159/*
160 * This structure is opaque to the clients.
161 */
162struct vmci_qp {
163 struct vmci_handle handle;
164 struct vmci_queue *produce_q;
165 struct vmci_queue *consume_q;
166 u64 produce_q_size;
167 u64 consume_q_size;
168 u32 peer;
169 u32 flags;
170 u32 priv_flags;
171 bool guest_endpoint;
172 unsigned int blocked;
173 unsigned int generation;
174 wait_queue_head_t event;
175};
176
177enum qp_broker_state {
178 VMCIQPB_NEW,
179 VMCIQPB_CREATED_NO_MEM,
180 VMCIQPB_CREATED_MEM,
181 VMCIQPB_ATTACHED_NO_MEM,
182 VMCIQPB_ATTACHED_MEM,
183 VMCIQPB_SHUTDOWN_NO_MEM,
184 VMCIQPB_SHUTDOWN_MEM,
185 VMCIQPB_GONE
186};
187
188#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
189 _qpb->state == VMCIQPB_ATTACHED_MEM || \
190 _qpb->state == VMCIQPB_SHUTDOWN_MEM)
191
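As the state description above notes, each *_NO_MEM value immediately precedes its *_MEM twin, so registering or unmapping guest memory can step the state with ++ or --. A purely illustrative helper, not part of the patch, and only meaningful for the CREATED/ATTACHED/SHUTDOWN pairs:

static void qp_state_mark_mem(enum qp_broker_state *state, bool has_mem)
{
	if (has_mem)
		++*state;	/* e.g. VMCIQPB_ATTACHED_NO_MEM -> VMCIQPB_ATTACHED_MEM */
	else
		--*state;	/* e.g. VMCIQPB_ATTACHED_MEM -> VMCIQPB_ATTACHED_NO_MEM */
}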
192/*
193 * In the queue pair broker, we always use the guest's point of view for
194 * the produce and consume queue values and references, e.g., the
195 * produce queue size stored is the guest's produce queue size. The
196 * host endpoint will need to swap these around. The only exception is
197 * the local queue pairs on the host, in which case the host endpoint
198 * that creates the queue pair will have the right orientation, and
199 * the attaching host endpoint will need to swap.
200 */
201struct qp_entry {
202 struct list_head list_item;
203 struct vmci_handle handle;
204 u32 peer;
205 u32 flags;
206 u64 produce_size;
207 u64 consume_size;
208 u32 ref_count;
209};
210
211struct qp_broker_entry {
212 struct vmci_resource resource;
213 struct qp_entry qp;
214 u32 create_id;
215 u32 attach_id;
216 enum qp_broker_state state;
217 bool require_trusted_attach;
218 bool created_by_trusted;
219 bool vmci_page_files; /* Created by VMX using VMCI page files */
220 struct vmci_queue *produce_q;
221 struct vmci_queue *consume_q;
222 struct vmci_queue_header saved_produce_q;
223 struct vmci_queue_header saved_consume_q;
224 vmci_event_release_cb wakeup_cb;
225 void *client_data;
226 void *local_mem; /* Kernel memory for local queue pair */
227};
228
229struct qp_guest_endpoint {
230 struct vmci_resource resource;
231 struct qp_entry qp;
232 u64 num_ppns;
233 void *produce_q;
234 void *consume_q;
235 struct ppn_set ppn_set;
236};
237
238struct qp_list {
239 struct list_head head;
240 struct mutex mutex; /* Protect queue list. */
241};
242
243static struct qp_list qp_broker_list = {
244 .head = LIST_HEAD_INIT(qp_broker_list.head),
245 .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
246};
247
248static struct qp_list qp_guest_endpoints = {
249 .head = LIST_HEAD_INIT(qp_guest_endpoints.head),
250 .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
251};
252
253#define INVALID_VMCI_GUEST_MEM_ID 0
254#define QPE_NUM_PAGES(_QPE) ((u32) \
255 (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
256 DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
257
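A quick worked example of QPE_NUM_PAGES: with 4 KiB pages and a queue pair whose produce and consume queues are 64 KiB each, the macro yields DIV_ROUND_UP(65536, 4096) + DIV_ROUND_UP(65536, 4096) + 2 = 16 + 16 + 2 = 34 pages; the trailing +2 covers the two queue-header pages, matching the one header page per queue seen in qp_alloc_queue() below.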
258
259/*
260 * Frees kernel VA space for a given queue and its queue header, and
261 * frees physical data pages.
262 */
263static void qp_free_queue(void *q, u64 size)
264{
265 struct vmci_queue *queue = q;
266
267 if (queue) {
268 u64 i = DIV_ROUND_UP(size, PAGE_SIZE);
269
270 if (queue->kernel_if->mapped) {
271 vunmap(queue->kernel_if->va);
272 queue->kernel_if->va = NULL;
273 }
274
275 while (i)
276 __free_page(queue->kernel_if->page[--i]);
277
278 vfree(queue->q_header);
279 }
280}
281
282/*
283 * Allocates kernel VA space of specified size, plus space for the
284 * queue structure/kernel interface and the queue header. Allocates
285 * physical pages for the queue data pages.
286 *
287 * PAGE m: struct vmci_queue_header (struct vmci_queue->q_header)
288 * PAGE m+1: struct vmci_queue
289 * PAGE m+1+q: struct vmci_queue_kern_if (struct vmci_queue->kernel_if)
290 * PAGE n-size: Data pages (struct vmci_queue->kernel_if->page[])
291 */
292static void *qp_alloc_queue(u64 size, u32 flags)
293{
294 u64 i;
295 struct vmci_queue *queue;
296 struct vmci_queue_header *q_header;
297 const u64 num_data_pages = DIV_ROUND_UP(size, PAGE_SIZE);
298 const uint queue_size =
299 PAGE_SIZE +
300 sizeof(*queue) + sizeof(*(queue->kernel_if)) +
301 num_data_pages * sizeof(*(queue->kernel_if->page));
302
303 q_header = vmalloc(queue_size);
304 if (!q_header)
305 return NULL;
306
307 queue = (void *)q_header + PAGE_SIZE;
308 queue->q_header = q_header;
309 queue->saved_header = NULL;
310 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
311 queue->kernel_if->header_page = NULL; /* Unused in guest. */
312 queue->kernel_if->page = (struct page **)(queue->kernel_if + 1);
313 queue->kernel_if->host = false;
314 queue->kernel_if->va = NULL;
315 queue->kernel_if->mapped = false;
316
317 for (i = 0; i < num_data_pages; i++) {
318 queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0);
319 if (!queue->kernel_if->page[i])
320 goto fail;
321 }
322
323 if (vmci_qp_pinned(flags)) {
324 queue->kernel_if->va =
325 vmap(queue->kernel_if->page, num_data_pages, VM_MAP,
326 PAGE_KERNEL);
327 if (!queue->kernel_if->va)
328 goto fail;
329
330 queue->kernel_if->mapped = true;
331 }
332
333 return (void *)queue;
334
335 fail:
336 qp_free_queue(queue, i * PAGE_SIZE);
337 return NULL;
338}
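/*
 * Editorial sketch of the resulting layout (sizes assumed): for a
 * 16 KiB queue on a 4 KiB-page system, num_data_pages = 4, so the
 * single vmalloc() above covers one page for the queue header followed
 * by struct vmci_queue, its kernel_if and a 4-entry page pointer
 * array; the data pages themselves come from alloc_pages() one at a
 * time and are only vmap()ed when the queue pair is pinned.
 */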
339
340/*
341 * Copies from a given buffer or iovector to a VMCI Queue. Uses
342 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
343 * by traversing the offset -> page translation structure for the queue.
344 * Assumes that offset + size does not wrap around in the queue.
345 */
346static int __qp_memcpy_to_queue(struct vmci_queue *queue,
347 u64 queue_offset,
348 const void *src,
349 size_t size,
350 bool is_iovec)
351{
352 struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
353 size_t bytes_copied = 0;
354
355 while (bytes_copied < size) {
356 u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
357 size_t page_offset =
358 (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
359 void *va;
360 size_t to_copy;
361
362 if (!kernel_if->mapped)
363 va = kmap(kernel_if->page[page_index]);
364 else
365 va = (void *)((u8 *)kernel_if->va +
366 (page_index * PAGE_SIZE));
367
368 if (size - bytes_copied > PAGE_SIZE - page_offset)
369 /* Enough payload to fill up from this page. */
370 to_copy = PAGE_SIZE - page_offset;
371 else
372 to_copy = size - bytes_copied;
373
374 if (is_iovec) {
375 struct iovec *iov = (struct iovec *)src;
376 int err;
377
378 /* The iovec will track bytes_copied internally. */
379 err = memcpy_fromiovec((u8 *)va + page_offset,
380 iov, to_copy);
381 if (err != 0) {
382 kunmap(kernel_if->page[page_index]);
383 return VMCI_ERROR_INVALID_ARGS;
384 }
385 } else {
386 memcpy((u8 *)va + page_offset,
387 (u8 *)src + bytes_copied, to_copy);
388 }
389
390 bytes_copied += to_copy;
391 if (!kernel_if->mapped)
392 kunmap(kernel_if->page[page_index]);
393 }
394
395 return VMCI_SUCCESS;
396}
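/*
 * Editorial example of the traversal above (values assumed): with
 * PAGE_SIZE = 4096, copying size = 6000 bytes at queue_offset = 3000
 * takes three iterations: page 0 at offset 3000 (1096 bytes), page 1
 * at offset 0 (4096 bytes) and page 2 at offset 0 (the remaining
 * 808 bytes).
 */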
397
398/*
399 * Copies to a given buffer or iovector from a VMCI Queue. Uses
400 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
401 * by traversing the offset -> page translation structure for the queue.
402 * Assumes that offset + size does not wrap around in the queue.
403 */
404static int __qp_memcpy_from_queue(void *dest,
405 const struct vmci_queue *queue,
406 u64 queue_offset,
407 size_t size,
408 bool is_iovec)
409{
410 struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
411 size_t bytes_copied = 0;
412
413 while (bytes_copied < size) {
414 u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
415 size_t page_offset =
416 (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
417 void *va;
418 size_t to_copy;
419
420 if (!kernel_if->mapped)
421 va = kmap(kernel_if->page[page_index]);
422 else
423 va = (void *)((u8 *)kernel_if->va +
424 (page_index * PAGE_SIZE));
425
426 if (size - bytes_copied > PAGE_SIZE - page_offset)
427 /* Enough payload to fill up this page. */
428 to_copy = PAGE_SIZE - page_offset;
429 else
430 to_copy = size - bytes_copied;
431
432 if (is_iovec) {
433 struct iovec *iov = (struct iovec *)dest;
434 int err;
435
436 /* The iovec will track bytes_copied internally. */
437 err = memcpy_toiovec(iov, (u8 *)va + page_offset,
438 to_copy);
439 if (err != 0) {
440 kunmap(kernel_if->page[page_index]);
441 return VMCI_ERROR_INVALID_ARGS;
442 }
443 } else {
444 memcpy((u8 *)dest + bytes_copied,
445 (u8 *)va + page_offset, to_copy);
446 }
447
448 bytes_copied += to_copy;
449 if (!kernel_if->mapped)
450 kunmap(kernel_if->page[page_index]);
451 }
452
453 return VMCI_SUCCESS;
454}
455
456/*
457 * Allocates two lists of PPNs --- one for the pages in the produce queue,
458 * and the other for the pages in the consume queue. Initializes the list
459 * of PPNs with the page frame numbers of the KVA for the two queues (and
460 * the queue headers).
461 */
462static int qp_alloc_ppn_set(void *prod_q,
463 u64 num_produce_pages,
464 void *cons_q,
465 u64 num_consume_pages, struct ppn_set *ppn_set)
466{
467 u32 *produce_ppns;
468 u32 *consume_ppns;
469 struct vmci_queue *produce_q = prod_q;
470 struct vmci_queue *consume_q = cons_q;
471 u64 i;
472
473 if (!produce_q || !num_produce_pages || !consume_q ||
474 !num_consume_pages || !ppn_set)
475 return VMCI_ERROR_INVALID_ARGS;
476
477 if (ppn_set->initialized)
478 return VMCI_ERROR_ALREADY_EXISTS;
479
480 produce_ppns =
481 kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
482 if (!produce_ppns)
483 return VMCI_ERROR_NO_MEM;
484
485 consume_ppns =
486 kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
487 if (!consume_ppns) {
488 kfree(produce_ppns);
489 return VMCI_ERROR_NO_MEM;
490 }
491
492 produce_ppns[0] = page_to_pfn(vmalloc_to_page(produce_q->q_header));
493 for (i = 1; i < num_produce_pages; i++) {
494 unsigned long pfn;
495
496 produce_ppns[i] =
497 page_to_pfn(produce_q->kernel_if->page[i - 1]);
498 pfn = produce_ppns[i];
499
500 /* Fail allocation if PFN isn't supported by hypervisor. */
501 if (sizeof(pfn) > sizeof(*produce_ppns)
502 && pfn != produce_ppns[i])
503 goto ppn_error;
504 }
505
506 consume_ppns[0] = page_to_pfn(vmalloc_to_page(consume_q->q_header));
507 for (i = 1; i < num_consume_pages; i++) {
508 unsigned long pfn;
509
510 consume_ppns[i] =
511 page_to_pfn(consume_q->kernel_if->page[i - 1]);
512 pfn = consume_ppns[i];
513
514 /* Fail allocation if PFN isn't supported by hypervisor. */
515 if (sizeof(pfn) > sizeof(*consume_ppns)
516 && pfn != consume_ppns[i])
517 goto ppn_error;
518 }
519
520 ppn_set->num_produce_pages = num_produce_pages;
521 ppn_set->num_consume_pages = num_consume_pages;
522 ppn_set->produce_ppns = produce_ppns;
523 ppn_set->consume_ppns = consume_ppns;
524 ppn_set->initialized = true;
525 return VMCI_SUCCESS;
526
527 ppn_error:
528 kfree(produce_ppns);
529 kfree(consume_ppns);
530 return VMCI_ERROR_INVALID_ARGS;
531}
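/*
 * Editorial note: on 64-bit kernels sizeof(unsigned long) is larger
 * than the 32-bit PPN entries, so the pfn != produce_ppns[i] check
 * above rejects any page whose frame number does not fit in 32 bits
 * and therefore cannot be passed to the hypervisor.
 */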
532
533/*
534 * Frees the two lists of PPNs for a queue pair.
535 */
536static void qp_free_ppn_set(struct ppn_set *ppn_set)
537{
538 if (ppn_set->initialized) {
539 /* Do not call these functions on NULL inputs. */
540 kfree(ppn_set->produce_ppns);
541 kfree(ppn_set->consume_ppns);
542 }
543 memset(ppn_set, 0, sizeof(*ppn_set));
544}
545
546/*
547 * Populates the list of PPNs in the hypercall structure with the PPNs
548 * of the produce queue and the consume queue.
549 */
550static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
551{
552 memcpy(call_buf, ppn_set->produce_ppns,
553 ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
554 memcpy(call_buf +
555 ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
556 ppn_set->consume_ppns,
557 ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
558
559 return VMCI_SUCCESS;
560}
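/*
 * Editorial sketch of the buffer produced above (counts assumed): with
 * three produce pages and two consume pages, call_buf is filled as
 *
 *   [ produce_ppn0 | produce_ppn1 | produce_ppn2 | consume_ppn0 | consume_ppn1 ]
 *
 * with each entry a u32 PPN; qp_alloc_hypercall() below appends this
 * buffer directly after its struct vmci_qp_alloc_msg header.
 */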
561
562static int qp_memcpy_to_queue(struct vmci_queue *queue,
563 u64 queue_offset,
564 const void *src, size_t src_offset, size_t size)
565{
566 return __qp_memcpy_to_queue(queue, queue_offset,
567 (u8 *)src + src_offset, size, false);
568}
569
570static int qp_memcpy_from_queue(void *dest,
571 size_t dest_offset,
572 const struct vmci_queue *queue,
573 u64 queue_offset, size_t size)
574{
575 return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
576 queue, queue_offset, size, false);
577}
578
579/*
580 * Copies to a VMCI Queue from a given iovec.
581 */
582static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
583 u64 queue_offset,
584 const void *src,
585 size_t src_offset, size_t size)
586{
587
588 /*
589 * We ignore src_offset because src is really a struct iovec * and will
590 * maintain offset internally.
591 */
592 return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
593}
594
595/*
596 * Copies to a given iovec from a VMCI Queue.
597 */
598static int qp_memcpy_from_queue_iov(void *dest,
599 size_t dest_offset,
600 const struct vmci_queue *queue,
601 u64 queue_offset, size_t size)
602{
603 /*
604 * We ignore dest_offset because dest is really a struct iovec * and
605 * will maintain offset internally.
606 */
607 return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
608}
609
610/*
611 * Allocates kernel VA space of specified size plus space for the queue
612 * and kernel interface. This is different from the guest queue allocator,
613 * because we do not allocate our own queue header/data pages here but
614 * share those of the guest.
615 */
616static struct vmci_queue *qp_host_alloc_queue(u64 size)
617{
618 struct vmci_queue *queue;
619 const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
620 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
621 const size_t queue_page_size =
622 num_pages * sizeof(*queue->kernel_if->page);
623
624 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
625 if (queue) {
626 queue->q_header = NULL;
627 queue->saved_header = NULL;
628 queue->kernel_if =
629 (struct vmci_queue_kern_if *)((u8 *)queue +
630 sizeof(*queue));
631 queue->kernel_if->host = true;
632 queue->kernel_if->mutex = NULL;
633 queue->kernel_if->num_pages = num_pages;
634 queue->kernel_if->header_page =
635 (struct page **)((u8 *)queue + queue_size);
636 queue->kernel_if->page = &queue->kernel_if->header_page[1];
637 queue->kernel_if->va = NULL;
638 queue->kernel_if->mapped = false;
639 }
640
641 return queue;
642}
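/*
 * Editorial note: header_page points at an array of num_pages page
 * pointers allocated together with the queue, and page is set to
 * &header_page[1], so header_page[0] is reserved for the shared queue
 * header page while page[] covers the data pages that follow it.
 */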
643
644/*
645 * Frees kernel memory for a given queue (header plus translation
646 * structure).
647 */
648static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
649{
650 kfree(queue);
651}
652
653/*
654 * Initialize the mutex for the pair of queues. This mutex is used to
655 * protect the q_header and the buffer from changing out from under any
656 * users of either queue. Of course, it's only any good if the mutexes
657 * are actually acquired. Queue structure must lie on non-paged memory
658 * or we cannot guarantee access to the mutex.
659 */
660static void qp_init_queue_mutex(struct vmci_queue *produce_q,
661 struct vmci_queue *consume_q)
662{
663 /*
664 * Only the host queue has shared state - the guest queues do not
665 * need to synchronize access using a queue mutex.
666 */
667
668 if (produce_q->kernel_if->host) {
669 produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
670 consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
671 mutex_init(produce_q->kernel_if->mutex);
672 }
673}
674
675/*
676 * Cleans up the mutex for the pair of queues.
677 */
678static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
679 struct vmci_queue *consume_q)
680{
681 if (produce_q->kernel_if->host) {
682 produce_q->kernel_if->mutex = NULL;
683 consume_q->kernel_if->mutex = NULL;
684 }
685}
686
687/*
688 * Acquire the mutex for the queue. Note that the produce_q and
689 * the consume_q share a mutex. So, only one of the two needs to
690 * be passed in to this routine. Either will work just fine.
691 */
692static void qp_acquire_queue_mutex(struct vmci_queue *queue)
693{
694 if (queue->kernel_if->host)
695 mutex_lock(queue->kernel_if->mutex);
696}
697
698/*
699 * Release the mutex for the queue. Note that the produce_q and
700 * the consume_q share a mutex. So, only one of the two needs to
701 * be passed in to this routine. Either will work just fine.
702 */
703static void qp_release_queue_mutex(struct vmci_queue *queue)
704{
705 if (queue->kernel_if->host)
706 mutex_unlock(queue->kernel_if->mutex);
707}
708
709/*
710 * Helper function to release pages previously obtained using
711 * get_user_pages().
712 */
713static void qp_release_pages(struct page **pages,
714 u64 num_pages, bool dirty)
715{
716 int i;
717
718 for (i = 0; i < num_pages; i++) {
719 if (dirty)
720 set_page_dirty(pages[i]);
721
722 page_cache_release(pages[i]);
723 pages[i] = NULL;
724 }
725}
726
727/*
728 * Lock the user pages referenced by the {produce,consume}Buffer
729 * struct into memory and populate the {produce,consume}Pages
730 * arrays in the attach structure with them.
731 */
732static int qp_host_get_user_memory(u64 produce_uva,
733 u64 consume_uva,
734 struct vmci_queue *produce_q,
735 struct vmci_queue *consume_q)
736{
737 int retval;
738 int err = VMCI_SUCCESS;
739
740 down_write(&current->mm->mmap_sem);
741 retval = get_user_pages(current,
742 current->mm,
743 (uintptr_t) produce_uva,
744 produce_q->kernel_if->num_pages,
745 1, 0, produce_q->kernel_if->header_page, NULL);
746 if (retval < produce_q->kernel_if->num_pages) {
747 pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
748 qp_release_pages(produce_q->kernel_if->header_page, retval,
749 false);
750 err = VMCI_ERROR_NO_MEM;
751 goto out;
752 }
753
754 retval = get_user_pages(current,
755 current->mm,
756 (uintptr_t) consume_uva,
757 consume_q->kernel_if->num_pages,
758 1, 0, consume_q->kernel_if->header_page, NULL);
759 if (retval < consume_q->kernel_if->num_pages) {
760 pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
761 qp_release_pages(consume_q->kernel_if->header_page, retval,
762 false);
763 qp_release_pages(produce_q->kernel_if->header_page,
764 produce_q->kernel_if->num_pages, false);
765 err = VMCI_ERROR_NO_MEM;
766 }
767
768 out:
769 up_write(&current->mm->mmap_sem);
770
771 return err;
772}
773
774/*
775 * Registers the specification of the user pages used for backing a queue
776 * pair. Enough information to map in pages is stored in the OS specific
777 * part of the struct vmci_queue structure.
778 */
779static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
780 struct vmci_queue *produce_q,
781 struct vmci_queue *consume_q)
782{
783 u64 produce_uva;
784 u64 consume_uva;
785
786 /*
787 * The new style and the old style mapping only differs in
788 * that we either get a single or two UVAs, so we split the
789 * single UVA range at the appropriate spot.
790 */
791 produce_uva = page_store->pages;
792 consume_uva = page_store->pages +
793 produce_q->kernel_if->num_pages * PAGE_SIZE;
794 return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
795 consume_q);
796}
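/*
 * Editorial example of the split above (sizes assumed): if the produce
 * queue spans two data pages, its num_pages is 3 (data plus header),
 * so consume_uva starts 3 * PAGE_SIZE into the single UVA range
 * supplied by the VMX.
 */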
797
798/*
799 * Releases and removes the references to user pages stored in the attach
800 * struct. Pages are released from the page cache and may become
801 * swappable again.
802 */
803static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
804 struct vmci_queue *consume_q)
805{
806 qp_release_pages(produce_q->kernel_if->header_page,
807 produce_q->kernel_if->num_pages, true);
808 memset(produce_q->kernel_if->header_page, 0,
809 sizeof(*produce_q->kernel_if->header_page) *
810 produce_q->kernel_if->num_pages);
811 qp_release_pages(consume_q->kernel_if->header_page,
812 consume_q->kernel_if->num_pages, true);
813 memset(consume_q->kernel_if->header_page, 0,
814 sizeof(*consume_q->kernel_if->header_page) *
815 consume_q->kernel_if->num_pages);
816}
817
818/*
819 * Once qp_host_register_user_memory has been performed on a
820 * queue, the queue pair headers can be mapped into the
821 * kernel. Once mapped, they must be unmapped with
822 * qp_host_unmap_queues prior to calling
823 * qp_host_unregister_user_memory.
824 * Pages are pinned.
825 */
826static int qp_host_map_queues(struct vmci_queue *produce_q,
827 struct vmci_queue *consume_q)
828{
829 int result;
830
831 if (!produce_q->q_header || !consume_q->q_header) {
832 struct page *headers[2];
833
834 if (produce_q->q_header != consume_q->q_header)
835 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
836
837 if (produce_q->kernel_if->header_page == NULL ||
838 *produce_q->kernel_if->header_page == NULL)
839 return VMCI_ERROR_UNAVAILABLE;
840
841 headers[0] = *produce_q->kernel_if->header_page;
842 headers[1] = *consume_q->kernel_if->header_page;
843
844 produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
845 if (produce_q->q_header != NULL) {
846 consume_q->q_header =
847 (struct vmci_queue_header *)((u8 *)
848 produce_q->q_header +
849 PAGE_SIZE);
850 result = VMCI_SUCCESS;
851 } else {
852 pr_warn("vmap failed\n");
853 result = VMCI_ERROR_NO_MEM;
854 }
855 } else {
856 result = VMCI_SUCCESS;
857 }
858
859 return result;
860}
861
862/*
863 * Unmaps previously mapped queue pair headers from the kernel.
864 * Pages are unpinned.
865 */
866static int qp_host_unmap_queues(u32 gid,
867 struct vmci_queue *produce_q,
868 struct vmci_queue *consume_q)
869{
870 if (produce_q->q_header) {
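		/*
		 * Editorial note: qp_host_map_queues() maps both headers
		 * with a single two-page vmap(), so the lower of the two
		 * addresses is the base of that mapping and is the one
		 * that must be passed to vunmap().
		 */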
871 if (produce_q->q_header < consume_q->q_header)
872 vunmap(produce_q->q_header);
873 else
874 vunmap(consume_q->q_header);
875
876 produce_q->q_header = NULL;
877 consume_q->q_header = NULL;
878 }
879
880 return VMCI_SUCCESS;
881}
882
883/*
884 * Finds the entry in the list corresponding to a given handle. Assumes
885 * that the list is locked.
886 */
887static struct qp_entry *qp_list_find(struct qp_list *qp_list,
888 struct vmci_handle handle)
889{
890 struct qp_entry *entry;
891
892 if (vmci_handle_is_invalid(handle))
893 return NULL;
894
895 list_for_each_entry(entry, &qp_list->head, list_item) {
896 if (vmci_handle_is_equal(entry->handle, handle))
897 return entry;
898 }
899
900 return NULL;
901}
902
903/*
904 * Finds the entry in the list corresponding to a given handle.
905 */
906static struct qp_guest_endpoint *
907qp_guest_handle_to_entry(struct vmci_handle handle)
908{
909 struct qp_guest_endpoint *entry;
910 struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);
911
912 entry = qp ? container_of(
913 qp, struct qp_guest_endpoint, qp) : NULL;
914 return entry;
915}
916
917/*
918 * Finds the entry in the list corresponding to a given handle.
919 */
920static struct qp_broker_entry *
921qp_broker_handle_to_entry(struct vmci_handle handle)
922{
923 struct qp_broker_entry *entry;
924 struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);
925
926 entry = qp ? container_of(
927 qp, struct qp_broker_entry, qp) : NULL;
928 return entry;
929}
930
931/*
932 * Dispatches a queue pair event message directly into the local event
933 * queue.
934 */
935static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
936{
937 u32 context_id = vmci_get_context_id();
938 struct vmci_event_qp ev;
939
940 ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
941 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
942 VMCI_CONTEXT_RESOURCE_ID);
943 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
944 ev.msg.event_data.event =
945 attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
946 ev.payload.peer_id = context_id;
947 ev.payload.handle = handle;
948
949 return vmci_event_dispatch(&ev.msg.hdr);
950}
951
952/*
953 * Allocates and initializes a qp_guest_endpoint structure.
954 * Allocates a queue_pair rid (and handle) iff the given entry has
955 * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX
956 * are reserved handles. Assumes that the QP list mutex is held
957 * by the caller.
958 */
959static struct qp_guest_endpoint *
960qp_guest_endpoint_create(struct vmci_handle handle,
961 u32 peer,
962 u32 flags,
963 u64 produce_size,
964 u64 consume_size,
965 void *produce_q,
966 void *consume_q)
967{
968 int result;
969 struct qp_guest_endpoint *entry;
970 /* One page each for the queue headers. */
971 const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
972 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
973
974 if (vmci_handle_is_invalid(handle)) {
975 u32 context_id = vmci_get_context_id();
976
977 handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
978 }
979
980 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
981 if (entry) {
982 entry->qp.peer = peer;
983 entry->qp.flags = flags;
984 entry->qp.produce_size = produce_size;
985 entry->qp.consume_size = consume_size;
986 entry->qp.ref_count = 0;
987 entry->num_ppns = num_ppns;
988 entry->produce_q = produce_q;
989 entry->consume_q = consume_q;
990 INIT_LIST_HEAD(&entry->qp.list_item);
991
992 /* Add resource obj */
993 result = vmci_resource_add(&entry->resource,
994 VMCI_RESOURCE_TYPE_QPAIR_GUEST,
995 handle);
996 entry->qp.handle = vmci_resource_handle(&entry->resource);
997 if ((result != VMCI_SUCCESS) ||
998 qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
999 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
1000 handle.context, handle.resource, result);
1001 kfree(entry);
1002 entry = NULL;
1003 }
1004 }
1005 return entry;
1006}
1007
1008/*
1009 * Frees a qp_guest_endpoint structure.
1010 */
1011static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
1012{
1013 qp_free_ppn_set(&entry->ppn_set);
1014 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
1015 qp_free_queue(entry->produce_q, entry->qp.produce_size);
1016 qp_free_queue(entry->consume_q, entry->qp.consume_size);
1017 /* Unlink from resource hash table and free callback */
1018 vmci_resource_remove(&entry->resource);
1019
1020 kfree(entry);
1021}
1022
1023/*
1024 * Helper to make a queue_pairAlloc hypercall when the driver is
1025 * supporting a guest device.
1026 */
1027static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
1028{
1029 struct vmci_qp_alloc_msg *alloc_msg;
1030 size_t msg_size;
1031 int result;
1032
1033 if (!entry || entry->num_ppns <= 2)
1034 return VMCI_ERROR_INVALID_ARGS;
1035
1036 msg_size = sizeof(*alloc_msg) +
1037 (size_t) entry->num_ppns * sizeof(u32);
1038 alloc_msg = kmalloc(msg_size, GFP_KERNEL);
1039 if (!alloc_msg)
1040 return VMCI_ERROR_NO_MEM;
1041
1042 alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
1043 VMCI_QUEUEPAIR_ALLOC);
1044 alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
1045 alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
1046 alloc_msg->handle = entry->qp.handle;
1047 alloc_msg->peer = entry->qp.peer;
1048 alloc_msg->flags = entry->qp.flags;
1049 alloc_msg->produce_size = entry->qp.produce_size;
1050 alloc_msg->consume_size = entry->qp.consume_size;
1051 alloc_msg->num_ppns = entry->num_ppns;
1052
1053 result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
1054 &entry->ppn_set);
1055 if (result == VMCI_SUCCESS)
1056 result = vmci_send_datagram(&alloc_msg->hdr);
1057
1058 kfree(alloc_msg);
1059
1060 return result;
1061}
1062
1063/*
1064 * Helper to make a queue_pairDetach hypercall when the driver is
1065 * supporting a guest device.
1066 */
1067static int qp_detatch_hypercall(struct vmci_handle handle)
1068{
1069 struct vmci_qp_detach_msg detach_msg;
1070
1071 detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
1072 VMCI_QUEUEPAIR_DETACH);
1073 detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
1074 detach_msg.hdr.payload_size = sizeof(handle);
1075 detach_msg.handle = handle;
1076
1077 return vmci_send_datagram(&detach_msg.hdr);
1078}
1079
1080/*
1081 * Adds the given entry to the list. Assumes that the list is locked.
1082 */
1083static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
1084{
1085 if (entry)
1086 list_add(&entry->list_item, &qp_list->head);
1087}
1088
1089/*
1090 * Removes the given entry from the list. Assumes that the list is locked.
1091 */
1092static void qp_list_remove_entry(struct qp_list *qp_list,
1093 struct qp_entry *entry)
1094{
1095 if (entry)
1096 list_del(&entry->list_item);
1097}
1098
1099/*
1100 * Helper for VMCI queue_pair detach interface. Frees the physical
1101 * pages for the queue pair.
1102 */
1103static int qp_detatch_guest_work(struct vmci_handle handle)
1104{
1105 int result;
1106 struct qp_guest_endpoint *entry;
1107 u32 ref_count = ~0; /* To avoid compiler warning below */
1108
1109 mutex_lock(&qp_guest_endpoints.mutex);
1110
1111 entry = qp_guest_handle_to_entry(handle);
1112 if (!entry) {
1113 mutex_unlock(&qp_guest_endpoints.mutex);
1114 return VMCI_ERROR_NOT_FOUND;
1115 }
1116
1117 if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1118 result = VMCI_SUCCESS;
1119
1120 if (entry->qp.ref_count > 1) {
1121 result = qp_notify_peer_local(false, handle);
1122 /*
1123 * We can fail to notify a local queuepair
1124 * because we can't allocate. We still want
1125 * to release the entry if that happens, so
1126 * don't bail out yet.
1127 */
1128 }
1129 } else {
1130 result = qp_detatch_hypercall(handle);
1131 if (result < VMCI_SUCCESS) {
1132 /*
1133 * We failed to notify a non-local queuepair.
1134 * That other queuepair might still be
1135 * accessing the shared memory, so don't
1136 * release the entry yet. It will get cleaned
1137 * up by VMCIqueue_pair_Exit() if necessary
1138 * (assuming we are going away, otherwise why
1139 * did this fail?).
1140 */
1141
1142 mutex_unlock(&qp_guest_endpoints.mutex);
1143 return result;
1144 }
1145 }
1146
1147 /*
1148 * If we get here then we either failed to notify a local queuepair, or
1149 * we succeeded in all cases. Release the entry if required.
1150 */
1151
1152 entry->qp.ref_count--;
1153 if (entry->qp.ref_count == 0)
1154 qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);
1155
1156 /* If we didn't remove the entry, this could change once we unlock. */
1157 if (entry)
1158 ref_count = entry->qp.ref_count;
1159
1160 mutex_unlock(&qp_guest_endpoints.mutex);
1161
1162 if (ref_count == 0)
1163 qp_guest_endpoint_destroy(entry);
1164
1165 return result;
1166}
1167
1168/*
1169 * This function handles the actual allocation of a VMCI queue
1170 * pair guest endpoint. Allocates physical pages for the queue
1171 * pair. It makes OS dependent calls through generic wrappers.
1172 */
1173static int qp_alloc_guest_work(struct vmci_handle *handle,
1174 struct vmci_queue **produce_q,
1175 u64 produce_size,
1176 struct vmci_queue **consume_q,
1177 u64 consume_size,
1178 u32 peer,
1179 u32 flags,
1180 u32 priv_flags)
1181{
1182 const u64 num_produce_pages =
1183 DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
1184 const u64 num_consume_pages =
1185 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
1186 void *my_produce_q = NULL;
1187 void *my_consume_q = NULL;
1188 int result;
1189 struct qp_guest_endpoint *queue_pair_entry = NULL;
1190
1191 if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
1192 return VMCI_ERROR_NO_ACCESS;
1193
1194 mutex_lock(&qp_guest_endpoints.mutex);
1195
1196 queue_pair_entry = qp_guest_handle_to_entry(*handle);
1197 if (queue_pair_entry) {
1198 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1199 /* Local attach case. */
1200 if (queue_pair_entry->qp.ref_count > 1) {
1201 pr_devel("Error attempting to attach more than once\n");
1202 result = VMCI_ERROR_UNAVAILABLE;
1203 goto error_keep_entry;
1204 }
1205
1206 if (queue_pair_entry->qp.produce_size != consume_size ||
1207 queue_pair_entry->qp.consume_size !=
1208 produce_size ||
1209 queue_pair_entry->qp.flags !=
1210 (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
1211 pr_devel("Error mismatched queue pair in local attach\n");
1212 result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
1213 goto error_keep_entry;
1214 }
1215
1216 /*
1217 * Do a local attach. We swap the consume and
1218 * produce queues for the attacher and deliver
1219 * an attach event.
1220 */
1221 result = qp_notify_peer_local(true, *handle);
1222 if (result < VMCI_SUCCESS)
1223 goto error_keep_entry;
1224
1225 my_produce_q = queue_pair_entry->consume_q;
1226 my_consume_q = queue_pair_entry->produce_q;
1227 goto out;
1228 }
1229
1230 result = VMCI_ERROR_ALREADY_EXISTS;
1231 goto error_keep_entry;
1232 }
1233
1234 my_produce_q = qp_alloc_queue(produce_size, flags);
1235 if (!my_produce_q) {
1236 pr_warn("Error allocating pages for produce queue\n");
1237 result = VMCI_ERROR_NO_MEM;
1238 goto error;
1239 }
1240
1241 my_consume_q = qp_alloc_queue(consume_size, flags);
1242 if (!my_consume_q) {
1243 pr_warn("Error allocating pages for consume queue\n");
1244 result = VMCI_ERROR_NO_MEM;
1245 goto error;
1246 }
1247
1248 queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
1249 produce_size, consume_size,
1250 my_produce_q, my_consume_q);
1251 if (!queue_pair_entry) {
1252 pr_warn("Error allocating memory in %s\n", __func__);
1253 result = VMCI_ERROR_NO_MEM;
1254 goto error;
1255 }
1256
1257 result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
1258 num_consume_pages,
1259 &queue_pair_entry->ppn_set);
1260 if (result < VMCI_SUCCESS) {
1261 pr_warn("qp_alloc_ppn_set failed\n");
1262 goto error;
1263 }
1264
1265 /*
1266 * It's only necessary to notify the host if this queue pair will be
1267 * attached to from another context.
1268 */
1269 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1270 /* Local create case. */
1271 u32 context_id = vmci_get_context_id();
1272
1273 /*
1274 * Enforce similar checks on local queue pairs as we
1275 * do for regular ones. The handle's context must
1276 * match the creator or attacher context id (here they
1277 * are both the current context id) and the
1278 * attach-only flag cannot exist during create. We
1279 * also ensure specified peer is this context or an
1280 * invalid one.
1281 */
1282 if (queue_pair_entry->qp.handle.context != context_id ||
1283 (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
1284 queue_pair_entry->qp.peer != context_id)) {
1285 result = VMCI_ERROR_NO_ACCESS;
1286 goto error;
1287 }
1288
1289 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
1290 result = VMCI_ERROR_NOT_FOUND;
1291 goto error;
1292 }
1293 } else {
1294 result = qp_alloc_hypercall(queue_pair_entry);
1295 if (result < VMCI_SUCCESS) {
1296 pr_warn("qp_alloc_hypercall result = %d\n", result);
1297 goto error;
1298 }
1299 }
1300
1301 qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
1302 (struct vmci_queue *)my_consume_q);
1303
1304 qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);
1305
1306 out:
1307 queue_pair_entry->qp.ref_count++;
1308 *handle = queue_pair_entry->qp.handle;
1309 *produce_q = (struct vmci_queue *)my_produce_q;
1310 *consume_q = (struct vmci_queue *)my_consume_q;
1311
1312 /*
1313 * We should initialize the queue pair header pages on a local
1314 * queue pair create. For non-local queue pairs, the
1315 * hypervisor initializes the header pages in the create step.
1316 */
1317 if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
1318 queue_pair_entry->qp.ref_count == 1) {
1319 vmci_q_header_init((*produce_q)->q_header, *handle);
1320 vmci_q_header_init((*consume_q)->q_header, *handle);
1321 }
1322
1323 mutex_unlock(&qp_guest_endpoints.mutex);
1324
1325 return VMCI_SUCCESS;
1326
1327 error:
1328 mutex_unlock(&qp_guest_endpoints.mutex);
1329 if (queue_pair_entry) {
1330 /* The queues will be freed inside the destroy routine. */
1331 qp_guest_endpoint_destroy(queue_pair_entry);
1332 } else {
1333 qp_free_queue(my_produce_q, produce_size);
1334 qp_free_queue(my_consume_q, consume_size);
1335 }
1336 return result;
1337
1338 error_keep_entry:
1339 /* This path should only be used when an existing entry was found. */
1340 mutex_unlock(&qp_guest_endpoints.mutex);
1341 return result;
1342}
1343
1344/*
1345 * The first endpoint issuing a queue pair allocation will create the state
1346 * of the queue pair in the queue pair broker.
1347 *
1348 * If the creator is a guest, it will associate a VMX virtual address range
1349 * with the queue pair as specified by the page_store. For compatibility with
1350 * older VMX'en that used a separate step to set the VMX virtual
1351 * address range, the virtual address range can be registered later using
1352 * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
1353 * used.
1354 *
1355 * If the creator is the host, a page_store of NULL should be used as well,
1356 * since the host is not able to supply a page store for the queue pair.
1357 *
1358 * For older VMX and host callers, the queue pair will be created in the
1359 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
1360 * created in the VMCIQPB_CREATED_MEM state.
1361 */
1362static int qp_broker_create(struct vmci_handle handle,
1363 u32 peer,
1364 u32 flags,
1365 u32 priv_flags,
1366 u64 produce_size,
1367 u64 consume_size,
1368 struct vmci_qp_page_store *page_store,
1369 struct vmci_ctx *context,
1370 vmci_event_release_cb wakeup_cb,
1371 void *client_data, struct qp_broker_entry **ent)
1372{
1373 struct qp_broker_entry *entry = NULL;
1374 const u32 context_id = vmci_ctx_get_id(context);
1375 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1376 int result;
1377 u64 guest_produce_size;
1378 u64 guest_consume_size;
1379
1380 /* Do not create if the caller asked not to. */
1381 if (flags & VMCI_QPFLAG_ATTACH_ONLY)
1382 return VMCI_ERROR_NOT_FOUND;
1383
1384 /*
1385 * Creator's context ID should match handle's context ID or the creator
1386 * must allow the context in handle's context ID as the "peer".
1387 */
1388 if (handle.context != context_id && handle.context != peer)
1389 return VMCI_ERROR_NO_ACCESS;
1390
1391 if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
1392 return VMCI_ERROR_DST_UNREACHABLE;
1393
1394 /*
1395 * Creator's context ID for local queue pairs should match the
1396 * peer, if a peer is specified.
1397 */
1398 if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
1399 return VMCI_ERROR_NO_ACCESS;
1400
1401 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1402 if (!entry)
1403 return VMCI_ERROR_NO_MEM;
1404
1405 if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
1406 /*
1407 * The queue pair broker entry stores values from the guest
1408 * point of view, so a creating host side endpoint should swap
1409 * produce and consume values -- unless it is a local queue
1410 * pair, in which case no swapping is necessary, since the local
1411 * attacher will swap queues.
1412 */
1413
1414 guest_produce_size = consume_size;
1415 guest_consume_size = produce_size;
1416 } else {
1417 guest_produce_size = produce_size;
1418 guest_consume_size = consume_size;
1419 }
1420
1421 entry->qp.handle = handle;
1422 entry->qp.peer = peer;
1423 entry->qp.flags = flags;
1424 entry->qp.produce_size = guest_produce_size;
1425 entry->qp.consume_size = guest_consume_size;
1426 entry->qp.ref_count = 1;
1427 entry->create_id = context_id;
1428 entry->attach_id = VMCI_INVALID_ID;
1429 entry->state = VMCIQPB_NEW;
1430 entry->require_trusted_attach =
1431 !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
1432 entry->created_by_trusted =
1433 !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
1434 entry->vmci_page_files = false;
1435 entry->wakeup_cb = wakeup_cb;
1436 entry->client_data = client_data;
1437 entry->produce_q = qp_host_alloc_queue(guest_produce_size);
1438 if (entry->produce_q == NULL) {
1439 result = VMCI_ERROR_NO_MEM;
1440 goto error;
1441 }
1442 entry->consume_q = qp_host_alloc_queue(guest_consume_size);
1443 if (entry->consume_q == NULL) {
1444 result = VMCI_ERROR_NO_MEM;
1445 goto error;
1446 }
1447
1448 qp_init_queue_mutex(entry->produce_q, entry->consume_q);
1449
1450 INIT_LIST_HEAD(&entry->qp.list_item);
1451
1452 if (is_local) {
1453 u8 *tmp;
1454
1455 entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
1456 PAGE_SIZE, GFP_KERNEL);
1457 if (entry->local_mem == NULL) {
1458 result = VMCI_ERROR_NO_MEM;
1459 goto error;
1460 }
1461 entry->state = VMCIQPB_CREATED_MEM;
1462 entry->produce_q->q_header = entry->local_mem;
1463 tmp = (u8 *)entry->local_mem + PAGE_SIZE *
1464 (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
1465 entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
1466 } else if (page_store) {
1467 /*
1468 * The VMX already initialized the queue pair headers, so no
1469 * need for the kernel side to do that.
1470 */
1471 result = qp_host_register_user_memory(page_store,
1472 entry->produce_q,
1473 entry->consume_q);
1474 if (result < VMCI_SUCCESS)
1475 goto error;
1476
1477 entry->state = VMCIQPB_CREATED_MEM;
1478 } else {
1479 /*
1480 * A create without a page_store may be either a host
1481 * side create (in which case we are waiting for the
1482 * guest side to supply the memory) or an old style
1483 * queue pair create (in which case we will expect a
1484 * set page store call as the next step).
1485 */
1486 entry->state = VMCIQPB_CREATED_NO_MEM;
1487 }
1488
1489 qp_list_add_entry(&qp_broker_list, &entry->qp);
1490 if (ent != NULL)
1491 *ent = entry;
1492
1493 /* Add to resource obj */
1494 result = vmci_resource_add(&entry->resource,
1495 VMCI_RESOURCE_TYPE_QPAIR_HOST,
1496 handle);
1497 if (result != VMCI_SUCCESS) {
1498 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
1499 handle.context, handle.resource, result);
1500 goto error;
1501 }
1502
1503 entry->qp.handle = vmci_resource_handle(&entry->resource);
1504 if (is_local) {
1505 vmci_q_header_init(entry->produce_q->q_header,
1506 entry->qp.handle);
1507 vmci_q_header_init(entry->consume_q->q_header,
1508 entry->qp.handle);
1509 }
1510
1511 vmci_ctx_qp_create(context, entry->qp.handle);
1512
1513 return VMCI_SUCCESS;
1514
1515 error:
1516 if (entry != NULL) {
1517 qp_host_free_queue(entry->produce_q, guest_produce_size);
1518 qp_host_free_queue(entry->consume_q, guest_consume_size);
1519 kfree(entry);
1520 }
1521
1522 return result;
1523}
1524
1525/*
1526 * Enqueues an event datagram to notify the peer VM attached to
1527 * the given queue pair handle about an attach/detach event by the
1528 * given VM. Returns the payload size of the datagram enqueued on
1529 * success, error code otherwise.
1530 */
1531static int qp_notify_peer(bool attach,
1532 struct vmci_handle handle,
1533 u32 my_id,
1534 u32 peer_id)
1535{
1536 int rv;
1537 struct vmci_event_qp ev;
1538
1539 if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
1540 peer_id == VMCI_INVALID_ID)
1541 return VMCI_ERROR_INVALID_ARGS;
1542
1543 /*
1544 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
1545 * number of pending events from the hypervisor to a given VM
1546 * otherwise a rogue VM could do an arbitrary number of attach
1547 * and detach operations causing memory pressure in the host
1548 * kernel.
1549 */
1550
1551 ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
1552 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
1553 VMCI_CONTEXT_RESOURCE_ID);
1554 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
1555 ev.msg.event_data.event = attach ?
1556 VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
1557 ev.payload.handle = handle;
1558 ev.payload.peer_id = my_id;
1559
1560 rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
1561 &ev.msg.hdr, false);
1562 if (rv < VMCI_SUCCESS)
1563 pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
1564 attach ? "ATTACH" : "DETACH", peer_id);
1565
1566 return rv;
1567}
1568
1569/*
1570 * The second endpoint issuing a queue pair allocation will attach to
1571 * the queue pair registered with the queue pair broker.
1572 *
1573 * If the attacher is a guest, it will associate a VMX virtual address
1574 * range with the queue pair as specified by the page_store. At this
1575 * point, the already attached host endpoint may start using the queue
1576 * pair, and an attach event is sent to it. For compatibility with
1577 * older VMX'en that used a separate step to set the VMX virtual
1578 * address range, the virtual address range can be registered later
1579 * using vmci_qp_broker_set_page_store. In that case, a page_store of
1580 * NULL should be used, and the attach event will be generated once
1581 * the actual page store has been set.
1582 *
1583 * If the attacher is the host, a page_store of NULL should be used as
1584 * well, since the page store information is already set by the guest.
1585 *
1586 * For new VMX and host callers, the queue pair will be moved to the
1587 * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
1588 * moved to the VMCIQPB_ATTACHED_NO_MEM state.
1589 */
1590static int qp_broker_attach(struct qp_broker_entry *entry,
1591 u32 peer,
1592 u32 flags,
1593 u32 priv_flags,
1594 u64 produce_size,
1595 u64 consume_size,
1596 struct vmci_qp_page_store *page_store,
1597 struct vmci_ctx *context,
1598 vmci_event_release_cb wakeup_cb,
1599 void *client_data,
1600 struct qp_broker_entry **ent)
1601{
1602 const u32 context_id = vmci_ctx_get_id(context);
1603 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1604 int result;
1605
1606 if (entry->state != VMCIQPB_CREATED_NO_MEM &&
1607 entry->state != VMCIQPB_CREATED_MEM)
1608 return VMCI_ERROR_UNAVAILABLE;
1609
1610 if (is_local) {
1611 if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
1612 context_id != entry->create_id) {
1613 return VMCI_ERROR_INVALID_ARGS;
1614 }
1615 } else if (context_id == entry->create_id ||
1616 context_id == entry->attach_id) {
1617 return VMCI_ERROR_ALREADY_EXISTS;
1618 }
1619
1620 if (VMCI_CONTEXT_IS_VM(context_id) &&
1621 VMCI_CONTEXT_IS_VM(entry->create_id))
1622 return VMCI_ERROR_DST_UNREACHABLE;
1623
1624 /*
1625 * If we are attaching from a restricted context then the queuepair
1626 * must have been created by a trusted endpoint.
1627 */
1628 if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
1629 !entry->created_by_trusted)
1630 return VMCI_ERROR_NO_ACCESS;
1631
1632 /*
1633 * If we are attaching to a queuepair that was created by a restricted
1634 * context then we must be trusted.
1635 */
1636 if (entry->require_trusted_attach &&
1637 (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
1638 return VMCI_ERROR_NO_ACCESS;
1639
1640 /*
1641 * If the creator specifies VMCI_INVALID_ID in "peer" field, access
1642 * control check is not performed.
1643 */
1644 if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
1645 return VMCI_ERROR_NO_ACCESS;
1646
1647 if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
1648 /*
1649 * Do not attach if the caller doesn't support Host Queue Pairs
1650 * and a host created this queue pair.
1651 */
1652
1653 if (!vmci_ctx_supports_host_qp(context))
1654 return VMCI_ERROR_INVALID_RESOURCE;
1655
1656 } else if (context_id == VMCI_HOST_CONTEXT_ID) {
1657 struct vmci_ctx *create_context;
1658 bool supports_host_qp;
1659
1660 /*
1661 * Do not attach a host to a user created queue pair if that
1662 * user doesn't support host queue pair end points.
1663 */
1664
1665 create_context = vmci_ctx_get(entry->create_id);
1666 supports_host_qp = vmci_ctx_supports_host_qp(create_context);
1667 vmci_ctx_put(create_context);
1668
1669 if (!supports_host_qp)
1670 return VMCI_ERROR_INVALID_RESOURCE;
1671 }
1672
1673 if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
1674 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1675
1676 if (context_id != VMCI_HOST_CONTEXT_ID) {
1677 /*
1678 * The queue pair broker entry stores values from the guest
1679 * point of view, so an attaching guest should match the values
1680 * stored in the entry.
1681 */
1682
1683 if (entry->qp.produce_size != produce_size ||
1684 entry->qp.consume_size != consume_size) {
1685 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1686 }
1687 } else if (entry->qp.produce_size != consume_size ||
1688 entry->qp.consume_size != produce_size) {
1689 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1690 }
1691
1692 if (context_id != VMCI_HOST_CONTEXT_ID) {
1693 /*
1694 * If a guest attached to a queue pair, it will supply
1695 * the backing memory. If this is a pre NOVMVM vmx,
1696 * the backing memory will be supplied by calling
1697 * vmci_qp_broker_set_page_store() following the
1698 * return of the vmci_qp_broker_alloc() call. If it is
1699 * a vmx of version NOVMVM or later, the page store
1700 * must be supplied as part of the
1701 * vmci_qp_broker_alloc call. Under all circumstances,
1702 * the initially created queue pair must not have any
1703 * memory associated with it already.
1704 */
1705
1706 if (entry->state != VMCIQPB_CREATED_NO_MEM)
1707 return VMCI_ERROR_INVALID_ARGS;
1708
1709 if (page_store != NULL) {
1710 /*
1711 * Patch up host state to point to guest
1712 * supplied memory. The VMX already
1713 * initialized the queue pair headers, so no
1714 * need for the kernel side to do that.
1715 */
1716
1717 result = qp_host_register_user_memory(page_store,
1718 entry->produce_q,
1719 entry->consume_q);
1720 if (result < VMCI_SUCCESS)
1721 return result;
1722
1723 /*
1724 * Preemptively load in the headers if non-blocking to
1725 * prevent blocking later.
1726 */
1727 if (entry->qp.flags & VMCI_QPFLAG_NONBLOCK) {
1728 result = qp_host_map_queues(entry->produce_q,
1729 entry->consume_q);
1730 if (result < VMCI_SUCCESS) {
1731 qp_host_unregister_user_memory(
1732 entry->produce_q,
1733 entry->consume_q);
1734 return result;
1735 }
1736 }
1737
1738 entry->state = VMCIQPB_ATTACHED_MEM;
1739 } else {
1740 entry->state = VMCIQPB_ATTACHED_NO_MEM;
1741 }
1742 } else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
1743 /*
1744 * The host side is attempting to attach to a queue
1745 * pair that doesn't have any memory associated with
1746 * it. This must be a pre NOVMVM vmx that hasn't set
1747 * the page store information yet, or a quiesced VM.
1748 */
1749
1750 return VMCI_ERROR_UNAVAILABLE;
1751 } else {
1752 /*
1753 * For non-blocking queue pairs, we cannot rely on
1754 * enqueue/dequeue to map in the pages on the
1755 * host-side, since it may block, so we make an
1756 * attempt here.
1757 */
1758
1759 if (flags & VMCI_QPFLAG_NONBLOCK) {
1760 result =
1761 qp_host_map_queues(entry->produce_q,
1762 entry->consume_q);
1763 if (result < VMCI_SUCCESS)
1764 return result;
1765
1766 entry->qp.flags |= flags &
1767 (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED);
1768 }
1769
1770 /* The host side has successfully attached to a queue pair. */
1771 entry->state = VMCIQPB_ATTACHED_MEM;
1772 }
1773
1774 if (entry->state == VMCIQPB_ATTACHED_MEM) {
1775 result =
1776 qp_notify_peer(true, entry->qp.handle, context_id,
1777 entry->create_id);
1778 if (result < VMCI_SUCCESS)
1779 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
1780 entry->create_id, entry->qp.handle.context,
1781 entry->qp.handle.resource);
1782 }
1783
1784 entry->attach_id = context_id;
1785 entry->qp.ref_count++;
1786 if (wakeup_cb) {
1787 entry->wakeup_cb = wakeup_cb;
1788 entry->client_data = client_data;
1789 }
1790
1791 /*
1792 * When attaching to local queue pairs, the context already has
1793 * an entry tracking the queue pair, so don't add another one.
1794 */
1795 if (!is_local)
1796 vmci_ctx_qp_create(context, entry->qp.handle);
1797
1798 if (ent != NULL)
1799 *ent = entry;
1800
1801 return VMCI_SUCCESS;
1802}
1803
1804/*
1805 * queue_pair_Alloc for use when setting up queue pair endpoints
1806 * on the host.
1807 */
1808static int qp_broker_alloc(struct vmci_handle handle,
1809 u32 peer,
1810 u32 flags,
1811 u32 priv_flags,
1812 u64 produce_size,
1813 u64 consume_size,
1814 struct vmci_qp_page_store *page_store,
1815 struct vmci_ctx *context,
1816 vmci_event_release_cb wakeup_cb,
1817 void *client_data,
1818 struct qp_broker_entry **ent,
1819 bool *swap)
1820{
1821 const u32 context_id = vmci_ctx_get_id(context);
1822 bool create;
1823 struct qp_broker_entry *entry = NULL;
1824 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1825 int result;
1826
1827 if (vmci_handle_is_invalid(handle) ||
1828 (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
1829 !(produce_size || consume_size) ||
1830 !context || context_id == VMCI_INVALID_ID ||
1831 handle.context == VMCI_INVALID_ID) {
1832 return VMCI_ERROR_INVALID_ARGS;
1833 }
1834
1835 if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
1836 return VMCI_ERROR_INVALID_ARGS;
1837
1838 /*
1839 * In the initial argument check, we ensure that non-vmkernel hosts
1840 * are not allowed to create local queue pairs.
1841 */
1842
1843 mutex_lock(&qp_broker_list.mutex);
1844
1845 if (!is_local && vmci_ctx_qp_exists(context, handle)) {
1846 pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
1847 context_id, handle.context, handle.resource);
1848 mutex_unlock(&qp_broker_list.mutex);
1849 return VMCI_ERROR_ALREADY_EXISTS;
1850 }
1851
1852 if (handle.resource != VMCI_INVALID_ID)
1853 entry = qp_broker_handle_to_entry(handle);
1854
1855 if (!entry) {
1856 create = true;
1857 result =
1858 qp_broker_create(handle, peer, flags, priv_flags,
1859 produce_size, consume_size, page_store,
1860 context, wakeup_cb, client_data, ent);
1861 } else {
1862 create = false;
1863 result =
1864 qp_broker_attach(entry, peer, flags, priv_flags,
1865 produce_size, consume_size, page_store,
1866 context, wakeup_cb, client_data, ent);
1867 }
1868
1869 mutex_unlock(&qp_broker_list.mutex);
1870
1871 if (swap)
1872 *swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
1873 !(create && is_local);
1874
1875 return result;
1876}
1877
1878/*
1879 * This function implements the kernel API for allocating a queue
1880 * pair.
1881 */
1882static int qp_alloc_host_work(struct vmci_handle *handle,
1883 struct vmci_queue **produce_q,
1884 u64 produce_size,
1885 struct vmci_queue **consume_q,
1886 u64 consume_size,
1887 u32 peer,
1888 u32 flags,
1889 u32 priv_flags,
1890 vmci_event_release_cb wakeup_cb,
1891 void *client_data)
1892{
1893 struct vmci_handle new_handle;
1894 struct vmci_ctx *context;
1895 struct qp_broker_entry *entry;
1896 int result;
1897 bool swap;
1898
1899 if (vmci_handle_is_invalid(*handle)) {
1900 new_handle = vmci_make_handle(
1901 VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
1902 } else
1903 new_handle = *handle;
1904
1905 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1906 entry = NULL;
1907 result =
1908 qp_broker_alloc(new_handle, peer, flags, priv_flags,
1909 produce_size, consume_size, NULL, context,
1910 wakeup_cb, client_data, &entry, &swap);
1911 if (result == VMCI_SUCCESS) {
1912 if (swap) {
1913 /*
1914 * If this is a local queue pair, the attacher
1915 * will swap around produce and consume
1916 * queues.
1917 */
1918
1919 *produce_q = entry->consume_q;
1920 *consume_q = entry->produce_q;
1921 } else {
1922 *produce_q = entry->produce_q;
1923 *consume_q = entry->consume_q;
1924 }
1925
1926 *handle = vmci_resource_handle(&entry->resource);
1927 } else {
1928 *handle = VMCI_INVALID_HANDLE;
1929 pr_devel("queue pair broker failed to alloc (result=%d)\n",
1930 result);
1931 }
1932 vmci_ctx_put(context);
1933 return result;
1934}
1935
1936/*
1937 * Allocates a VMCI queue_pair. Only checks validity of input
1938 * arguments. The real work is done in the host or guest
1939 * specific function.
1940 */
1941int vmci_qp_alloc(struct vmci_handle *handle,
1942 struct vmci_queue **produce_q,
1943 u64 produce_size,
1944 struct vmci_queue **consume_q,
1945 u64 consume_size,
1946 u32 peer,
1947 u32 flags,
1948 u32 priv_flags,
1949 bool guest_endpoint,
1950 vmci_event_release_cb wakeup_cb,
1951 void *client_data)
1952{
1953 if (!handle || !produce_q || !consume_q ||
1954 (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
1955 return VMCI_ERROR_INVALID_ARGS;
1956
1957 if (guest_endpoint) {
1958 return qp_alloc_guest_work(handle, produce_q,
1959 produce_size, consume_q,
1960 consume_size, peer,
1961 flags, priv_flags);
1962 } else {
1963 return qp_alloc_host_work(handle, produce_q,
1964 produce_size, consume_q,
1965 consume_size, peer, flags,
1966 priv_flags, wakeup_cb, client_data);
1967 }
1968}
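/*
 * Editorial usage sketch (hypothetical caller, not part of this file):
 * a guest endpoint allocating a symmetric 64 KiB queue pair, assuming
 * the usual VMCI constants from vmw_vmci_defs.h and eliding error
 * handling.
 *
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	struct vmci_queue *produce_q, *consume_q;
 *	int rv;
 *
 *	rv = vmci_qp_alloc(&handle, &produce_q, 64 * 1024,
 *			   &consume_q, 64 * 1024, VMCI_INVALID_ID,
 *			   0, VMCI_NO_PRIVILEGE_FLAGS,
 *			   true, NULL, NULL);
 */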
1969
1970/*
1971 * This function implements the host kernel API for detaching from
1972 * a queue pair.
1973 */
1974static int qp_detatch_host_work(struct vmci_handle handle)
1975{
1976 int result;
1977 struct vmci_ctx *context;
1978
1979 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1980
1981 result = vmci_qp_broker_detach(handle, context);
1982
1983 vmci_ctx_put(context);
1984 return result;
1985}
1986
1987/*
1988 * Detaches from a VMCI queue_pair. Only checks validity of input argument.
1989 * Real work is done in the host or guest specific function.
1990 */
1991static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
1992{
1993 if (vmci_handle_is_invalid(handle))
1994 return VMCI_ERROR_INVALID_ARGS;
1995
1996 if (guest_endpoint)
1997 return qp_detatch_guest_work(handle);
1998 else
1999 return qp_detatch_host_work(handle);
2000}
2001
2002/*
2003 * Returns the entry from the head of the list. Assumes that the list is
2004 * locked.
2005 */
2006static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
2007{
2008 if (!list_empty(&qp_list->head)) {
2009 struct qp_entry *entry =
2010 list_first_entry(&qp_list->head, struct qp_entry,
2011 list_item);
2012 return entry;
2013 }
2014
2015 return NULL;
2016}
2017
2018void vmci_qp_broker_exit(void)
2019{
2020 struct qp_entry *entry;
2021 struct qp_broker_entry *be;
2022
2023 mutex_lock(&qp_broker_list.mutex);
2024
2025 while ((entry = qp_list_get_head(&qp_broker_list))) {
2026 be = (struct qp_broker_entry *)entry;
2027
2028 qp_list_remove_entry(&qp_broker_list, entry);
2029 kfree(be);
2030 }
2031
2032 mutex_unlock(&qp_broker_list.mutex);
2033}
2034
2035/*
2036 * Requests that a queue pair be allocated with the VMCI queue
2037 * pair broker. Allocates a queue pair entry if one does not
2038 * exist. Attaches to one if it exists, and retrieves the page
2039 * files backing that queue_pair. Assumes that the queue pair
2040 * broker lock is held.
2041 */
2042int vmci_qp_broker_alloc(struct vmci_handle handle,
2043 u32 peer,
2044 u32 flags,
2045 u32 priv_flags,
2046 u64 produce_size,
2047 u64 consume_size,
2048 struct vmci_qp_page_store *page_store,
2049 struct vmci_ctx *context)
2050{
2051 return qp_broker_alloc(handle, peer, flags, priv_flags,
2052 produce_size, consume_size,
2053 page_store, context, NULL, NULL, NULL, NULL);
2054}
2055
2056/*
2057 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
2058 * step to add the UVAs of the VMX mapping of the queue pair. This function
2059 * provides backwards compatibility with such VMX'en, and takes care of
2060 * registering the page store for a queue pair previously allocated by the
2061 * VMX during create or attach. This function will move the queue pair state
2062 * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
2063 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
2064 * attached state with memory, the queue pair is ready to be used by the
2065 * host peer, and an attached event will be generated.
2066 *
2067 * Assumes that the queue pair broker lock is held.
2068 *
2069 * This function is only used by the hosted platform, since there is no
2070 * issue with backwards compatibility for vmkernel.
2071 */
2072int vmci_qp_broker_set_page_store(struct vmci_handle handle,
2073 u64 produce_uva,
2074 u64 consume_uva,
2075 struct vmci_ctx *context)
2076{
2077 struct qp_broker_entry *entry;
2078 int result;
2079 const u32 context_id = vmci_ctx_get_id(context);
2080
2081 if (vmci_handle_is_invalid(handle) || !context ||
2082 context_id == VMCI_INVALID_ID)
2083 return VMCI_ERROR_INVALID_ARGS;
2084
2085 /*
2086 * We only support guest to host queue pairs, so the VMX must
2087 * supply UVAs for the mapped page files.
2088 */
2089
2090 if (produce_uva == 0 || consume_uva == 0)
2091 return VMCI_ERROR_INVALID_ARGS;
2092
2093 mutex_lock(&qp_broker_list.mutex);
2094
2095 if (!vmci_ctx_qp_exists(context, handle)) {
2096 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2097 context_id, handle.context, handle.resource);
2098 result = VMCI_ERROR_NOT_FOUND;
2099 goto out;
2100 }
2101
2102 entry = qp_broker_handle_to_entry(handle);
2103 if (!entry) {
2104 result = VMCI_ERROR_NOT_FOUND;
2105 goto out;
2106 }
2107
2108 /*
2109 * If I'm the owner then I can set the page store.
2110 *
2111 * Or, if a host created the queue_pair and I'm the attached peer
2112 * then I can set the page store.
2113 */
2114 if (entry->create_id != context_id &&
2115 (entry->create_id != VMCI_HOST_CONTEXT_ID ||
2116 entry->attach_id != context_id)) {
2117 result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
2118 goto out;
2119 }
2120
2121 if (entry->state != VMCIQPB_CREATED_NO_MEM &&
2122 entry->state != VMCIQPB_ATTACHED_NO_MEM) {
2123 result = VMCI_ERROR_UNAVAILABLE;
2124 goto out;
2125 }
2126
2127 result = qp_host_get_user_memory(produce_uva, consume_uva,
2128 entry->produce_q, entry->consume_q);
2129 if (result < VMCI_SUCCESS)
2130 goto out;
2131
2132 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2133 if (result < VMCI_SUCCESS) {
2134 qp_host_unregister_user_memory(entry->produce_q,
2135 entry->consume_q);
2136 goto out;
2137 }
2138
2139 if (entry->state == VMCIQPB_CREATED_NO_MEM)
2140 entry->state = VMCIQPB_CREATED_MEM;
2141 else
2142 entry->state = VMCIQPB_ATTACHED_MEM;
2143
2144 entry->vmci_page_files = true;
2145
2146 if (entry->state == VMCIQPB_ATTACHED_MEM) {
2147 result =
2148 qp_notify_peer(true, handle, context_id, entry->create_id);
2149 if (result < VMCI_SUCCESS) {
2150 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
2151 entry->create_id, entry->qp.handle.context,
2152 entry->qp.handle.resource);
2153 }
2154 }
2155
2156 result = VMCI_SUCCESS;
2157 out:
2158 mutex_unlock(&qp_broker_list.mutex);
2159 return result;
2160}
2161
2162/*
2163 * Resets saved queue headers for the given QP broker
2164 * entry. Should be used when guest memory becomes available
2165 * again, or the guest detaches.
2166 */
2167static void qp_reset_saved_headers(struct qp_broker_entry *entry)
2168{
2169 entry->produce_q->saved_header = NULL;
2170 entry->consume_q->saved_header = NULL;
2171}
2172
2173/*
2174 * The main entry point for detaching from a queue pair registered with the
2175 * queue pair broker. If more than one endpoint is attached to the queue
2176 * pair, the first endpoint will mainly decrement a reference count and
2177 * generate a notification to its peer. The last endpoint will clean up
2178 * the queue pair state registered with the broker.
2179 *
2180 * When a guest endpoint detaches, it will unmap and unregister the guest
2181 * memory backing the queue pair. If the host is still attached, it will
2182 * no longer be able to access the queue pair content.
2183 *
2184 * If the queue pair is already in a state where there is no memory
2185 * registered for the queue pair (any *_NO_MEM state), it will transition to
2186 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen if a guest
2187 * endpoint is the first of two endpoints to detach. If the host endpoint is
2188 * the first out of two to detach, the queue pair will move to the
2189 * VMCIQPB_SHUTDOWN_MEM state.
2190 */
2191int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
2192{
2193 struct qp_broker_entry *entry;
2194 const u32 context_id = vmci_ctx_get_id(context);
2195 u32 peer_id;
2196 bool is_local = false;
2197 int result;
2198
2199 if (vmci_handle_is_invalid(handle) || !context ||
2200 context_id == VMCI_INVALID_ID) {
2201 return VMCI_ERROR_INVALID_ARGS;
2202 }
2203
2204 mutex_lock(&qp_broker_list.mutex);
2205
2206 if (!vmci_ctx_qp_exists(context, handle)) {
2207 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2208 context_id, handle.context, handle.resource);
2209 result = VMCI_ERROR_NOT_FOUND;
2210 goto out;
2211 }
2212
2213 entry = qp_broker_handle_to_entry(handle);
2214 if (!entry) {
2215 pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n",
2216 context_id, handle.context, handle.resource);
2217 result = VMCI_ERROR_NOT_FOUND;
2218 goto out;
2219 }
2220
2221 if (context_id != entry->create_id && context_id != entry->attach_id) {
2222 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2223 goto out;
2224 }
2225
2226 if (context_id == entry->create_id) {
2227 peer_id = entry->attach_id;
2228 entry->create_id = VMCI_INVALID_ID;
2229 } else {
2230 peer_id = entry->create_id;
2231 entry->attach_id = VMCI_INVALID_ID;
2232 }
2233 entry->qp.ref_count--;
2234
2235 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2236
2237 if (context_id != VMCI_HOST_CONTEXT_ID) {
2238 bool headers_mapped;
2239
2240 /*
2241 * Pre NOVMVM vmx'en may detach from a queue pair
2242 * before setting the page store, and in that case
2243 * there is no user memory to detach from. Also, more
2244 * recent VMX'en may detach from a queue pair in the
2245 * quiesced state.
2246 */
2247
2248 qp_acquire_queue_mutex(entry->produce_q);
2249 headers_mapped = entry->produce_q->q_header ||
2250 entry->consume_q->q_header;
2251 if (QPBROKERSTATE_HAS_MEM(entry)) {
2252 result =
2253 qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
2254 entry->produce_q,
2255 entry->consume_q);
2256 if (result < VMCI_SUCCESS)
2257 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2258 handle.context, handle.resource,
2259 result);
2260
2261			qp_host_unregister_user_memory(entry->produce_q,
2262						       entry->consume_q);
2269
2270 }
2271
2272 if (!headers_mapped)
2273 qp_reset_saved_headers(entry);
2274
2275 qp_release_queue_mutex(entry->produce_q);
2276
2277 if (!headers_mapped && entry->wakeup_cb)
2278 entry->wakeup_cb(entry->client_data);
2279
2280 } else {
2281 if (entry->wakeup_cb) {
2282 entry->wakeup_cb = NULL;
2283 entry->client_data = NULL;
2284 }
2285 }
2286
2287 if (entry->qp.ref_count == 0) {
2288 qp_list_remove_entry(&qp_broker_list, &entry->qp);
2289
2290 if (is_local)
2291 kfree(entry->local_mem);
2292
2293 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
2294 qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
2295 qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
2296 /* Unlink from resource hash table and free callback */
2297 vmci_resource_remove(&entry->resource);
2298
2299 kfree(entry);
2300
2301 vmci_ctx_qp_destroy(context, handle);
2302 } else {
2303 qp_notify_peer(false, handle, context_id, peer_id);
2304 if (context_id == VMCI_HOST_CONTEXT_ID &&
2305 QPBROKERSTATE_HAS_MEM(entry)) {
2306 entry->state = VMCIQPB_SHUTDOWN_MEM;
2307 } else {
2308 entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
2309 }
2310
2311 if (!is_local)
2312 vmci_ctx_qp_destroy(context, handle);
2313
2314 }
2315 result = VMCI_SUCCESS;
2316 out:
2317 mutex_unlock(&qp_broker_list.mutex);
2318 return result;
2319}
2320
2321/*
2322 * Establishes the necessary mappings for a queue pair given a
2323 * reference to the queue pair guest memory. This is usually
2324 * called when a guest is unquiesced and the VMX is allowed to
2325 * map guest memory once again.
2326 */
2327int vmci_qp_broker_map(struct vmci_handle handle,
2328 struct vmci_ctx *context,
2329 u64 guest_mem)
2330{
2331 struct qp_broker_entry *entry;
2332 const u32 context_id = vmci_ctx_get_id(context);
2333 bool is_local = false;
2334 int result;
2335
2336 if (vmci_handle_is_invalid(handle) || !context ||
2337 context_id == VMCI_INVALID_ID)
2338 return VMCI_ERROR_INVALID_ARGS;
2339
2340 mutex_lock(&qp_broker_list.mutex);
2341
2342 if (!vmci_ctx_qp_exists(context, handle)) {
2343 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2344 context_id, handle.context, handle.resource);
2345 result = VMCI_ERROR_NOT_FOUND;
2346 goto out;
2347 }
2348
2349 entry = qp_broker_handle_to_entry(handle);
2350 if (!entry) {
2351 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2352 context_id, handle.context, handle.resource);
2353 result = VMCI_ERROR_NOT_FOUND;
2354 goto out;
2355 }
2356
2357 if (context_id != entry->create_id && context_id != entry->attach_id) {
2358 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2359 goto out;
2360 }
2361
2362 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2363 result = VMCI_SUCCESS;
2364
2365 if (context_id != VMCI_HOST_CONTEXT_ID) {
2366 struct vmci_qp_page_store page_store;
2367
2368 page_store.pages = guest_mem;
2369 page_store.len = QPE_NUM_PAGES(entry->qp);
2370
2371 qp_acquire_queue_mutex(entry->produce_q);
2372 qp_reset_saved_headers(entry);
2373 result =
2374 qp_host_register_user_memory(&page_store,
2375 entry->produce_q,
2376 entry->consume_q);
2377 qp_release_queue_mutex(entry->produce_q);
2378 if (result == VMCI_SUCCESS) {
2379 /* Move state from *_NO_MEM to *_MEM */
2380
2381 entry->state++;
2382
2383 if (entry->wakeup_cb)
2384 entry->wakeup_cb(entry->client_data);
2385 }
2386 }
2387
2388 out:
2389 mutex_unlock(&qp_broker_list.mutex);
2390 return result;
2391}
2392
2393/*
2394 * Saves a snapshot of the queue headers for the given QP broker
2395 * entry. Should be used when guest memory is unmapped.
2396 * Results:
2397 * VMCI_SUCCESS on success, appropriate error code if guest memory
2398 * can't be accessed.
2399 */
2400static int qp_save_headers(struct qp_broker_entry *entry)
2401{
2402 int result;
2403
2404 if (entry->produce_q->saved_header != NULL &&
2405 entry->consume_q->saved_header != NULL) {
2406 /*
2407 * If the headers have already been saved, we don't need to do
2408 * it again, and we don't want to map in the headers
2409 * unnecessarily.
2410 */
2411
2412 return VMCI_SUCCESS;
2413 }
2414
2415 if (NULL == entry->produce_q->q_header ||
2416 NULL == entry->consume_q->q_header) {
2417 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2418 if (result < VMCI_SUCCESS)
2419 return result;
2420 }
2421
2422 memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
2423 sizeof(entry->saved_produce_q));
2424 entry->produce_q->saved_header = &entry->saved_produce_q;
2425 memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
2426 sizeof(entry->saved_consume_q));
2427 entry->consume_q->saved_header = &entry->saved_consume_q;
2428
2429 return VMCI_SUCCESS;
2430}
2431
2432/*
2433 * Removes all references to the guest memory of a given queue pair, and
2434 * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
2435 * called when a VM is being quiesced, where access to guest memory should
2436 * be avoided.
2437 */
2438int vmci_qp_broker_unmap(struct vmci_handle handle,
2439 struct vmci_ctx *context,
2440 u32 gid)
2441{
2442 struct qp_broker_entry *entry;
2443 const u32 context_id = vmci_ctx_get_id(context);
2444 bool is_local = false;
2445 int result;
2446
2447 if (vmci_handle_is_invalid(handle) || !context ||
2448 context_id == VMCI_INVALID_ID)
2449 return VMCI_ERROR_INVALID_ARGS;
2450
2451 mutex_lock(&qp_broker_list.mutex);
2452
2453 if (!vmci_ctx_qp_exists(context, handle)) {
2454 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2455 context_id, handle.context, handle.resource);
2456 result = VMCI_ERROR_NOT_FOUND;
2457 goto out;
2458 }
2459
2460 entry = qp_broker_handle_to_entry(handle);
2461 if (!entry) {
2462 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2463 context_id, handle.context, handle.resource);
2464 result = VMCI_ERROR_NOT_FOUND;
2465 goto out;
2466 }
2467
2468 if (context_id != entry->create_id && context_id != entry->attach_id) {
2469 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2470 goto out;
2471 }
2472
2473 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2474
2475 if (context_id != VMCI_HOST_CONTEXT_ID) {
2476 qp_acquire_queue_mutex(entry->produce_q);
2477 result = qp_save_headers(entry);
2478 if (result < VMCI_SUCCESS)
2479 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2480 handle.context, handle.resource, result);
2481
2482 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);
2483
2484 /*
2485 * On hosted, when we unmap queue pairs, the VMX will also
2486 * unmap the guest memory, so we invalidate the previously
2487 * registered memory. If the queue pair is mapped again at a
2488 * later point in time, we will need to reregister the user
2489 * memory with a possibly new user VA.
2490 */
2491 qp_host_unregister_user_memory(entry->produce_q,
2492 entry->consume_q);
2493
2494 /*
2495 * Move state from *_MEM to *_NO_MEM.
2496 */
2497 entry->state--;
2498
2499 qp_release_queue_mutex(entry->produce_q);
2500 }
2501
2502 result = VMCI_SUCCESS;
2503
2504 out:
2505 mutex_unlock(&qp_broker_list.mutex);
2506 return result;
2507}
2508
2509/*
2510 * Destroys all guest queue pair endpoints. If active guest queue
2511 * pairs still exist, hypercalls to attempt detach from these
2512 * queue pairs will be made. Any failure to detach is silently
2513 * ignored.
2514 */
2515void vmci_qp_guest_endpoints_exit(void)
2516{
2517 struct qp_entry *entry;
2518 struct qp_guest_endpoint *ep;
2519
2520 mutex_lock(&qp_guest_endpoints.mutex);
2521
2522 while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
2523 ep = (struct qp_guest_endpoint *)entry;
2524
2525 /* Don't make a hypercall for local queue_pairs. */
2526 if (!(entry->flags & VMCI_QPFLAG_LOCAL))
2527 qp_detatch_hypercall(entry->handle);
2528
2529 /* We cannot fail the exit, so let's reset ref_count. */
2530 entry->ref_count = 0;
2531 qp_list_remove_entry(&qp_guest_endpoints, entry);
2532
2533 qp_guest_endpoint_destroy(ep);
2534 }
2535
2536 mutex_unlock(&qp_guest_endpoints.mutex);
2537}
2538
2539/*
2540 * Helper routine that will lock the queue pair before subsequent
2541 * operations.
2542 * Note: Non-blocking on the host side is currently only implemented in ESX.
2543 * Since non-blocking isn't yet implemented on the host personality we
2544 * have no reason to acquire a spin lock. So to avoid the use of an
2545 * unnecessary lock only acquire the mutex if we can block.
2546 * Note: It is assumed that QPFLAG_PINNED implies QPFLAG_NONBLOCK. Therefore
2547 * we can use the same locking function for access to both the queue
2548 *       and the queue headers as it is the same logic. Assert this behavior.
2549 */
2550static void qp_lock(const struct vmci_qp *qpair)
2551{
2552 if (vmci_can_block(qpair->flags))
2553 qp_acquire_queue_mutex(qpair->produce_q);
2554}
2555
2556/*
2557 * Helper routine that unlocks the queue pair after calling
2558 * qp_lock. Respects non-blocking and pinning flags.
2559 */
2560static void qp_unlock(const struct vmci_qp *qpair)
2561{
2562 if (vmci_can_block(qpair->flags))
2563 qp_release_queue_mutex(qpair->produce_q);
2564}
2565
2566/*
2567 * The queue headers may not be mapped at all times. If a queue is
2568 * currently not mapped, an attempt will be made to map it.
2569 */
2570static int qp_map_queue_headers(struct vmci_queue *produce_q,
2571 struct vmci_queue *consume_q,
2572 bool can_block)
2573{
2574 int result;
2575
2576 if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
2577 if (can_block)
2578 result = qp_host_map_queues(produce_q, consume_q);
2579 else
2580 result = VMCI_ERROR_QUEUEPAIR_NOT_READY;
2581
2582 if (result < VMCI_SUCCESS)
2583 return (produce_q->saved_header &&
2584 consume_q->saved_header) ?
2585 VMCI_ERROR_QUEUEPAIR_NOT_READY :
2586 VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2587 }
2588
2589 return VMCI_SUCCESS;
2590}
2591
2592/*
2593 * Helper routine that will retrieve the produce and consume
2594 * headers of a given queue pair. If the guest memory of the
2595 * queue pair is currently not available, the saved queue headers
2596 * will be returned, if these are available.
2597 */
2598static int qp_get_queue_headers(const struct vmci_qp *qpair,
2599 struct vmci_queue_header **produce_q_header,
2600 struct vmci_queue_header **consume_q_header)
2601{
2602 int result;
2603
2604 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q,
2605 vmci_can_block(qpair->flags));
2606 if (result == VMCI_SUCCESS) {
2607 *produce_q_header = qpair->produce_q->q_header;
2608 *consume_q_header = qpair->consume_q->q_header;
2609 } else if (qpair->produce_q->saved_header &&
2610 qpair->consume_q->saved_header) {
2611 *produce_q_header = qpair->produce_q->saved_header;
2612 *consume_q_header = qpair->consume_q->saved_header;
2613 result = VMCI_SUCCESS;
2614 }
2615
2616 return result;
2617}
2618
2619/*
2620 * Callback from VMCI queue pair broker indicating that a queue
2621 * pair that was previously not ready, now either is ready or
2622 * gone forever.
2623 */
2624static int qp_wakeup_cb(void *client_data)
2625{
2626 struct vmci_qp *qpair = (struct vmci_qp *)client_data;
2627
2628 qp_lock(qpair);
2629 while (qpair->blocked > 0) {
2630 qpair->blocked--;
2631 qpair->generation++;
2632 wake_up(&qpair->event);
2633 }
2634 qp_unlock(qpair);
2635
2636 return VMCI_SUCCESS;
2637}
2638
2639/*
2640 * Makes the calling thread wait for the queue pair to become
2641 * ready for host side access. Returns true when thread is
2642 * woken up after queue pair state change, false otherwise.
2643 */
2644static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
2645{
2646 unsigned int generation;
2647
2648 if (qpair->flags & VMCI_QPFLAG_NONBLOCK)
2649 return false;
2650
2651 qpair->blocked++;
2652 generation = qpair->generation;
2653 qp_unlock(qpair);
2654 wait_event(qpair->event, generation != qpair->generation);
2655 qp_lock(qpair);
2656
2657 return true;
2658}
2659
2660/*
2661 * Enqueues a given buffer to the produce queue using the provided
2662 * function. As many bytes as possible (space available in the queue)
2663 * are enqueued. Assumes the queue->mutex has been acquired. Returns
2664 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
2665 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
2666 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
2667 * an error occurred when accessing the buffer,
2668 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
2669 * available. Otherwise, the number of bytes written to the queue is
2670 * returned. Updates the tail pointer of the produce queue.
2671 */
2672static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
2673 struct vmci_queue *consume_q,
2674 const u64 produce_q_size,
2675 const void *buf,
2676 size_t buf_size,
2677 vmci_memcpy_to_queue_func memcpy_to_queue,
2678 bool can_block)
2679{
2680 s64 free_space;
2681 u64 tail;
2682 size_t written;
2683 ssize_t result;
2684
2685 result = qp_map_queue_headers(produce_q, consume_q, can_block);
2686 if (unlikely(result != VMCI_SUCCESS))
2687 return result;
2688
2689 free_space = vmci_q_header_free_space(produce_q->q_header,
2690 consume_q->q_header,
2691 produce_q_size);
2692 if (free_space == 0)
2693 return VMCI_ERROR_QUEUEPAIR_NOSPACE;
2694
2695 if (free_space < VMCI_SUCCESS)
2696 return (ssize_t) free_space;
2697
2698 written = (size_t) (free_space > buf_size ? buf_size : free_space);
2699 tail = vmci_q_header_producer_tail(produce_q->q_header);
2700 if (likely(tail + written < produce_q_size)) {
2701 result = memcpy_to_queue(produce_q, tail, buf, 0, written);
2702 } else {
2703 /* Tail pointer wraps around. */
2704
2705 const size_t tmp = (size_t) (produce_q_size - tail);
2706
2707 result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
2708 if (result >= VMCI_SUCCESS)
2709 result = memcpy_to_queue(produce_q, 0, buf, tmp,
2710 written - tmp);
2711 }
2712
2713 if (result < VMCI_SUCCESS)
2714 return result;
2715
2716 vmci_q_header_add_producer_tail(produce_q->q_header, written,
2717 produce_q_size);
2718 return written;
2719}
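
The wrap-around split above is easiest to see with concrete numbers. The sketch below mirrors the same arithmetic outside the driver, using hypothetical sizes; it is illustrative only and not part of the patch.

/* Illustrative sketch of the tail wrap-around split in qp_enqueue_locked(). */
static void example_split_for_wrap(u64 tail, u64 produce_q_size, size_t written,
				   size_t *first_chunk, size_t *second_chunk)
{
	if (tail + written < produce_q_size) {
		*first_chunk = written;		/* one contiguous copy at offset tail */
		*second_chunk = 0;
	} else {
		*first_chunk = (size_t)(produce_q_size - tail);	/* copy up to the end */
		*second_chunk = written - *first_chunk;		/* remainder from offset 0 */
	}
}
/* e.g. produce_q_size = 16, tail = 14, written = 5 -> chunks of 2 and 3 bytes */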
2720
2721/*
2722 * Dequeues data (if available) from the given consume queue. Writes data
2723 * to the user provided buffer using the provided function.
2724 * Assumes the queue->mutex has been acquired.
2725 * Results:
2726 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
2727 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
2728 * (as defined by the queue size).
2729 * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
2730 * Otherwise the number of bytes dequeued is returned.
2731 * Side effects:
2732 * Updates the head pointer of the consume queue.
2733 */
2734static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
2735 struct vmci_queue *consume_q,
2736 const u64 consume_q_size,
2737 void *buf,
2738 size_t buf_size,
2739 vmci_memcpy_from_queue_func memcpy_from_queue,
2740 bool update_consumer,
2741 bool can_block)
2742{
2743 s64 buf_ready;
2744 u64 head;
2745 size_t read;
2746 ssize_t result;
2747
2748 result = qp_map_queue_headers(produce_q, consume_q, can_block);
2749 if (unlikely(result != VMCI_SUCCESS))
2750 return result;
2751
2752 buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
2753 produce_q->q_header,
2754 consume_q_size);
2755 if (buf_ready == 0)
2756 return VMCI_ERROR_QUEUEPAIR_NODATA;
2757
2758 if (buf_ready < VMCI_SUCCESS)
2759 return (ssize_t) buf_ready;
2760
2761 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
2762 head = vmci_q_header_consumer_head(produce_q->q_header);
2763 if (likely(head + read < consume_q_size)) {
2764 result = memcpy_from_queue(buf, 0, consume_q, head, read);
2765 } else {
2766 /* Head pointer wraps around. */
2767
2768 const size_t tmp = (size_t) (consume_q_size - head);
2769
2770 result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
2771 if (result >= VMCI_SUCCESS)
2772 result = memcpy_from_queue(buf, tmp, consume_q, 0,
2773 read - tmp);
2774
2775 }
2776
2777 if (result < VMCI_SUCCESS)
2778 return result;
2779
2780 if (update_consumer)
2781 vmci_q_header_add_consumer_head(produce_q->q_header,
2782 read, consume_q_size);
2783
2784 return read;
2785}
2786
2787/*
2788 * vmci_qpair_alloc() - Allocates a queue pair.
2789 * @qpair: Pointer for the new vmci_qp struct.
2790 * @handle: Handle to track the resource.
2791 * @produce_qsize: Desired size of the producer queue.
2792 * @consume_qsize: Desired size of the consumer queue.
2793 * @peer: ContextID of the peer.
2794 * @flags: VMCI flags.
2795 * @priv_flags: VMCI privilege flags.
2796 *
2797 * This is the client interface for allocating the memory for a
2798 * vmci_qp structure and then attaching to the underlying
2799 * queue. If an error occurs allocating the memory for the
2800 * vmci_qp structure no attempt is made to attach. If an
2801 * error occurs attaching, then the structure is freed.
2802 */
2803int vmci_qpair_alloc(struct vmci_qp **qpair,
2804 struct vmci_handle *handle,
2805 u64 produce_qsize,
2806 u64 consume_qsize,
2807 u32 peer,
2808 u32 flags,
2809 u32 priv_flags)
2810{
2811 struct vmci_qp *my_qpair;
2812 int retval;
2813 struct vmci_handle src = VMCI_INVALID_HANDLE;
2814 struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
2815 enum vmci_route route;
2816 vmci_event_release_cb wakeup_cb;
2817 void *client_data;
2818
2819 /*
2820 * Restrict the size of a queuepair. The device already
2821 * enforces a limit on the total amount of memory that can be
2822 * allocated to queuepairs for a guest. However, we try to
2823 * allocate this memory before we make the queuepair
2824 * allocation hypercall. On Linux, we allocate each page
2825 * separately, which means rather than fail, the guest will
2826 * thrash while it tries to allocate, and will become
2827 * increasingly unresponsive to the point where it appears to
2828 * be hung. So we place a limit on the size of an individual
2829 * queuepair here, and leave the device to enforce the
2830 * restriction on total queuepair memory. (Note that this
2831 * doesn't prevent all cases; a user with only this much
2832 * physical memory could still get into trouble.) The error
2833 * used by the device is NO_RESOURCES, so use that here too.
2834 */
2835
2836 if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
2837 produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
2838 return VMCI_ERROR_NO_RESOURCES;
2839
2840 retval = vmci_route(&src, &dst, false, &route);
2841 if (retval < VMCI_SUCCESS)
2842 route = vmci_guest_code_active() ?
2843 VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
2844
2845 /* If NONBLOCK or PINNED is set, we better be the guest personality. */
2846 if ((!vmci_can_block(flags) || vmci_qp_pinned(flags)) &&
2847 VMCI_ROUTE_AS_GUEST != route) {
2848 pr_devel("Not guest personality w/ NONBLOCK OR PINNED set");
2849 return VMCI_ERROR_INVALID_ARGS;
2850 }
2851
2852 /*
2853 * Limit the size of pinned QPs and check sanity.
2854 *
2855 * Pinned pages imply non-blocking mode. Mutexes aren't acquired
2856 * when the NONBLOCK flag is set in qpair code, and they also should not be
2857 * acquired when the PINNED flag is set. Since pinning pages
2858 * implies we want speed, it makes no sense not to have NONBLOCK
2859 * set if PINNED is set. Hence enforce this implication.
2860 */
2861 if (vmci_qp_pinned(flags)) {
2862 if (vmci_can_block(flags)) {
2863 pr_err("Attempted to enable pinning w/o non-blocking");
2864 return VMCI_ERROR_INVALID_ARGS;
2865 }
2866
2867 if (produce_qsize + consume_qsize > VMCI_MAX_PINNED_QP_MEMORY)
2868 return VMCI_ERROR_NO_RESOURCES;
2869 }
2870
2871 my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
2872 if (!my_qpair)
2873 return VMCI_ERROR_NO_MEM;
2874
2875 my_qpair->produce_q_size = produce_qsize;
2876 my_qpair->consume_q_size = consume_qsize;
2877 my_qpair->peer = peer;
2878 my_qpair->flags = flags;
2879 my_qpair->priv_flags = priv_flags;
2880
2881 wakeup_cb = NULL;
2882 client_data = NULL;
2883
2884 if (VMCI_ROUTE_AS_HOST == route) {
2885 my_qpair->guest_endpoint = false;
2886 if (!(flags & VMCI_QPFLAG_LOCAL)) {
2887 my_qpair->blocked = 0;
2888 my_qpair->generation = 0;
2889 init_waitqueue_head(&my_qpair->event);
2890 wakeup_cb = qp_wakeup_cb;
2891 client_data = (void *)my_qpair;
2892 }
2893 } else {
2894 my_qpair->guest_endpoint = true;
2895 }
2896
2897 retval = vmci_qp_alloc(handle,
2898 &my_qpair->produce_q,
2899 my_qpair->produce_q_size,
2900 &my_qpair->consume_q,
2901 my_qpair->consume_q_size,
2902 my_qpair->peer,
2903 my_qpair->flags,
2904 my_qpair->priv_flags,
2905 my_qpair->guest_endpoint,
2906 wakeup_cb, client_data);
2907
2908 if (retval < VMCI_SUCCESS) {
2909 kfree(my_qpair);
2910 return retval;
2911 }
2912
2913 *qpair = my_qpair;
2914 my_qpair->handle = *handle;
2915
2916 return retval;
2917}
2918EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
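
Since vmci_qpair_alloc() and the routines below form the exported client API, a minimal caller might look like the following sketch. The peer context ID, queue sizes, function name and error handling are illustrative assumptions, and it presumes the peer has already attached and produced data.

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>

static int example_qpair_roundtrip(u32 peer_cid)
{
	struct vmci_qp *qpair;
	struct vmci_handle handle;
	char msg[] = "hello";
	char reply[64];
	ssize_t io;
	int rv;

	rv = vmci_qpair_alloc(&qpair, &handle, 4096, 4096, peer_cid,
			      0 /* flags */, VMCI_NO_PRIVILEGE_FLAGS);
	if (rv < VMCI_SUCCESS)
		return rv;

	io = vmci_qpair_enqueue(qpair, msg, sizeof(msg), 0);
	if (io >= VMCI_SUCCESS)
		io = vmci_qpair_dequeue(qpair, reply, sizeof(reply), 0);

	vmci_qpair_detach(&qpair);
	return io < 0 ? (int)io : VMCI_SUCCESS;
}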
2919
2920/*
2921 * vmci_qpair_detach() - Detaches the client from a queue pair.
2922 * @qpair: Reference of a pointer to the qpair struct.
2923 *
2924 * This is the client interface for detaching from a VMCIQPair.
2925 * Note that this routine will free the memory allocated for the
2926 * vmci_qp structure too.
2927 */
2928int vmci_qpair_detach(struct vmci_qp **qpair)
2929{
2930 int result;
2931 struct vmci_qp *old_qpair;
2932
2933 if (!qpair || !(*qpair))
2934 return VMCI_ERROR_INVALID_ARGS;
2935
2936 old_qpair = *qpair;
2937 result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
2938
2939 /*
2940 * The guest can fail to detach for a number of reasons, and
2941 * if it does so, it will cleanup the entry (if there is one).
2942 * The host can fail too, but it won't cleanup the entry
2943 * immediately, it will do that later when the context is
2944 * freed. Either way, we need to release the qpair struct
2945 * here; there isn't much the caller can do, and we don't want
2946 * to leak.
2947 */
2948
2949 memset(old_qpair, 0, sizeof(*old_qpair));
2950 old_qpair->handle = VMCI_INVALID_HANDLE;
2951 old_qpair->peer = VMCI_INVALID_ID;
2952 kfree(old_qpair);
2953 *qpair = NULL;
2954
2955 return result;
2956}
2957EXPORT_SYMBOL_GPL(vmci_qpair_detach);
2958
2959/*
2960 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
2961 * @qpair: Pointer to the queue pair struct.
2962 * @producer_tail: Reference used for storing producer tail index.
2963 * @consumer_head: Reference used for storing the consumer head index.
2964 *
2965 * This is the client interface for getting the current indexes of the
2966 * QPair from the point of view of the caller as the producer.
2967 */
2968int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
2969 u64 *producer_tail,
2970 u64 *consumer_head)
2971{
2972 struct vmci_queue_header *produce_q_header;
2973 struct vmci_queue_header *consume_q_header;
2974 int result;
2975
2976 if (!qpair)
2977 return VMCI_ERROR_INVALID_ARGS;
2978
2979 qp_lock(qpair);
2980 result =
2981 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2982 if (result == VMCI_SUCCESS)
2983 vmci_q_header_get_pointers(produce_q_header, consume_q_header,
2984 producer_tail, consumer_head);
2985 qp_unlock(qpair);
2986
2987 if (result == VMCI_SUCCESS &&
2988 ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
2989 (consumer_head && *consumer_head >= qpair->produce_q_size)))
2990 return VMCI_ERROR_INVALID_SIZE;
2991
2992 return result;
2993}
2994EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
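
As a hedged illustration of what the two indexes mean, the helper below derives the number of bytes this side has produced but the peer has not yet consumed, using ordinary ring-buffer arithmetic. The helper name and the idea of passing the queue size explicitly are assumptions made for the example.

static s64 example_produce_used_bytes(const struct vmci_qp *qpair, u64 produce_q_size)
{
	u64 tail, head;
	int rv = vmci_qpair_get_produce_indexes(qpair, &tail, &head);

	if (rv < VMCI_SUCCESS)
		return rv;

	/* Both indexes are offsets into the produce queue of the given size. */
	return (s64)(tail >= head ? tail - head
				  : produce_q_size - (head - tail));
}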
2995
2996/*
2997 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
2998 * @qpair: Pointer to the queue pair struct.
2999 * @consumer_tail: Reference used for storing consumer tail index.
3000 * @producer_head: Reference used for storing the producer head index.
3001 *
3002 * This is the client interface for getting the current indexes of the
3003 * QPair from the point of view of the caller as the consumer.
3004 */
3005int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
3006 u64 *consumer_tail,
3007 u64 *producer_head)
3008{
3009 struct vmci_queue_header *produce_q_header;
3010 struct vmci_queue_header *consume_q_header;
3011 int result;
3012
3013 if (!qpair)
3014 return VMCI_ERROR_INVALID_ARGS;
3015
3016 qp_lock(qpair);
3017 result =
3018 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3019 if (result == VMCI_SUCCESS)
3020 vmci_q_header_get_pointers(consume_q_header, produce_q_header,
3021 consumer_tail, producer_head);
3022 qp_unlock(qpair);
3023
3024 if (result == VMCI_SUCCESS &&
3025 ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
3026 (producer_head && *producer_head >= qpair->consume_q_size)))
3027 return VMCI_ERROR_INVALID_SIZE;
3028
3029 return result;
3030}
3031EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
3032
3033/*
3034 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
3035 * @qpair: Pointer to the queue pair struct.
3036 *
3037 * This is the client interface for getting the amount of free
3038 * space in the QPair from the point of view of the caller as
3039 * the producer, which is the common case. Returns < 0 on error;
3040 * otherwise the number of free bytes into which data can be enqueued.
3041 */
3042s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
3043{
3044 struct vmci_queue_header *produce_q_header;
3045 struct vmci_queue_header *consume_q_header;
3046 s64 result;
3047
3048 if (!qpair)
3049 return VMCI_ERROR_INVALID_ARGS;
3050
3051 qp_lock(qpair);
3052 result =
3053 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3054 if (result == VMCI_SUCCESS)
3055 result = vmci_q_header_free_space(produce_q_header,
3056 consume_q_header,
3057 qpair->produce_q_size);
3058 else
3059 result = 0;
3060
3061 qp_unlock(qpair);
3062
3063 return result;
3064}
3065EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
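
A typical producer first checks the free space and only then enqueues; the sketch below shows that pattern using only the exported calls above. The function name and the policy of giving up when the queue is full are assumptions of the example.

static ssize_t example_try_send(struct vmci_qp *qpair, const void *buf, size_t len)
{
	s64 space = vmci_qpair_produce_free_space(qpair);

	if (space < 0)
		return space;	/* VMCI error code */
	if (space == 0)
		return VMCI_ERROR_QUEUEPAIR_NOSPACE;	/* full right now */

	/* vmci_qpair_enqueue() itself clamps the write to the available space. */
	return vmci_qpair_enqueue(qpair, buf, len, 0);
}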
3066
3067/*
3068 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
3069 * @qpair: Pointer to the queue pair struct.
3070 *
3071 * This is the client interface for getting the amount of free
3072 * space in the QPair from the point of view of the caller as
3073 * the consumer, which is not the common case. Returns < 0 on error;
3074 * otherwise the number of free bytes into which data can be enqueued.
3075 */
3076s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
3077{
3078 struct vmci_queue_header *produce_q_header;
3079 struct vmci_queue_header *consume_q_header;
3080 s64 result;
3081
3082 if (!qpair)
3083 return VMCI_ERROR_INVALID_ARGS;
3084
3085 qp_lock(qpair);
3086 result =
3087 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3088 if (result == VMCI_SUCCESS)
3089 result = vmci_q_header_free_space(consume_q_header,
3090 produce_q_header,
3091 qpair->consume_q_size);
3092 else
3093 result = 0;
3094
3095 qp_unlock(qpair);
3096
3097 return result;
3098}
3099EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
3100
3101/*
3102 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
3103 * producer queue.
3104 * @qpair: Pointer to the queue pair struct.
3105 *
3106 * This is the client interface for getting the amount of
3107 * enqueued data in the QPair from the point of view of the
3108 * caller as the producer, which is not the common case. Returns < 0 on
3109 * error; otherwise the number of bytes that may be read.
3110 */
3111s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
3112{
3113 struct vmci_queue_header *produce_q_header;
3114 struct vmci_queue_header *consume_q_header;
3115 s64 result;
3116
3117 if (!qpair)
3118 return VMCI_ERROR_INVALID_ARGS;
3119
3120 qp_lock(qpair);
3121 result =
3122 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3123 if (result == VMCI_SUCCESS)
3124 result = vmci_q_header_buf_ready(produce_q_header,
3125 consume_q_header,
3126 qpair->produce_q_size);
3127 else
3128 result = 0;
3129
3130 qp_unlock(qpair);
3131
3132 return result;
3133}
3134EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
3135
3136/*
3137 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
3138 * consumer queue.
3139 * @qpair: Pointer to the queue pair struct.
3140 *
3141 * This is the client interface for getting the amount of
3142 * enqueued data in the QPair from the point of view of the
3143 * caller as the consumer, which is the normal case. Returns < 0 on
3144 * error; otherwise the number of bytes that may be read.
3145 */
3146s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
3147{
3148 struct vmci_queue_header *produce_q_header;
3149 struct vmci_queue_header *consume_q_header;
3150 s64 result;
3151
3152 if (!qpair)
3153 return VMCI_ERROR_INVALID_ARGS;
3154
3155 qp_lock(qpair);
3156 result =
3157 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3158 if (result == VMCI_SUCCESS)
3159 result = vmci_q_header_buf_ready(consume_q_header,
3160 produce_q_header,
3161 qpair->consume_q_size);
3162 else
3163 result = 0;
3164
3165 qp_unlock(qpair);
3166
3167 return result;
3168}
3169EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
3170
3171/*
3172 * vmci_qpair_enqueue() - Throw data on the queue.
3173 * @qpair: Pointer to the queue pair struct.
3174 * @buf: Pointer to buffer containing data
3175 * @buf_size: Length of buffer.
3176 * @buf_type: Buffer type (Unused).
3177 *
3178 * This is the client interface for enqueueing data into the queue.
3179 * Returns number of bytes enqueued or < 0 on error.
3180 */
3181ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
3182 const void *buf,
3183 size_t buf_size,
3184 int buf_type)
3185{
3186 ssize_t result;
3187
3188 if (!qpair || !buf)
3189 return VMCI_ERROR_INVALID_ARGS;
3190
3191 qp_lock(qpair);
3192
3193 do {
3194 result = qp_enqueue_locked(qpair->produce_q,
3195 qpair->consume_q,
3196 qpair->produce_q_size,
3197 buf, buf_size,
3198 qp_memcpy_to_queue,
3199 vmci_can_block(qpair->flags));
3200
3201 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3202 !qp_wait_for_ready_queue(qpair))
3203 result = VMCI_ERROR_WOULD_BLOCK;
3204
3205 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3206
3207 qp_unlock(qpair);
3208
3209 return result;
3210}
3211EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
3212
3213/*
3214 * vmci_qpair_dequeue() - Get data from the queue.
3215 * @qpair: Pointer to the queue pair struct.
3216 * @buf: Pointer to buffer for the data
3217 * @buf_size: Length of buffer.
3218 * @buf_type: Buffer type (Unused).
3219 *
3220 * This is the client interface for dequeueing data from the queue.
3221 * Returns number of bytes dequeued or < 0 on error.
3222 */
3223ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
3224 void *buf,
3225 size_t buf_size,
3226 int buf_type)
3227{
3228 ssize_t result;
3229
3230 if (!qpair || !buf)
3231 return VMCI_ERROR_INVALID_ARGS;
3232
3233 qp_lock(qpair);
3234
3235 do {
3236 result = qp_dequeue_locked(qpair->produce_q,
3237 qpair->consume_q,
3238 qpair->consume_q_size,
3239 buf, buf_size,
3240 qp_memcpy_from_queue, true,
3241 vmci_can_block(qpair->flags));
3242
3243 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3244 !qp_wait_for_ready_queue(qpair))
3245 result = VMCI_ERROR_WOULD_BLOCK;
3246
3247 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3248
3249 qp_unlock(qpair);
3250
3251 return result;
3252}
3253EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
3254
3255/*
3256 * vmci_qpair_peek() - Peek at the data in the queue.
3257 * @qpair: Pointer to the queue pair struct.
3258 * @buf: Pointer to buffer for the data
3259 * @buf_size: Length of buffer.
3260 * @buf_type: Buffer type (Unused on Linux).
3261 *
3262 * This is the client interface for peeking into a queue. (I.e.,
3263 * copy data from the queue without updating the head pointer.)
3264 * Returns number of bytes dequeued or < 0 on error.
3265 */
3266ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
3267 void *buf,
3268 size_t buf_size,
3269 int buf_type)
3270{
3271 ssize_t result;
3272
3273 if (!qpair || !buf)
3274 return VMCI_ERROR_INVALID_ARGS;
3275
3276 qp_lock(qpair);
3277
3278 do {
3279 result = qp_dequeue_locked(qpair->produce_q,
3280 qpair->consume_q,
3281 qpair->consume_q_size,
3282 buf, buf_size,
3283 qp_memcpy_from_queue, false,
3284 vmci_can_block(qpair->flags));
3285
3286 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3287 !qp_wait_for_ready_queue(qpair))
3288 result = VMCI_ERROR_WOULD_BLOCK;
3289
3290 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3291
3292 qp_unlock(qpair);
3293
3294 return result;
3295}
3296EXPORT_SYMBOL_GPL(vmci_qpair_peek);
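
Peeking is most useful with a caller-defined framing scheme: inspect a small header without consuming it, and only dequeue once the whole message has arrived. The length-prefixed framing and names below are assumptions of this sketch, not part of the VMCI API.

struct example_msg_hdr {
	u32 len;	/* payload bytes following the header (example framing) */
};

static ssize_t example_recv_msg(struct vmci_qp *qpair, void *buf, size_t buf_size)
{
	struct example_msg_hdr hdr;
	ssize_t rv;

	rv = vmci_qpair_peek(qpair, &hdr, sizeof(hdr), 0);
	if (rv < (ssize_t)sizeof(hdr))
		return rv < 0 ? rv : VMCI_ERROR_QUEUEPAIR_NODATA;

	if (sizeof(hdr) + hdr.len > buf_size)
		return VMCI_ERROR_INVALID_SIZE;

	/* Leave everything queued until the full message is available. */
	if (vmci_qpair_consume_buf_ready(qpair) < (s64)(sizeof(hdr) + hdr.len))
		return VMCI_ERROR_QUEUEPAIR_NODATA;

	return vmci_qpair_dequeue(qpair, buf, sizeof(hdr) + hdr.len, 0);
}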
3297
3298/*
3299 * vmci_qpair_enquev() - Throw data on the queue using iov.
3300 * @qpair: Pointer to the queue pair struct.
3301 * @iov: Pointer to buffer containing data
3302 * @iov_size: Length of buffer.
3303 * @buf_type: Buffer type (Unused).
3304 *
3305 * This is the client interface for enqueueing data into the queue.
3306 * This function uses IO vectors to handle the work. Returns number
3307 * of bytes enqueued or < 0 on error.
3308 */
3309ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
3310 void *iov,
3311 size_t iov_size,
3312 int buf_type)
3313{
3314 ssize_t result;
3315
3316 if (!qpair || !iov)
3317 return VMCI_ERROR_INVALID_ARGS;
3318
3319 qp_lock(qpair);
3320
3321 do {
3322 result = qp_enqueue_locked(qpair->produce_q,
3323 qpair->consume_q,
3324 qpair->produce_q_size,
3325 iov, iov_size,
3326 qp_memcpy_to_queue_iov,
3327 vmci_can_block(qpair->flags));
3328
3329 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3330 !qp_wait_for_ready_queue(qpair))
3331 result = VMCI_ERROR_WOULD_BLOCK;
3332
3333 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3334
3335 qp_unlock(qpair);
3336
3337 return result;
3338}
3339EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
3340
3341/*
3342 * vmci_qpair_dequev() - Get data from the queue using iov.
3343 * @qpair: Pointer to the queue pair struct.
3344 * @iov: Pointer to buffer for the data
3345 * @iov_size: Length of buffer.
3346 * @buf_type: Buffer type (Unused).
3347 *
3348 * This is the client interface for dequeueing data from the queue.
3349 * This function uses IO vectors to handle the work. Returns number
3350 * of bytes dequeued or < 0 on error.
3351 */
3352ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
3353 void *iov,
3354 size_t iov_size,
3355 int buf_type)
3356{
3357 ssize_t result;
3358
3359 if (!qpair || !iov)
3360 return VMCI_ERROR_INVALID_ARGS;
3361
3362 qp_lock(qpair);
3363
3364 do {
3365 result = qp_dequeue_locked(qpair->produce_q,
3366 qpair->consume_q,
3367 qpair->consume_q_size,
3368 iov, iov_size,
3369 qp_memcpy_from_queue_iov,
3370 true, vmci_can_block(qpair->flags));
3371
3372 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3373 !qp_wait_for_ready_queue(qpair))
3374 result = VMCI_ERROR_WOULD_BLOCK;
3375
3376 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3377
3378 qp_unlock(qpair);
3379
3380 return result;
3381}
3382EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
3383
3384/*
3385 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
3386 * @qpair: Pointer to the queue pair struct.
3387 * @iov: Pointer to buffer for the data
3388 * @iov_size: Length of buffer.
3389 * @buf_type: Buffer type (Unused on Linux).
3390 *
3391 * This is the client interface for peeking into a queue. (I.e.,
3392 * copy data from the queue without updating the head pointer.)
3393 * This function uses IO vectors to handle the work. Returns number
3394 * of bytes peeked or < 0 on error.
3395 */
3396ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
3397 void *iov,
3398 size_t iov_size,
3399 int buf_type)
3400{
3401 ssize_t result;
3402
3403 if (!qpair || !iov)
3404 return VMCI_ERROR_INVALID_ARGS;
3405
3406 qp_lock(qpair);
3407
3408 do {
3409 result = qp_dequeue_locked(qpair->produce_q,
3410 qpair->consume_q,
3411 qpair->consume_q_size,
3412 iov, iov_size,
3413 qp_memcpy_from_queue_iov,
3414 false, vmci_can_block(qpair->flags));
3415
3416 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3417 !qp_wait_for_ready_queue(qpair))
3418 result = VMCI_ERROR_WOULD_BLOCK;
3419
3420 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3421
3422 qp_unlock(qpair);
3423 return result;
3424}
3425EXPORT_SYMBOL_GPL(vmci_qpair_peekv);
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
new file mode 100644
index 000000000000..58c6959f6b6d
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -0,0 +1,191 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#ifndef _VMCI_QUEUE_PAIR_H_
17#define _VMCI_QUEUE_PAIR_H_
18
19#include <linux/vmw_vmci_defs.h>
20#include <linux/types.h>
21
22#include "vmci_context.h"
23
24/* Callback needed for correctly waiting on events. */
25typedef int (*vmci_event_release_cb) (void *client_data);
26
27/* Guest device port I/O. */
28struct ppn_set {
29 u64 num_produce_pages;
30 u64 num_consume_pages;
31 u32 *produce_ppns;
32 u32 *consume_ppns;
33 bool initialized;
34};
35
36/* VMCIqueue_pairAllocInfo */
37struct vmci_qp_alloc_info {
38 struct vmci_handle handle;
39 u32 peer;
40 u32 flags;
41 u64 produce_size;
42 u64 consume_size;
43 u64 ppn_va; /* Start VA of queue pair PPNs. */
44 u64 num_ppns;
45 s32 result;
46 u32 version;
47};
48
49/* VMCIqueue_pairSetVAInfo */
50struct vmci_qp_set_va_info {
51 struct vmci_handle handle;
52 u64 va; /* Start VA of queue pair PPNs. */
53 u64 num_ppns;
54 u32 version;
55 s32 result;
56};
57
58/*
59 * For backwards compatibility, here is a version of the
60 * VMCIqueue_pairPageFileInfo before support for host end-points was added.
61 * Note that the current version of that structure requires VMX to
62 * pass down the VA of the mapped file. Before host support was added
63 * there was nothing of the sort. So, when the driver sees the ioctl
64 * with a parameter that is the sizeof
65 * VMCIqueue_pairPageFileInfo_NoHostQP then it can infer that the version
66 * of VMX running can't attach to host end points because it doesn't
67 * provide the VA of the mapped files.
68 *
69 * The Linux driver doesn't get an indication of the size of the
70 * structure passed down from user space. So, to fix a long standing
71 * but unfiled bug, the _pad field has been renamed to version.
72 * Existing versions of VMX always initialize the PageFileInfo
73 * structure so that _pad, er, version is set to 0.
74 *
75 * A version value of 1 indicates that the size of the structure has
76 * been increased to include two UVA's: produce_uva and consume_uva.
77 * These UVA's are of the mmap()'d queue contents backing files.
78 *
79 * In addition, if when VMX is sending down the
80 * VMCIqueue_pairPageFileInfo structure it gets an error then it will
81 * try again with the _NoHostQP version of the file to see if an older
82 * VMCI kernel module is running.
83 */
84
85/* VMCIqueue_pairPageFileInfo */
86struct vmci_qp_page_file_info {
87 struct vmci_handle handle;
88 u64 produce_page_file; /* User VA. */
89 u64 consume_page_file; /* User VA. */
90 u64 produce_page_file_size; /* Size of the file name array. */
91 u64 consume_page_file_size; /* Size of the file name array. */
92 s32 result;
93 u32 version; /* Was _pad. */
94 u64 produce_va; /* User VA of the mapped file. */
95 u64 consume_va; /* User VA of the mapped file. */
96};
97
98/* vmci queuepair detach info */
99struct vmci_qp_dtch_info {
100 struct vmci_handle handle;
101 s32 result;
102 u32 _pad;
103};
104
105/*
106 * struct vmci_qp_page_store describes how the memory of a given queue pair
107 * is backed. When the queue pair is between the host and a guest, the
108 * page store consists of references to the guest pages. On vmkernel,
109 * this is a list of PPNs, and on hosted, it is a user VA where the
110 * queue pair is mapped into the VMX address space.
111 */
112struct vmci_qp_page_store {
113 /* Reference to pages backing the queue pair. */
114 u64 pages;
115	/* Length of pageList/virtual address range (in pages). */
116 u32 len;
117};
118
119/*
120 * This data type contains the information about a queue.
121 * There are two queues (hence, queue pairs) per transaction model between a
122 * pair of end points, A & B. One queue is used by end point A to transmit
123 * commands and responses to B. The other queue is used by B to transmit
124 * commands and responses.
125 *
126 * struct vmci_queue_kern_if is a per-OS defined Queue structure. It contains
127 * either a direct pointer to the linear address of the buffer contents or a
128 * pointer to structures which help the OS locate those data pages. See
129 * vmciKernelIf.c for each platform for its definition.
130 */
131struct vmci_queue {
132 struct vmci_queue_header *q_header;
133 struct vmci_queue_header *saved_header;
134 struct vmci_queue_kern_if *kernel_if;
135};
136
137/*
138 * Utility function that checks whether the fields of the page
139 * store contain valid values.
140 * Result:
141 * true if the page store is well-formed, false otherwise.
142 */
143static inline bool
144VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
145{
146 return page_store->len >= 2;
147}
148
149/*
150 * Helper function to check if the non-blocking flag
151 * is set for a given queue pair.
152 */
153static inline bool vmci_can_block(u32 flags)
154{
155 return !(flags & VMCI_QPFLAG_NONBLOCK);
156}
157
158/*
159 * Helper function to check if the queue pair is pinned
160 * into memory.
161 */
162static inline bool vmci_qp_pinned(u32 flags)
163{
164 return flags & VMCI_QPFLAG_PINNED;
165}
166
167void vmci_qp_broker_exit(void);
168int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
169 u32 flags, u32 priv_flags,
170 u64 produce_size, u64 consume_size,
171 struct vmci_qp_page_store *page_store,
172 struct vmci_ctx *context);
173int vmci_qp_broker_set_page_store(struct vmci_handle handle,
174 u64 produce_uva, u64 consume_uva,
175 struct vmci_ctx *context);
176int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context);
177
178void vmci_qp_guest_endpoints_exit(void);
179
180int vmci_qp_alloc(struct vmci_handle *handle,
181 struct vmci_queue **produce_q, u64 produce_size,
182 struct vmci_queue **consume_q, u64 consume_size,
183 u32 peer, u32 flags, u32 priv_flags,
184 bool guest_endpoint, vmci_event_release_cb wakeup_cb,
185 void *client_data);
186int vmci_qp_broker_map(struct vmci_handle handle,
187 struct vmci_ctx *context, u64 guest_mem);
188int vmci_qp_broker_unmap(struct vmci_handle handle,
189 struct vmci_ctx *context, u32 gid);
190
191#endif /* _VMCI_QUEUE_PAIR_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
new file mode 100644
index 000000000000..a196f84a4fd2
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -0,0 +1,229 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#include <linux/vmw_vmci_defs.h>
17#include <linux/hash.h>
18#include <linux/types.h>
19#include <linux/rculist.h>
20
21#include "vmci_resource.h"
22#include "vmci_driver.h"
23
24
25#define VMCI_RESOURCE_HASH_BITS 7
26#define VMCI_RESOURCE_HASH_BUCKETS (1 << VMCI_RESOURCE_HASH_BITS)
27
28struct vmci_hash_table {
29 spinlock_t lock;
30 struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS];
31};
32
33static struct vmci_hash_table vmci_resource_table = {
34 .lock = __SPIN_LOCK_UNLOCKED(vmci_resource_table.lock),
35};
36
37static unsigned int vmci_resource_hash(struct vmci_handle handle)
38{
39 return hash_32(handle.resource, VMCI_RESOURCE_HASH_BITS);
40}
41
42/*
43 * Gets a resource (if one exists) matching given handle from the hash table.
44 */
45static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
46 enum vmci_resource_type type)
47{
48 struct vmci_resource *r, *resource = NULL;
49 struct hlist_node *node;
50 unsigned int idx = vmci_resource_hash(handle);
51
52 rcu_read_lock();
53 hlist_for_each_entry_rcu(r, node,
54 &vmci_resource_table.entries[idx], node) {
55 u32 cid = r->handle.context;
56 u32 rid = r->handle.resource;
57
58 if (r->type == type &&
59 rid == handle.resource &&
60 (cid == handle.context || cid == VMCI_INVALID_ID)) {
61 resource = r;
62 break;
63 }
64 }
65 rcu_read_unlock();
66
67 return resource;
68}
69
70/*
71 * Find an unused resource ID and return it. The first
72 * VMCI_RESERVED_RESOURCE_ID_MAX are reserved so we start from
73 * its value + 1.
74 * Returns VMCI resource id on success, VMCI_INVALID_ID on failure.
75 */
76static u32 vmci_resource_find_id(u32 context_id,
77 enum vmci_resource_type resource_type)
78{
79 static u32 resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
80 u32 old_rid = resource_id;
81 u32 current_rid;
82
83 /*
84 * Generate a unique resource ID. Keep on trying until we wrap around
85 * in the RID space.
86 */
87 do {
88 struct vmci_handle handle;
89
90 current_rid = resource_id;
91 resource_id++;
92 if (unlikely(resource_id == VMCI_INVALID_ID)) {
93 /* Skip the reserved rids. */
94 resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
95 }
96
97 handle = vmci_make_handle(context_id, current_rid);
98 if (!vmci_resource_lookup(handle, resource_type))
99 return current_rid;
100 } while (resource_id != old_rid);
101
102 return VMCI_INVALID_ID;
103}
104
105
106int vmci_resource_add(struct vmci_resource *resource,
107 enum vmci_resource_type resource_type,
108 struct vmci_handle handle)
109
110{
111 unsigned int idx;
112 int result;
113
114 spin_lock(&vmci_resource_table.lock);
115
116 if (handle.resource == VMCI_INVALID_ID) {
117 handle.resource = vmci_resource_find_id(handle.context,
118 resource_type);
119 if (handle.resource == VMCI_INVALID_ID) {
120 result = VMCI_ERROR_NO_HANDLE;
121 goto out;
122 }
123 } else if (vmci_resource_lookup(handle, resource_type)) {
124 result = VMCI_ERROR_ALREADY_EXISTS;
125 goto out;
126 }
127
128 resource->handle = handle;
129 resource->type = resource_type;
130 INIT_HLIST_NODE(&resource->node);
131 kref_init(&resource->kref);
132 init_completion(&resource->done);
133
134 idx = vmci_resource_hash(resource->handle);
135 hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]);
136
137 result = VMCI_SUCCESS;
138
139out:
140 spin_unlock(&vmci_resource_table.lock);
141 return result;
142}
143
144void vmci_resource_remove(struct vmci_resource *resource)
145{
146 struct vmci_handle handle = resource->handle;
147 unsigned int idx = vmci_resource_hash(handle);
148 struct vmci_resource *r;
149 struct hlist_node *node;
150
151 /* Remove resource from hash table. */
152 spin_lock(&vmci_resource_table.lock);
153
154 hlist_for_each_entry(r, node, &vmci_resource_table.entries[idx], node) {
155 if (vmci_handle_is_equal(r->handle, resource->handle)) {
156 hlist_del_init_rcu(&r->node);
157 break;
158 }
159 }
160
161 spin_unlock(&vmci_resource_table.lock);
162 synchronize_rcu();
163
164 vmci_resource_put(resource);
165 wait_for_completion(&resource->done);
166}
167
168struct vmci_resource *
169vmci_resource_by_handle(struct vmci_handle resource_handle,
170 enum vmci_resource_type resource_type)
171{
172 struct vmci_resource *r, *resource = NULL;
173
174 rcu_read_lock();
175
176 r = vmci_resource_lookup(resource_handle, resource_type);
177 if (r &&
178 (resource_type == r->type ||
179 resource_type == VMCI_RESOURCE_TYPE_ANY)) {
180 resource = vmci_resource_get(r);
181 }
182
183 rcu_read_unlock();
184
185 return resource;
186}
187
188/*
189 * Get a reference to given resource.
190 */
191struct vmci_resource *vmci_resource_get(struct vmci_resource *resource)
192{
193 kref_get(&resource->kref);
194
195 return resource;
196}
197
198static void vmci_release_resource(struct kref *kref)
199{
200 struct vmci_resource *resource =
201 container_of(kref, struct vmci_resource, kref);
202
203 /* Verify the resource has been unlinked from hash table */
204 WARN_ON(!hlist_unhashed(&resource->node));
205
206 /* Signal that container of this resource can now be destroyed */
207 complete(&resource->done);
208}
209
210/*
211 * The resource's release function will get called on the last reference.
212 * If it is the last reference, then we are sure that nobody else
213 * can increment the count again (it's gone from the resource hash
214 * table), so there's no need for locking here.
215 */
216int vmci_resource_put(struct vmci_resource *resource)
217{
218 /*
219	 * We propagate the information back to the caller in case it wants to
220	 * know whether the entry was freed.
221 */
222 return kref_put(&resource->kref, vmci_release_resource) ?
223 VMCI_SUCCESS_ENTRY_DEAD : VMCI_SUCCESS;
224}
225
226struct vmci_handle vmci_resource_handle(struct vmci_resource *resource)
227{
228 return resource->handle;
229}
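
To show how the pieces above fit together, here is a hedged lifecycle sketch for an object that embeds a struct vmci_resource; the wrapper type and function names are invented for illustration.

struct example_obj {
	struct vmci_resource resource;
	/* ...payload... */
};

static int example_register(struct example_obj *obj, u32 context_id)
{
	/* VMCI_INVALID_ID asks vmci_resource_add() to pick a free resource ID. */
	struct vmci_handle h = vmci_make_handle(context_id, VMCI_INVALID_ID);

	return vmci_resource_add(&obj->resource,
				 VMCI_RESOURCE_TYPE_DOORBELL, h);
}

static void example_unregister(struct example_obj *obj)
{
	/*
	 * Unlinks the entry and blocks until the last reference taken via
	 * vmci_resource_by_handle()/vmci_resource_get() has been put.
	 */
	vmci_resource_remove(&obj->resource);
	kfree(obj);
}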
diff --git a/drivers/misc/vmw_vmci/vmci_resource.h b/drivers/misc/vmw_vmci/vmci_resource.h
new file mode 100644
index 000000000000..9190cd298bee
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_resource.h
@@ -0,0 +1,59 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#ifndef _VMCI_RESOURCE_H_
17#define _VMCI_RESOURCE_H_
18
19#include <linux/vmw_vmci_defs.h>
20#include <linux/types.h>
21
22#include "vmci_context.h"
23
24
25enum vmci_resource_type {
26 VMCI_RESOURCE_TYPE_ANY,
27 VMCI_RESOURCE_TYPE_API,
28 VMCI_RESOURCE_TYPE_GROUP,
29 VMCI_RESOURCE_TYPE_DATAGRAM,
30 VMCI_RESOURCE_TYPE_DOORBELL,
31 VMCI_RESOURCE_TYPE_QPAIR_GUEST,
32 VMCI_RESOURCE_TYPE_QPAIR_HOST
33};
34
35struct vmci_resource {
36 struct vmci_handle handle;
37 enum vmci_resource_type type;
38 struct hlist_node node;
39 struct kref kref;
40 struct completion done;
41};
42
43
44int vmci_resource_add(struct vmci_resource *resource,
45 enum vmci_resource_type resource_type,
46 struct vmci_handle handle);
47
48void vmci_resource_remove(struct vmci_resource *resource);
49
50struct vmci_resource *
51vmci_resource_by_handle(struct vmci_handle resource_handle,
52 enum vmci_resource_type resource_type);
53
54struct vmci_resource *vmci_resource_get(struct vmci_resource *resource);
55int vmci_resource_put(struct vmci_resource *resource);
56
57struct vmci_handle vmci_resource_handle(struct vmci_resource *resource);
58
59#endif /* _VMCI_RESOURCE_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_route.c b/drivers/misc/vmw_vmci/vmci_route.c
new file mode 100644
index 000000000000..91090658b929
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_route.c
@@ -0,0 +1,226 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#include <linux/vmw_vmci_defs.h>
17#include <linux/vmw_vmci_api.h>
18
19#include "vmci_context.h"
20#include "vmci_driver.h"
21#include "vmci_route.h"
22
23/*
24 * Make a routing decision for the given source and destination handles.
25 * This will try to determine the route using the handles and the available
26 * devices. Will set the source context if it is invalid.
27 */
28int vmci_route(struct vmci_handle *src,
29 const struct vmci_handle *dst,
30 bool from_guest,
31 enum vmci_route *route)
32{
33 bool has_host_device = vmci_host_code_active();
34 bool has_guest_device = vmci_guest_code_active();
35
36 *route = VMCI_ROUTE_NONE;
37
38 /*
39 * "from_guest" is only ever set to true by
40 * IOCTL_VMCI_DATAGRAM_SEND (or by the vmkernel equivalent),
41 * which comes from the VMX, so we know it is coming from a
42 * guest.
43 *
44 * To avoid inconsistencies, test these once. We will test
45 * them again when we do the actual send to ensure that we do
46 * not touch a non-existent device.
47 */
48
49 /* Must have a valid destination context. */
50 if (VMCI_INVALID_ID == dst->context)
51 return VMCI_ERROR_INVALID_ARGS;
52
53 /* Anywhere to hypervisor. */
54 if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) {
55
56 /*
57 * If this message already came from a guest then we
58 * cannot send it to the hypervisor. It must come
59 * from a local client.
60 */
61 if (from_guest)
62 return VMCI_ERROR_DST_UNREACHABLE;
63
64 /*
65 * We must be acting as a guest in order to send to
66 * the hypervisor.
67 */
68 if (!has_guest_device)
69 return VMCI_ERROR_DEVICE_NOT_FOUND;
70
71 /* And we cannot send if the source is the host context. */
72 if (VMCI_HOST_CONTEXT_ID == src->context)
73 return VMCI_ERROR_INVALID_ARGS;
74
75 /*
76 * If the client passed the ANON source handle then
77 * respect it (both context and resource are invalid).
78 * However, if they passed only an invalid context,
79 * then they probably mean ANY, in which case we
80 * should set the real context here before passing it
81 * down.
82 */
83 if (VMCI_INVALID_ID == src->context &&
84 VMCI_INVALID_ID != src->resource)
85 src->context = vmci_get_context_id();
86
87 /* Send from local client down to the hypervisor. */
88 *route = VMCI_ROUTE_AS_GUEST;
89 return VMCI_SUCCESS;
90 }
91
92 /* Anywhere to local client on host. */
93 if (VMCI_HOST_CONTEXT_ID == dst->context) {
94 /*
95 * If it is not from a guest but we are acting as a
96 * guest, then we need to send it down to the host.
97 * Note that if we are also acting as a host then this
98 * will prevent us from sending from local client to
99 * local client, but we accept that restriction as a
100 * way to remove any ambiguity from the host context.
101 */
102 if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) {
103 /*
104 * If the hypervisor is the source, this is
105 * host local communication. The hypervisor
106 * may send vmci event datagrams to the host
107 * itself, but it will never send datagrams to
108 * an "outer host" through the guest device.
109 */
110
111 if (has_host_device) {
112 *route = VMCI_ROUTE_AS_HOST;
113 return VMCI_SUCCESS;
114 } else {
115 return VMCI_ERROR_DEVICE_NOT_FOUND;
116 }
117 }
118
119 if (!from_guest && has_guest_device) {
120 /* If no source context then use the current. */
121 if (VMCI_INVALID_ID == src->context)
122 src->context = vmci_get_context_id();
123
124 /* Send it from local client down to the host. */
125 *route = VMCI_ROUTE_AS_GUEST;
126 return VMCI_SUCCESS;
127 }
128
129 /*
130 * Otherwise we already received it from a guest and
131 * it is destined for a local client on this host, or
132 * it is from another local client on this host. We
133 * must be acting as a host to service it.
134 */
135 if (!has_host_device)
136 return VMCI_ERROR_DEVICE_NOT_FOUND;
137
138 if (VMCI_INVALID_ID == src->context) {
139 /*
140 * If it came from a guest then it must have a
141 * valid context. Otherwise we can use the
142 * host context.
143 */
144 if (from_guest)
145 return VMCI_ERROR_INVALID_ARGS;
146
147 src->context = VMCI_HOST_CONTEXT_ID;
148 }
149
150 /* Route to local client. */
151 *route = VMCI_ROUTE_AS_HOST;
152 return VMCI_SUCCESS;
153 }
154
155 /*
156 * If we are acting as a host then this might be destined for
157 * a guest.
158 */
159 if (has_host_device) {
160 /* It will have a context if it is meant for a guest. */
161 if (vmci_ctx_exists(dst->context)) {
162 if (VMCI_INVALID_ID == src->context) {
163 /*
164 * If it came from a guest then it
165 * must have a valid context.
166 * Otherwise we can use the host
167 * context.
168 */
169
170 if (from_guest)
171 return VMCI_ERROR_INVALID_ARGS;
172
173 src->context = VMCI_HOST_CONTEXT_ID;
174 } else if (VMCI_CONTEXT_IS_VM(src->context) &&
175 src->context != dst->context) {
176 /*
177 * VM to VM communication is not
178 * allowed. Since we catch all
179 * communication destined for the host
180 * above, this must be destined for a
181 * VM since there is a valid context.
182 */
183
184 return VMCI_ERROR_DST_UNREACHABLE;
185 }
186
187 /* Pass it up to the guest. */
188 *route = VMCI_ROUTE_AS_HOST;
189 return VMCI_SUCCESS;
190 } else if (!has_guest_device) {
191 /*
192 * The host is attempting to reach a CID
193 * without an active context, and we can't
194 * send it down, since we have no guest
195 * device.
196 */
197
198 return VMCI_ERROR_DST_UNREACHABLE;
199 }
200 }
201
202 /*
203 * We must be a guest trying to send to another guest, which means
204 * we need to send it down to the host. We do not filter out VM to
205 * VM communication here, since we want to be able to use the guest
206 * driver on older versions that do support VM to VM communication.
207 */
208 if (!has_guest_device) {
209 /*
210 * Ending up here means we have neither guest nor host
211 * device.
212 */
213 return VMCI_ERROR_DEVICE_NOT_FOUND;
214 }
215
216 /* If no source context then use the current context. */
217 if (VMCI_INVALID_ID == src->context)
218 src->context = vmci_get_context_id();
219
220 /*
221 * Send it from local client down to the host, which will
222 * route it to the other guest for us.
223 */
224 *route = VMCI_ROUTE_AS_GUEST;
225 return VMCI_SUCCESS;
226}
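A minimal sketch, not part of the patch, of how a datagram send path might consume the routing decision above; the function name and the delivery stubs are illustrative.

static int example_dg_send(struct vmci_handle *src,
			   const struct vmci_handle *dst, bool from_guest)
{
	enum vmci_route route;
	int result;

	result = vmci_route(src, dst, from_guest, &route);
	if (result != VMCI_SUCCESS)
		return result;

	switch (route) {
	case VMCI_ROUTE_AS_HOST:
		/* deliver through the host-side context machinery */
		break;
	case VMCI_ROUTE_AS_GUEST:
		/* hand the datagram to the guest device */
		break;
	case VMCI_ROUTE_NONE:
	default:
		return VMCI_ERROR_DST_UNREACHABLE;
	}

	return VMCI_SUCCESS;
}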
diff --git a/drivers/misc/vmw_vmci/vmci_route.h b/drivers/misc/vmw_vmci/vmci_route.h
new file mode 100644
index 000000000000..3b30e82419c3
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_route.h
@@ -0,0 +1,30 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#ifndef _VMCI_ROUTE_H_
17#define _VMCI_ROUTE_H_
18
19#include <linux/vmw_vmci_defs.h>
20
21enum vmci_route {
22 VMCI_ROUTE_NONE,
23 VMCI_ROUTE_AS_HOST,
24 VMCI_ROUTE_AS_GUEST,
25};
26
27int vmci_route(struct vmci_handle *src, const struct vmci_handle *dst,
28 bool from_guest, enum vmci_route *route);
29
30#endif /* _VMCI_ROUTE_H_ */
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index cc8a8fad455a..3be8b94d7914 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -457,7 +457,7 @@ config MMC_SDHI
 
 config MMC_CB710
 	tristate "ENE CB710 MMC/SD Interface support"
-	depends on PCI
+	depends on PCI && GENERIC_HARDIRQS
 	select CB710_CORE
 	help
 	  This option enables support for MMC/SD part of ENE CB710/720 Flash
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2334190ff8d2..56c2d75a63d4 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -188,6 +188,10 @@ config NETPOLL_TRAP
 config NET_POLL_CONTROLLER
 	def_bool NETPOLL
 
+config NTB_NETDEV
+	tristate "Virtual Ethernet over NTB"
+	depends on NTB
+
 config RIONET
 	tristate "RapidIO Ethernet over messaging driver support"
 	depends on RAPIDIO
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 335db78fd987..ef3d090efedf 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -71,3 +71,4 @@ obj-$(CONFIG_USB_IPHETH) += usb/
 obj-$(CONFIG_USB_CDC_PHONET) += usb/
 
 obj-$(CONFIG_HYPERV_NET) += hyperv/
+obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d5202a4b0877..5f85205cd12b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -498,8 +498,7 @@ static int netvsc_remove(struct hv_device *dev)
 
 static const struct hv_vmbus_device_id id_table[] = {
 	/* Network guid */
-	{ VMBUS_DEVICE(0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
-		       0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E) },
+	{ HV_NIC_GUID, },
 	{ },
 };
 
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
new file mode 100644
index 000000000000..ed947dd76fbd
--- /dev/null
+++ b/drivers/net/ntb_netdev.c
@@ -0,0 +1,408 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2012 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * BSD LICENSE
14 *
15 * Copyright(c) 2012 Intel Corporation. All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * * Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * * Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in
25 * the documentation and/or other materials provided with the
26 * distribution.
27 * * Neither the name of Intel Corporation nor the names of its
28 * contributors may be used to endorse or promote products derived
29 * from this software without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *
43 * Intel PCIe NTB Network Linux driver
44 *
45 * Contact Information:
46 * Jon Mason <jon.mason@intel.com>
47 */
48#include <linux/etherdevice.h>
49#include <linux/ethtool.h>
50#include <linux/module.h>
51#include <linux/pci.h>
52#include <linux/ntb.h>
53
54#define NTB_NETDEV_VER "0.7"
55
56MODULE_DESCRIPTION(KBUILD_MODNAME);
57MODULE_VERSION(NTB_NETDEV_VER);
58MODULE_LICENSE("Dual BSD/GPL");
59MODULE_AUTHOR("Intel Corporation");
60
61struct ntb_netdev {
62 struct list_head list;
63 struct pci_dev *pdev;
64 struct net_device *ndev;
65 struct ntb_transport_qp *qp;
66};
67
68#define NTB_TX_TIMEOUT_MS 1000
69#define NTB_RXQ_SIZE 100
70
71static LIST_HEAD(dev_list);
72
73static void ntb_netdev_event_handler(void *data, int status)
74{
75 struct net_device *ndev = data;
76 struct ntb_netdev *dev = netdev_priv(ndev);
77
78 netdev_dbg(ndev, "Event %x, Link %x\n", status,
79 ntb_transport_link_query(dev->qp));
80
81 /* Currently, only link status event is supported */
82 if (status)
83 netif_carrier_on(ndev);
84 else
85 netif_carrier_off(ndev);
86}
87
88static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
89 void *data, int len)
90{
91 struct net_device *ndev = qp_data;
92 struct sk_buff *skb;
93 int rc;
94
95 skb = data;
96 if (!skb)
97 return;
98
99 netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
100
101 skb_put(skb, len);
102 skb->protocol = eth_type_trans(skb, ndev);
103 skb->ip_summed = CHECKSUM_NONE;
104
105 if (netif_rx(skb) == NET_RX_DROP) {
106 ndev->stats.rx_errors++;
107 ndev->stats.rx_dropped++;
108 } else {
109 ndev->stats.rx_packets++;
110 ndev->stats.rx_bytes += len;
111 }
112
113 skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
114 if (!skb) {
115 ndev->stats.rx_errors++;
116 ndev->stats.rx_frame_errors++;
117 return;
118 }
119
120 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
121 if (rc) {
122 dev_kfree_skb(skb);
123 ndev->stats.rx_errors++;
124 ndev->stats.rx_fifo_errors++;
125 }
126}
127
128static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
129 void *data, int len)
130{
131 struct net_device *ndev = qp_data;
132 struct sk_buff *skb;
133
134 skb = data;
135 if (!skb || !ndev)
136 return;
137
138 if (len > 0) {
139 ndev->stats.tx_packets++;
140 ndev->stats.tx_bytes += skb->len;
141 } else {
142 ndev->stats.tx_errors++;
143 ndev->stats.tx_aborted_errors++;
144 }
145
146 dev_kfree_skb(skb);
147}
148
149static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
150 struct net_device *ndev)
151{
152 struct ntb_netdev *dev = netdev_priv(ndev);
153 int rc;
154
155 netdev_dbg(ndev, "%s: skb len %d\n", __func__, skb->len);
156
157 rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
158 if (rc)
159 goto err;
160
161 return NETDEV_TX_OK;
162
163err:
164 ndev->stats.tx_dropped++;
165 ndev->stats.tx_errors++;
166 return NETDEV_TX_BUSY;
167}
168
169static int ntb_netdev_open(struct net_device *ndev)
170{
171 struct ntb_netdev *dev = netdev_priv(ndev);
172 struct sk_buff *skb;
173 int rc, i, len;
174
175 /* Add some empty rx bufs */
176 for (i = 0; i < NTB_RXQ_SIZE; i++) {
177 skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
178 if (!skb) {
179 rc = -ENOMEM;
180 goto err;
181 }
182
183 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
184 ndev->mtu + ETH_HLEN);
185 if (rc == -EINVAL)
186 goto err;
187 }
188
189 netif_carrier_off(ndev);
190 ntb_transport_link_up(dev->qp);
191
192 return 0;
193
194err:
195 while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
196 dev_kfree_skb(skb);
197 return rc;
198}
199
200static int ntb_netdev_close(struct net_device *ndev)
201{
202 struct ntb_netdev *dev = netdev_priv(ndev);
203 struct sk_buff *skb;
204 int len;
205
206 ntb_transport_link_down(dev->qp);
207
208 while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
209 dev_kfree_skb(skb);
210
211 return 0;
212}
213
214static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
215{
216 struct ntb_netdev *dev = netdev_priv(ndev);
217 struct sk_buff *skb;
218 int len, rc;
219
220 if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
221 return -EINVAL;
222
223 if (!netif_running(ndev)) {
224 ndev->mtu = new_mtu;
225 return 0;
226 }
227
228 /* Bring down the link and dispose of posted rx entries */
229 ntb_transport_link_down(dev->qp);
230
231 if (ndev->mtu < new_mtu) {
232 int i;
233
234 for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
235 dev_kfree_skb(skb);
236
237 for (; i; i--) {
238 skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
239 if (!skb) {
240 rc = -ENOMEM;
241 goto err;
242 }
243
244 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
245 new_mtu + ETH_HLEN);
246 if (rc) {
247 dev_kfree_skb(skb);
248 goto err;
249 }
250 }
251 }
252
253 ndev->mtu = new_mtu;
254
255 ntb_transport_link_up(dev->qp);
256
257 return 0;
258
259err:
260 ntb_transport_link_down(dev->qp);
261
262 while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
263 dev_kfree_skb(skb);
264
265 netdev_err(ndev, "Error changing MTU, device inoperable\n");
266 return rc;
267}
268
269static const struct net_device_ops ntb_netdev_ops = {
270 .ndo_open = ntb_netdev_open,
271 .ndo_stop = ntb_netdev_close,
272 .ndo_start_xmit = ntb_netdev_start_xmit,
273 .ndo_change_mtu = ntb_netdev_change_mtu,
274 .ndo_set_mac_address = eth_mac_addr,
275};
276
277static void ntb_get_drvinfo(struct net_device *ndev,
278 struct ethtool_drvinfo *info)
279{
280 struct ntb_netdev *dev = netdev_priv(ndev);
281
282 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
283 strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
284 strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
285}
286
287static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
288{
289 cmd->supported = SUPPORTED_Backplane;
290 cmd->advertising = ADVERTISED_Backplane;
291 cmd->speed = SPEED_UNKNOWN;
292 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
293 cmd->duplex = DUPLEX_FULL;
294 cmd->port = PORT_OTHER;
295 cmd->phy_address = 0;
296 cmd->transceiver = XCVR_DUMMY1;
297 cmd->autoneg = AUTONEG_ENABLE;
298 cmd->maxtxpkt = 0;
299 cmd->maxrxpkt = 0;
300
301 return 0;
302}
303
304static const struct ethtool_ops ntb_ethtool_ops = {
305 .get_drvinfo = ntb_get_drvinfo,
306 .get_link = ethtool_op_get_link,
307 .get_settings = ntb_get_settings,
308};
309
310static const struct ntb_queue_handlers ntb_netdev_handlers = {
311 .tx_handler = ntb_netdev_tx_handler,
312 .rx_handler = ntb_netdev_rx_handler,
313 .event_handler = ntb_netdev_event_handler,
314};
315
316static int ntb_netdev_probe(struct pci_dev *pdev)
317{
318 struct net_device *ndev;
319 struct ntb_netdev *dev;
320 int rc;
321
322 ndev = alloc_etherdev(sizeof(struct ntb_netdev));
323 if (!ndev)
324 return -ENOMEM;
325
326 dev = netdev_priv(ndev);
327 dev->ndev = ndev;
328 dev->pdev = pdev;
329 BUG_ON(!dev->pdev);
330 ndev->features = NETIF_F_HIGHDMA;
331
332 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
333
334 ndev->hw_features = ndev->features;
335 ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);
336
337 random_ether_addr(ndev->perm_addr);
338 memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
339
340 ndev->netdev_ops = &ntb_netdev_ops;
341 SET_ETHTOOL_OPS(ndev, &ntb_ethtool_ops);
342
343 dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
344 if (!dev->qp) {
345 rc = -EIO;
346 goto err;
347 }
348
349 ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;
350
351 rc = register_netdev(ndev);
352 if (rc)
353 goto err1;
354
355 list_add(&dev->list, &dev_list);
356 dev_info(&pdev->dev, "%s created\n", ndev->name);
357 return 0;
358
359err1:
360 ntb_transport_free_queue(dev->qp);
361err:
362 free_netdev(ndev);
363 return rc;
364}
365
366static void ntb_netdev_remove(struct pci_dev *pdev)
367{
368 struct net_device *ndev;
369 struct ntb_netdev *dev;
370
371 list_for_each_entry(dev, &dev_list, list) {
372 if (dev->pdev == pdev)
373 break;
374 }
375 if (dev == NULL)
376 return;
377
378 ndev = dev->ndev;
379
380 unregister_netdev(ndev);
381 ntb_transport_free_queue(dev->qp);
382 free_netdev(ndev);
383}
384
385static struct ntb_client ntb_netdev_client = {
386 .driver.name = KBUILD_MODNAME,
387 .driver.owner = THIS_MODULE,
388 .probe = ntb_netdev_probe,
389 .remove = ntb_netdev_remove,
390};
391
392static int __init ntb_netdev_init_module(void)
393{
394 int rc;
395
396 rc = ntb_register_client_dev(KBUILD_MODNAME);
397 if (rc)
398 return rc;
399 return ntb_register_client(&ntb_netdev_client);
400}
401module_init(ntb_netdev_init_module);
402
403static void __exit ntb_netdev_exit_module(void)
404{
405 ntb_unregister_client(&ntb_netdev_client);
406 ntb_unregister_client_dev(KBUILD_MODNAME);
407}
408module_exit(ntb_netdev_exit_module);
diff --git a/drivers/ntb/Kconfig b/drivers/ntb/Kconfig
new file mode 100644
index 000000000000..37ee6495acc1
--- /dev/null
+++ b/drivers/ntb/Kconfig
@@ -0,0 +1,13 @@
1config NTB
2 tristate "Intel Non-Transparent Bridge support"
3 depends on PCI
4 depends on X86_64
5 help
6 The PCI-E Non-transparent bridge hardware is a point-to-point PCI-E bus
7 connecting 2 systems. When configured, writes to the device's PCI
8 mapped memory will be mirrored to a buffer on the remote system. The
9 ntb Linux driver uses this point-to-point communication as a method to
10 transfer data from one system to the other.
11
12 If unsure, say N.
13
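The help text above describes the hardware; a hypothetical .config fragment that builds the bridge driver and the virtual-Ethernet client on top of it as modules (symbols taken from this patch) would be:

    CONFIG_NTB=m
    CONFIG_NTB_NETDEV=m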
diff --git a/drivers/ntb/Makefile b/drivers/ntb/Makefile
new file mode 100644
index 000000000000..15cb59fd354e
--- /dev/null
+++ b/drivers/ntb/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_NTB) += ntb.o
2
3ntb-objs := ntb_hw.o ntb_transport.o
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
new file mode 100644
index 000000000000..f802e7c92356
--- /dev/null
+++ b/drivers/ntb/ntb_hw.c
@@ -0,0 +1,1141 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2012 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * BSD LICENSE
14 *
15 * Copyright(c) 2012 Intel Corporation. All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * * Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * * Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in
25 * the documentation and/or other materials provided with the
26 * distribution.
27 * * Neither the name of Intel Corporation nor the names of its
28 * contributors may be used to endorse or promote products derived
29 * from this software without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *
43 * Intel PCIe NTB Linux driver
44 *
45 * Contact Information:
46 * Jon Mason <jon.mason@intel.com>
47 */
48#include <linux/debugfs.h>
49#include <linux/init.h>
50#include <linux/interrupt.h>
51#include <linux/module.h>
52#include <linux/pci.h>
53#include <linux/slab.h>
54#include "ntb_hw.h"
55#include "ntb_regs.h"
56
57#define NTB_NAME "Intel(R) PCI-E Non-Transparent Bridge Driver"
58#define NTB_VER "0.25"
59
60MODULE_DESCRIPTION(NTB_NAME);
61MODULE_VERSION(NTB_VER);
62MODULE_LICENSE("Dual BSD/GPL");
63MODULE_AUTHOR("Intel Corporation");
64
65enum {
66 NTB_CONN_CLASSIC = 0,
67 NTB_CONN_B2B,
68 NTB_CONN_RP,
69};
70
71enum {
72 NTB_DEV_USD = 0,
73 NTB_DEV_DSD,
74};
75
76enum {
77 SNB_HW = 0,
78 BWD_HW,
79};
80
81/* Translate memory window 0,1 to BAR 2,4 */
82#define MW_TO_BAR(mw) (mw * 2 + 2)
83
84static DEFINE_PCI_DEVICE_TABLE(ntb_pci_tbl) = {
85 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
86 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
87 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF)},
88 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_RP_JSF)},
89 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_RP_SNB)},
90 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
91 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB)},
92 {0}
93};
94MODULE_DEVICE_TABLE(pci, ntb_pci_tbl);
95
96/**
97 * ntb_register_event_callback() - register event callback
98 * @ndev: pointer to ntb_device instance
99 * @func: callback function to register
100 *
101 * This function registers a callback for any HW driver events such as link
102 * up/down, power management notices, etc.
103 *
104 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
105 */
106int ntb_register_event_callback(struct ntb_device *ndev,
107 void (*func)(void *handle, enum ntb_hw_event event))
108{
109 if (ndev->event_cb)
110 return -EINVAL;
111
112 ndev->event_cb = func;
113
114 return 0;
115}
116
117/**
118 * ntb_unregister_event_callback() - unregisters the event callback
119 * @ndev: pointer to ntb_device instance
120 *
121 * This function unregisters the existing callback from the transport.
122 */
123void ntb_unregister_event_callback(struct ntb_device *ndev)
124{
125 ndev->event_cb = NULL;
126}
127
128/**
129 * ntb_register_db_callback() - register a callback for doorbell interrupt
130 * @ndev: pointer to ntb_device instance
131 * @idx: doorbell index to register callback, zero based
132 * @func: callback function to register
133 *
134 * This function registers a callback function for the doorbell interrupt
135 * on the primary side. The function will also unmask the doorbell to
136 * allow the interrupt to be delivered.
137 *
138 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
139 */
140int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
141 void *data, void (*func)(void *data, int db_num))
142{
143 unsigned long mask;
144
145 if (idx >= ndev->max_cbs || ndev->db_cb[idx].callback) {
146 dev_warn(&ndev->pdev->dev, "Invalid Index.\n");
147 return -EINVAL;
148 }
149
150 ndev->db_cb[idx].callback = func;
151 ndev->db_cb[idx].data = data;
152
153 /* unmask interrupt */
154 mask = readw(ndev->reg_ofs.pdb_mask);
155 clear_bit(idx * ndev->bits_per_vector, &mask);
156 writew(mask, ndev->reg_ofs.pdb_mask);
157
158 return 0;
159}
160
161/**
162 * ntb_unregister_db_callback() - unregister a callback for doorbell interrupt
163 * @ndev: pointer to ntb_device instance
164 * @idx: doorbell index to register callback, zero based
165 *
166 * This function unregisters a callback function for the doorbell interrupt
167 * on the primary side. The function will also mask that doorbell.
168 */
169void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx)
170{
171 unsigned long mask;
172
173 if (idx >= ndev->max_cbs || !ndev->db_cb[idx].callback)
174 return;
175
176 mask = readw(ndev->reg_ofs.pdb_mask);
177 set_bit(idx * ndev->bits_per_vector, &mask);
178 writew(mask, ndev->reg_ofs.pdb_mask);
179
180 ndev->db_cb[idx].callback = NULL;
181}
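/*
 * Minimal usage sketch, not part of this file: a transport client hooking
 * doorbell 0 with the registration call documented above.  The handler and
 * queue names (example_db_handler, example_qp) are hypothetical.
 */
static void example_db_handler(void *data, int db_num)
{
	/* kick RX/TX processing for the queue signalled by this doorbell */
}

static int example_client_attach_db(struct ntb_device *ndev, void *example_qp)
{
	/* unmasks doorbell 0 and routes its interrupt to the handler */
	return ntb_register_db_callback(ndev, 0, example_qp,
					example_db_handler);
}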
182
183/**
184 * ntb_find_transport() - find the transport pointer
185 * @transport: pointer to pci device
186 *
187 * Given the pci device pointer, return the transport pointer that was
188 * passed in when the transport attached to the hardware driver.
189 *
190 * RETURNS: pointer to transport.
191 */
192void *ntb_find_transport(struct pci_dev *pdev)
193{
194 struct ntb_device *ndev = pci_get_drvdata(pdev);
195 return ndev->ntb_transport;
196}
197
198/**
199 * ntb_register_transport() - Register NTB transport with NTB HW driver
200 * @transport: transport identifier
201 *
202 * This function allows a transport to reserve the hardware driver for
203 * NTB usage.
204 *
205 * RETURNS: pointer to ntb_device, NULL on error.
206 */
207struct ntb_device *ntb_register_transport(struct pci_dev *pdev, void *transport)
208{
209 struct ntb_device *ndev = pci_get_drvdata(pdev);
210
211 if (ndev->ntb_transport)
212 return NULL;
213
214 ndev->ntb_transport = transport;
215 return ndev;
216}
217
218/**
219 * ntb_unregister_transport() - Unregister the transport with the NTB HW driver
220 * @ndev - ntb_device of the transport to be freed
221 *
222 * This function unregisters the transport from the HW driver and performs any
223 * necessary cleanups.
224 */
225void ntb_unregister_transport(struct ntb_device *ndev)
226{
227 int i;
228
229 if (!ndev->ntb_transport)
230 return;
231
232 for (i = 0; i < ndev->max_cbs; i++)
233 ntb_unregister_db_callback(ndev, i);
234
235 ntb_unregister_event_callback(ndev);
236 ndev->ntb_transport = NULL;
237}
238
239/**
240 * ntb_write_local_spad() - write to the local scratchpad register
241 * @ndev: pointer to ntb_device instance
242 * @idx: index to the scratchpad register, 0 based
243 * @val: the data value to put into the register
244 *
245 * This function allows writing of a 32bit value to the indexed scratchpad
246 * register. This writes over the data mirrored to the local scratchpad register
247 * by the remote system.
248 *
249 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
250 */
251int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
252{
253 if (idx >= ndev->limits.max_spads)
254 return -EINVAL;
255
256 dev_dbg(&ndev->pdev->dev, "Writing %x to local scratch pad index %d\n",
257 val, idx);
258 writel(val, ndev->reg_ofs.spad_read + idx * 4);
259
260 return 0;
261}
262
263/**
264 * ntb_read_local_spad() - read from the primary scratchpad register
265 * @ndev: pointer to ntb_device instance
266 * @idx: index to scratchpad register, 0 based
267 * @val: pointer to 32bit integer for storing the register value
268 *
269 * This function allows reading of the 32bit scratchpad register on
270 * the primary (internal) side. This allows the local system to read data
271 * written and mirrored to the scratchpad register by the remote system.
272 *
273 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
274 */
275int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
276{
277 if (idx >= ndev->limits.max_spads)
278 return -EINVAL;
279
280 *val = readl(ndev->reg_ofs.spad_write + idx * 4);
281 dev_dbg(&ndev->pdev->dev,
282 "Reading %x from local scratch pad index %d\n", *val, idx);
283
284 return 0;
285}
286
287/**
288 * ntb_write_remote_spad() - write to the secondary scratchpad register
289 * @ndev: pointer to ntb_device instance
290 * @idx: index to the scratchpad register, 0 based
291 * @val: the data value to put into the register
292 *
293 * This function allows writing of a 32bit value to the indexed scratchpad
294 * register. The register resides on the secondary (external) side. This allows
295 * the local system to write data to be mirrored to the remote system's
296 * scratchpad register.
297 *
298 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
299 */
300int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
301{
302 if (idx >= ndev->limits.max_spads)
303 return -EINVAL;
304
305 dev_dbg(&ndev->pdev->dev, "Writing %x to remote scratch pad index %d\n",
306 val, idx);
307 writel(val, ndev->reg_ofs.spad_write + idx * 4);
308
309 return 0;
310}
311
312/**
313 * ntb_read_remote_spad() - read from the remote scratchpad register
314 * @ndev: pointer to ntb_device instance
315 * @idx: index to scratchpad register, 0 based
316 * @val: pointer to 32bit integer for storing the register value
317 *
318 * This function allows reading of the 32bit scratchpad register on
319 * the primary (internal) side. This allows the local system to read the data
320 * it wrote to be mirrored on the remote system.
321 *
322 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
323 */
324int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
325{
326 if (idx >= ndev->limits.max_spads)
327 return -EINVAL;
328
329 *val = readl(ndev->reg_ofs.spad_read + idx * 4);
330 dev_dbg(&ndev->pdev->dev,
331 "Reading %x from remote scratch pad index %d\n", *val, idx);
332
333 return 0;
334}
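/*
 * Minimal usage sketch, not part of this file: a two-sided handshake over
 * scratchpad 0.  The index and the value are arbitrary; the peer side is
 * expected to run the mirror-image sequence.
 */
static int example_spad_handshake(struct ntb_device *ndev)
{
	u32 peer_val;
	int rc;

	/* publish our value into the peer's scratchpad */
	rc = ntb_write_remote_spad(ndev, 0, 1);
	if (rc)
		return rc;

	/* read back whatever the peer mirrored into our local scratchpad */
	rc = ntb_read_local_spad(ndev, 0, &peer_val);
	if (rc)
		return rc;

	return peer_val == 1 ? 0 : -EAGAIN;
}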
335
336/**
337 * ntb_get_mw_vbase() - get virtual addr for the NTB memory window
338 * @ndev: pointer to ntb_device instance
339 * @mw: memory window number
340 *
341 * This function provides the base virtual address of the memory window
342 * specified.
343 *
344 * RETURNS: pointer to virtual address, or NULL on error.
345 */
346void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
347{
348 if (mw > NTB_NUM_MW)
349 return NULL;
350
351 return ndev->mw[mw].vbase;
352}
353
354/**
355 * ntb_get_mw_size() - return size of NTB memory window
356 * @ndev: pointer to ntb_device instance
357 * @mw: memory window number
358 *
359 * This function provides the physical size of the memory window specified
360 *
361 * RETURNS: the size of the memory window or zero on error
362 */
363resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
364{
365 if (mw > NTB_NUM_MW)
366 return 0;
367
368 return ndev->mw[mw].bar_sz;
369}
370
371/**
372 * ntb_set_mw_addr - set the memory window address
373 * @ndev: pointer to ntb_device instance
374 * @mw: memory window number
375 * @addr: base address for data
376 *
377 * This function sets the base physical address of the memory window. This
378 * memory address is where data from the remote system will be transferred into
379 * or out of depending on how the transport is configured.
380 */
381void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
382{
383 if (mw > NTB_NUM_MW)
384 return;
385
386 dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
387 MW_TO_BAR(mw));
388
389 ndev->mw[mw].phys_addr = addr;
390
391 switch (MW_TO_BAR(mw)) {
392 case NTB_BAR_23:
393 writeq(addr, ndev->reg_ofs.sbar2_xlat);
394 break;
395 case NTB_BAR_45:
396 writeq(addr, ndev->reg_ofs.sbar4_xlat);
397 break;
398 }
399}
400
401/**
402 * ntb_ring_sdb() - Set the doorbell on the secondary/external side
403 * @ndev: pointer to ntb_device instance
404 * @db: doorbell to ring
405 *
406 * This function allows triggering of a doorbell on the secondary/external
407 * side that will initiate an interrupt on the remote host.
408 *
409 * RETURNS: Nothing; the doorbell write has no status to report.
410 */
411void ntb_ring_sdb(struct ntb_device *ndev, unsigned int db)
412{
413 dev_dbg(&ndev->pdev->dev, "%s: ringing doorbell %d\n", __func__, db);
414
415 if (ndev->hw_type == BWD_HW)
416 writeq((u64) 1 << db, ndev->reg_ofs.sdb);
417 else
418 writew(((1 << ndev->bits_per_vector) - 1) <<
419 (db * ndev->bits_per_vector), ndev->reg_ofs.sdb);
420}
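/*
 * Minimal usage sketch, not part of this file: exposing a local DMA buffer
 * through memory window 0 and ringing doorbell 0 so the peer knows the
 * window is ready.  Buffer allocation and the chosen indices are up to the
 * caller and purely illustrative.
 */
static void example_publish_window(struct ntb_device *ndev, dma_addr_t buf)
{
	/* peer writes landing in MW 0 will now hit 'buf' */
	ntb_set_mw_addr(ndev, 0, buf);

	/* trigger an interrupt on the remote host */
	ntb_ring_sdb(ndev, 0);
}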
421
422static void ntb_link_event(struct ntb_device *ndev, int link_state)
423{
424 unsigned int event;
425
426 if (ndev->link_status == link_state)
427 return;
428
429 if (link_state == NTB_LINK_UP) {
430 u16 status;
431
432 dev_info(&ndev->pdev->dev, "Link Up\n");
433 ndev->link_status = NTB_LINK_UP;
434 event = NTB_EVENT_HW_LINK_UP;
435
436 if (ndev->hw_type == BWD_HW)
437 status = readw(ndev->reg_ofs.lnk_stat);
438 else {
439 int rc = pci_read_config_word(ndev->pdev,
440 SNB_LINK_STATUS_OFFSET,
441 &status);
442 if (rc)
443 return;
444 }
445 dev_info(&ndev->pdev->dev, "Link Width %d, Link Speed %d\n",
446 (status & NTB_LINK_WIDTH_MASK) >> 4,
447 (status & NTB_LINK_SPEED_MASK));
448 } else {
449 dev_info(&ndev->pdev->dev, "Link Down\n");
450 ndev->link_status = NTB_LINK_DOWN;
451 event = NTB_EVENT_HW_LINK_DOWN;
452 }
453
454 /* notify the upper layer if we have an event change */
455 if (ndev->event_cb)
456 ndev->event_cb(ndev->ntb_transport, event);
457}
458
459static int ntb_link_status(struct ntb_device *ndev)
460{
461 int link_state;
462
463 if (ndev->hw_type == BWD_HW) {
464 u32 ntb_cntl;
465
466 ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
467 if (ntb_cntl & BWD_CNTL_LINK_DOWN)
468 link_state = NTB_LINK_DOWN;
469 else
470 link_state = NTB_LINK_UP;
471 } else {
472 u16 status;
473 int rc;
474
475 rc = pci_read_config_word(ndev->pdev, SNB_LINK_STATUS_OFFSET,
476 &status);
477 if (rc)
478 return rc;
479
480 if (status & NTB_LINK_STATUS_ACTIVE)
481 link_state = NTB_LINK_UP;
482 else
483 link_state = NTB_LINK_DOWN;
484 }
485
486 ntb_link_event(ndev, link_state);
487
488 return 0;
489}
490
491/* BWD doesn't have a link status interrupt, so poll on that platform */
492static void bwd_link_poll(struct work_struct *work)
493{
494 struct ntb_device *ndev = container_of(work, struct ntb_device,
495 hb_timer.work);
496 unsigned long ts = jiffies;
497
498 /* If we haven't gotten an interrupt in a while, check the BWD link
499 * status bit
500 */
501 if (ts > ndev->last_ts + NTB_HB_TIMEOUT) {
502 int rc = ntb_link_status(ndev);
503 if (rc)
504 dev_err(&ndev->pdev->dev,
505 "Error determining link status\n");
506 }
507
508 schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
509}
510
511static int ntb_xeon_setup(struct ntb_device *ndev)
512{
513 int rc;
514 u8 val;
515
516 ndev->hw_type = SNB_HW;
517
518 rc = pci_read_config_byte(ndev->pdev, NTB_PPD_OFFSET, &val);
519 if (rc)
520 return rc;
521
522 switch (val & SNB_PPD_CONN_TYPE) {
523 case NTB_CONN_B2B:
524 ndev->conn_type = NTB_CONN_B2B;
525 break;
526 case NTB_CONN_CLASSIC:
527 case NTB_CONN_RP:
528 default:
529 dev_err(&ndev->pdev->dev, "Only B2B supported at this time\n");
530 return -EINVAL;
531 }
532
533 if (val & SNB_PPD_DEV_TYPE)
534 ndev->dev_type = NTB_DEV_DSD;
535 else
536 ndev->dev_type = NTB_DEV_USD;
537
538 ndev->reg_ofs.pdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
539 ndev->reg_ofs.pdb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
540 ndev->reg_ofs.sbar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET;
541 ndev->reg_ofs.sbar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET;
542 ndev->reg_ofs.lnk_cntl = ndev->reg_base + SNB_NTBCNTL_OFFSET;
543 ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_LINK_STATUS_OFFSET;
544 ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET;
545 ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET;
546
547 if (ndev->conn_type == NTB_CONN_B2B) {
548 ndev->reg_ofs.sdb = ndev->reg_base + SNB_B2B_DOORBELL_OFFSET;
549 ndev->reg_ofs.spad_write = ndev->reg_base + SNB_B2B_SPAD_OFFSET;
550 ndev->limits.max_spads = SNB_MAX_SPADS;
551 } else {
552 ndev->reg_ofs.sdb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
553 ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET;
554 ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS;
555 }
556
557 ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
558 ndev->limits.msix_cnt = SNB_MSIX_CNT;
559 ndev->bits_per_vector = SNB_DB_BITS_PER_VEC;
560
561 return 0;
562}
563
564static int ntb_bwd_setup(struct ntb_device *ndev)
565{
566 int rc;
567 u32 val;
568
569 ndev->hw_type = BWD_HW;
570
571 rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &val);
572 if (rc)
573 return rc;
574
575 switch ((val & BWD_PPD_CONN_TYPE) >> 8) {
576 case NTB_CONN_B2B:
577 ndev->conn_type = NTB_CONN_B2B;
578 break;
579 case NTB_CONN_RP:
580 default:
581 dev_err(&ndev->pdev->dev, "Only B2B supported at this time\n");
582 return -EINVAL;
583 }
584
585 if (val & BWD_PPD_DEV_TYPE)
586 ndev->dev_type = NTB_DEV_DSD;
587 else
588 ndev->dev_type = NTB_DEV_USD;
589
590 /* Initiate PCI-E link training */
591 rc = pci_write_config_dword(ndev->pdev, NTB_PPD_OFFSET,
592 val | BWD_PPD_INIT_LINK);
593 if (rc)
594 return rc;
595
596 ndev->reg_ofs.pdb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
597 ndev->reg_ofs.pdb_mask = ndev->reg_base + BWD_PDBMSK_OFFSET;
598 ndev->reg_ofs.sbar2_xlat = ndev->reg_base + BWD_SBAR2XLAT_OFFSET;
599 ndev->reg_ofs.sbar4_xlat = ndev->reg_base + BWD_SBAR4XLAT_OFFSET;
600 ndev->reg_ofs.lnk_cntl = ndev->reg_base + BWD_NTBCNTL_OFFSET;
601 ndev->reg_ofs.lnk_stat = ndev->reg_base + BWD_LINK_STATUS_OFFSET;
602 ndev->reg_ofs.spad_read = ndev->reg_base + BWD_SPAD_OFFSET;
603 ndev->reg_ofs.spci_cmd = ndev->reg_base + BWD_PCICMD_OFFSET;
604
605 if (ndev->conn_type == NTB_CONN_B2B) {
606 ndev->reg_ofs.sdb = ndev->reg_base + BWD_B2B_DOORBELL_OFFSET;
607 ndev->reg_ofs.spad_write = ndev->reg_base + BWD_B2B_SPAD_OFFSET;
608 ndev->limits.max_spads = BWD_MAX_SPADS;
609 } else {
610 ndev->reg_ofs.sdb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
611 ndev->reg_ofs.spad_write = ndev->reg_base + BWD_SPAD_OFFSET;
612 ndev->limits.max_spads = BWD_MAX_COMPAT_SPADS;
613 }
614
615 ndev->limits.max_db_bits = BWD_MAX_DB_BITS;
616 ndev->limits.msix_cnt = BWD_MSIX_CNT;
617 ndev->bits_per_vector = BWD_DB_BITS_PER_VEC;
618
619 /* Since bwd doesn't have a link interrupt, setup a poll timer */
620 INIT_DELAYED_WORK(&ndev->hb_timer, bwd_link_poll);
621 schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
622
623 return 0;
624}
625
626static int ntb_device_setup(struct ntb_device *ndev)
627{
628 int rc;
629
630 switch (ndev->pdev->device) {
631 case PCI_DEVICE_ID_INTEL_NTB_2ND_SNB:
632 case PCI_DEVICE_ID_INTEL_NTB_RP_JSF:
633 case PCI_DEVICE_ID_INTEL_NTB_RP_SNB:
634 case PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF:
635 case PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB:
636 case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
637 case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
638 rc = ntb_xeon_setup(ndev);
639 break;
640 case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
641 rc = ntb_bwd_setup(ndev);
642 break;
643 default:
644 rc = -ENODEV;
645 }
646
647 /* Enable Bus Master and Memory Space on the secondary side */
648 writew(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, ndev->reg_ofs.spci_cmd);
649
650 return rc;
651}
652
653static void ntb_device_free(struct ntb_device *ndev)
654{
655 if (ndev->hw_type == BWD_HW)
656 cancel_delayed_work_sync(&ndev->hb_timer);
657}
658
659static irqreturn_t bwd_callback_msix_irq(int irq, void *data)
660{
661 struct ntb_db_cb *db_cb = data;
662 struct ntb_device *ndev = db_cb->ndev;
663
664 dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
665 db_cb->db_num);
666
667 if (db_cb->callback)
668 db_cb->callback(db_cb->data, db_cb->db_num);
669
670 /* No need to check for the specific HB irq, any interrupt means
671 * we're connected.
672 */
673 ndev->last_ts = jiffies;
674
675 writeq((u64) 1 << db_cb->db_num, ndev->reg_ofs.pdb);
676
677 return IRQ_HANDLED;
678}
679
680static irqreturn_t xeon_callback_msix_irq(int irq, void *data)
681{
682 struct ntb_db_cb *db_cb = data;
683 struct ntb_device *ndev = db_cb->ndev;
684
685 dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
686 db_cb->db_num);
687
688 if (db_cb->callback)
689 db_cb->callback(db_cb->data, db_cb->db_num);
690
691 /* On Sandybridge, there are 16 bits in the interrupt register
692 * but only 4 vectors. So, 5 bits are assigned to the first 3
693 * vectors, with the 4th having a single bit for link
694 * interrupts.
695 */
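	/* e.g. with 5 bits per vector, db_num 1 acks bits 5-9: mask 0x03e0 */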
696 writew(((1 << ndev->bits_per_vector) - 1) <<
697 (db_cb->db_num * ndev->bits_per_vector), ndev->reg_ofs.pdb);
698
699 return IRQ_HANDLED;
700}
701
702/* Since we do not have a HW doorbell in BWD, this is only used in JF/JT */
703static irqreturn_t xeon_event_msix_irq(int irq, void *dev)
704{
705 struct ntb_device *ndev = dev;
706 int rc;
707
708 dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for Events\n", irq);
709
710 rc = ntb_link_status(ndev);
711 if (rc)
712 dev_err(&ndev->pdev->dev, "Error determining link status\n");
713
714 /* bit 15 is always the link bit */
715 writew(1 << ndev->limits.max_db_bits, ndev->reg_ofs.pdb);
716
717 return IRQ_HANDLED;
718}
719
720static irqreturn_t ntb_interrupt(int irq, void *dev)
721{
722 struct ntb_device *ndev = dev;
723 unsigned int i = 0;
724
725 if (ndev->hw_type == BWD_HW) {
726 u64 pdb = readq(ndev->reg_ofs.pdb);
727
728 dev_dbg(&ndev->pdev->dev, "irq %d - pdb = %Lx\n", irq, pdb);
729
730 while (pdb) {
731 i = __ffs(pdb);
732 pdb &= pdb - 1;
733 bwd_callback_msix_irq(irq, &ndev->db_cb[i]);
734 }
735 } else {
736 u16 pdb = readw(ndev->reg_ofs.pdb);
737
738 dev_dbg(&ndev->pdev->dev, "irq %d - pdb = %x sdb %x\n", irq,
739 pdb, readw(ndev->reg_ofs.sdb));
740
741 if (pdb & SNB_DB_HW_LINK) {
742 xeon_event_msix_irq(irq, dev);
743 pdb &= ~SNB_DB_HW_LINK;
744 }
745
746 while (pdb) {
747 i = __ffs(pdb);
748 pdb &= pdb - 1;
749 xeon_callback_msix_irq(irq, &ndev->db_cb[i]);
750 }
751 }
752
753 return IRQ_HANDLED;
754}
755
756static int ntb_setup_msix(struct ntb_device *ndev)
757{
758 struct pci_dev *pdev = ndev->pdev;
759 struct msix_entry *msix;
760 int msix_entries;
761 int rc, i, pos;
762 u16 val;
763
764 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
765 if (!pos) {
766 rc = -EIO;
767 goto err;
768 }
769
770 rc = pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &val);
771 if (rc)
772 goto err;
773
774 msix_entries = msix_table_size(val);
775 if (msix_entries > ndev->limits.msix_cnt) {
776 rc = -EINVAL;
777 goto err;
778 }
779
780 ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
781 GFP_KERNEL);
782 if (!ndev->msix_entries) {
783 rc = -ENOMEM;
784 goto err;
785 }
786
787 for (i = 0; i < msix_entries; i++)
788 ndev->msix_entries[i].entry = i;
789
790 rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
791 if (rc < 0)
792 goto err1;
793 if (rc > 0) {
794 /* On SNB, the link interrupt is always tied to 4th vector. If
795 * we can't get all 4, then we can't use MSI-X.
796 */
797 if (ndev->hw_type != BWD_HW) {
798 rc = -EIO;
799 goto err1;
800 }
801
802 dev_warn(&pdev->dev,
803 "Only %d MSI-X vectors. Limiting the number of queues to that number.\n",
804 rc);
805 msix_entries = rc;
806 }
807
808 for (i = 0; i < msix_entries; i++) {
809 msix = &ndev->msix_entries[i];
810 WARN_ON(!msix->vector);
811
812 /* Use the last MSI-X vector for Link status */
813 if (ndev->hw_type == BWD_HW) {
814 rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
815 "ntb-callback-msix", &ndev->db_cb[i]);
816 if (rc)
817 goto err2;
818 } else {
819 if (i == msix_entries - 1) {
820 rc = request_irq(msix->vector,
821 xeon_event_msix_irq, 0,
822 "ntb-event-msix", ndev);
823 if (rc)
824 goto err2;
825 } else {
826 rc = request_irq(msix->vector,
827 xeon_callback_msix_irq, 0,
828 "ntb-callback-msix",
829 &ndev->db_cb[i]);
830 if (rc)
831 goto err2;
832 }
833 }
834 }
835
836 ndev->num_msix = msix_entries;
837 if (ndev->hw_type == BWD_HW)
838 ndev->max_cbs = msix_entries;
839 else
840 ndev->max_cbs = msix_entries - 1;
841
842 return 0;
843
844err2:
845 while (--i >= 0) {
846 msix = &ndev->msix_entries[i];
847 if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
848 free_irq(msix->vector, ndev);
849 else
850 free_irq(msix->vector, &ndev->db_cb[i]);
851 }
852 pci_disable_msix(pdev);
853err1:
854 kfree(ndev->msix_entries);
855 dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
856err:
857 ndev->num_msix = 0;
858 return rc;
859}
860
861static int ntb_setup_msi(struct ntb_device *ndev)
862{
863 struct pci_dev *pdev = ndev->pdev;
864 int rc;
865
866 rc = pci_enable_msi(pdev);
867 if (rc)
868 return rc;
869
870 rc = request_irq(pdev->irq, ntb_interrupt, 0, "ntb-msi", ndev);
871 if (rc) {
872 pci_disable_msi(pdev);
873 dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
874 return rc;
875 }
876
877 return 0;
878}
879
880static int ntb_setup_intx(struct ntb_device *ndev)
881{
882 struct pci_dev *pdev = ndev->pdev;
883 int rc;
884
885 pci_msi_off(pdev);
886
887 /* Verify intx is enabled */
888 pci_intx(pdev, 1);
889
890 rc = request_irq(pdev->irq, ntb_interrupt, IRQF_SHARED, "ntb-intx",
891 ndev);
892 if (rc)
893 return rc;
894
895 return 0;
896}
897
898static int ntb_setup_interrupts(struct ntb_device *ndev)
899{
900 int rc;
901
902 /* On BWD, disable all interrupts. On SNB, disable all but Link
903 * Interrupt. The rest will be unmasked as callbacks are registered.
904 */
905 if (ndev->hw_type == BWD_HW)
906 writeq(~0, ndev->reg_ofs.pdb_mask);
907 else
908 writew(~(1 << ndev->limits.max_db_bits),
909 ndev->reg_ofs.pdb_mask);
910
911 rc = ntb_setup_msix(ndev);
912 if (!rc)
913 goto done;
914
915 ndev->bits_per_vector = 1;
916 ndev->max_cbs = ndev->limits.max_db_bits;
917
918 rc = ntb_setup_msi(ndev);
919 if (!rc)
920 goto done;
921
922 rc = ntb_setup_intx(ndev);
923 if (rc) {
924 dev_err(&ndev->pdev->dev, "no usable interrupts\n");
925 return rc;
926 }
927
928done:
929 return 0;
930}
931
932static void ntb_free_interrupts(struct ntb_device *ndev)
933{
934 struct pci_dev *pdev = ndev->pdev;
935
936 /* mask interrupts */
937 if (ndev->hw_type == BWD_HW)
938 writeq(~0, ndev->reg_ofs.pdb_mask);
939 else
940 writew(~0, ndev->reg_ofs.pdb_mask);
941
942 if (ndev->num_msix) {
943 struct msix_entry *msix;
944 u32 i;
945
946 for (i = 0; i < ndev->num_msix; i++) {
947 msix = &ndev->msix_entries[i];
948 if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
949 free_irq(msix->vector, ndev);
950 else
951 free_irq(msix->vector, &ndev->db_cb[i]);
952 }
953 pci_disable_msix(pdev);
954 } else {
955 free_irq(pdev->irq, ndev);
956
957 if (pci_dev_msi_enabled(pdev))
958 pci_disable_msi(pdev);
959 }
960}
961
962static int ntb_create_callbacks(struct ntb_device *ndev)
963{
964 int i;
965
966	/* Chicken-and-egg issue. We won't know how many callbacks are necessary
967 * until we see how many MSI-X vectors we get, but these pointers need
968 * to be passed into the MSI-X registration function. So, we allocate the
969 * max, knowing that they might not all be used, to work around this.
970 */
971 ndev->db_cb = kcalloc(ndev->limits.max_db_bits,
972 sizeof(struct ntb_db_cb),
973 GFP_KERNEL);
974 if (!ndev->db_cb)
975 return -ENOMEM;
976
977 for (i = 0; i < ndev->limits.max_db_bits; i++) {
978 ndev->db_cb[i].db_num = i;
979 ndev->db_cb[i].ndev = ndev;
980 }
981
982 return 0;
983}
984
985static void ntb_free_callbacks(struct ntb_device *ndev)
986{
987 int i;
988
989 for (i = 0; i < ndev->limits.max_db_bits; i++)
990 ntb_unregister_db_callback(ndev, i);
991
992 kfree(ndev->db_cb);
993}
994
995static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
996{
997 struct ntb_device *ndev;
998 int rc, i;
999
1000 ndev = kzalloc(sizeof(struct ntb_device), GFP_KERNEL);
1001 if (!ndev)
1002 return -ENOMEM;
1003
1004 ndev->pdev = pdev;
1005 ndev->link_status = NTB_LINK_DOWN;
1006 pci_set_drvdata(pdev, ndev);
1007
1008 rc = pci_enable_device(pdev);
1009 if (rc)
1010 goto err;
1011
1012 pci_set_master(ndev->pdev);
1013
1014 rc = pci_request_selected_regions(pdev, NTB_BAR_MASK, KBUILD_MODNAME);
1015 if (rc)
1016 goto err1;
1017
1018 ndev->reg_base = pci_ioremap_bar(pdev, NTB_BAR_MMIO);
1019 if (!ndev->reg_base) {
1020 dev_warn(&pdev->dev, "Cannot remap BAR 0\n");
1021 rc = -EIO;
1022 goto err2;
1023 }
1024
1025 for (i = 0; i < NTB_NUM_MW; i++) {
1026 ndev->mw[i].bar_sz = pci_resource_len(pdev, MW_TO_BAR(i));
1027 ndev->mw[i].vbase =
1028 ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
1029 ndev->mw[i].bar_sz);
1030 dev_info(&pdev->dev, "MW %d size %d\n", i,
1031 (u32) pci_resource_len(pdev, MW_TO_BAR(i)));
1032 if (!ndev->mw[i].vbase) {
1033 dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
1034 MW_TO_BAR(i));
1035 rc = -EIO;
1036 goto err3;
1037 }
1038 }
1039
1040 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1041 if (rc) {
1042 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1043 if (rc)
1044 goto err3;
1045
1046 dev_warn(&pdev->dev, "Cannot DMA highmem\n");
1047 }
1048
1049 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1050 if (rc) {
1051 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1052 if (rc)
1053 goto err3;
1054
1055 dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
1056 }
1057
1058 rc = ntb_device_setup(ndev);
1059 if (rc)
1060 goto err3;
1061
1062 rc = ntb_create_callbacks(ndev);
1063 if (rc)
1064 goto err4;
1065
1066 rc = ntb_setup_interrupts(ndev);
1067 if (rc)
1068 goto err5;
1069
1070 /* The scratchpad registers keep the values between rmmod/insmod,
1071 * blast them now
1072 */
1073 for (i = 0; i < ndev->limits.max_spads; i++) {
1074 ntb_write_local_spad(ndev, i, 0);
1075 ntb_write_remote_spad(ndev, i, 0);
1076 }
1077
1078 rc = ntb_transport_init(pdev);
1079 if (rc)
1080 goto err6;
1081
1082 /* Let's bring the NTB link up */
1083 writel(NTB_CNTL_BAR23_SNOOP | NTB_CNTL_BAR45_SNOOP,
1084 ndev->reg_ofs.lnk_cntl);
1085
1086 return 0;
1087
1088err6:
1089 ntb_free_interrupts(ndev);
1090err5:
1091 ntb_free_callbacks(ndev);
1092err4:
1093 ntb_device_free(ndev);
1094err3:
1095 for (i--; i >= 0; i--)
1096 iounmap(ndev->mw[i].vbase);
1097 iounmap(ndev->reg_base);
1098err2:
1099 pci_release_selected_regions(pdev, NTB_BAR_MASK);
1100err1:
1101 pci_disable_device(pdev);
1102err:
1103 kfree(ndev);
1104
1105 dev_err(&pdev->dev, "Error loading %s module\n", KBUILD_MODNAME);
1106 return rc;
1107}
1108
1109static void ntb_pci_remove(struct pci_dev *pdev)
1110{
1111 struct ntb_device *ndev = pci_get_drvdata(pdev);
1112 int i;
1113 u32 ntb_cntl;
1114
1115 /* Bring NTB link down */
1116 ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
1117 ntb_cntl |= NTB_LINK_DISABLE;
1118 writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
1119
1120 ntb_transport_free(ndev->ntb_transport);
1121
1122 ntb_free_interrupts(ndev);
1123 ntb_free_callbacks(ndev);
1124 ntb_device_free(ndev);
1125
1126 for (i = 0; i < NTB_NUM_MW; i++)
1127 iounmap(ndev->mw[i].vbase);
1128
1129 iounmap(ndev->reg_base);
1130 pci_release_selected_regions(pdev, NTB_BAR_MASK);
1131 pci_disable_device(pdev);
1132 kfree(ndev);
1133}
1134
1135static struct pci_driver ntb_pci_driver = {
1136 .name = KBUILD_MODNAME,
1137 .id_table = ntb_pci_tbl,
1138 .probe = ntb_pci_probe,
1139 .remove = ntb_pci_remove,
1140};
1141module_pci_driver(ntb_pci_driver);
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
new file mode 100644
index 000000000000..3a3038ca83e6
--- /dev/null
+++ b/drivers/ntb/ntb_hw.h
@@ -0,0 +1,181 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2012 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * BSD LICENSE
14 *
15 * Copyright(c) 2012 Intel Corporation. All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * * Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * * Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in
25 * the documentation and/or other materials provided with the
26 * distribution.
27 * * Neither the name of Intel Corporation nor the names of its
28 * contributors may be used to endorse or promote products derived
29 * from this software without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *
43 * Intel PCIe NTB Linux driver
44 *
45 * Contact Information:
46 * Jon Mason <jon.mason@intel.com>
47 */
48
49#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
50#define PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF 0x3726
51#define PCI_DEVICE_ID_INTEL_NTB_RP_JSF 0x3727
52#define PCI_DEVICE_ID_INTEL_NTB_RP_SNB 0x3C08
53#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB 0x3C0D
54#define PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB 0x3C0E
55#define PCI_DEVICE_ID_INTEL_NTB_2ND_SNB 0x3C0F
56#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
57
58#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
59
60#define NTB_BAR_MMIO 0
61#define NTB_BAR_23 2
62#define NTB_BAR_45 4
63#define NTB_BAR_MASK ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
64 (1 << NTB_BAR_45))
65
66#define NTB_LINK_DOWN 0
67#define NTB_LINK_UP 1
68
69#define NTB_HB_TIMEOUT msecs_to_jiffies(1000)
70
71#define NTB_NUM_MW 2
72
73enum ntb_hw_event {
74 NTB_EVENT_SW_EVENT0 = 0,
75 NTB_EVENT_SW_EVENT1,
76 NTB_EVENT_SW_EVENT2,
77 NTB_EVENT_HW_ERROR,
78 NTB_EVENT_HW_LINK_UP,
79 NTB_EVENT_HW_LINK_DOWN,
80};
81
82struct ntb_mw {
83 dma_addr_t phys_addr;
84 void __iomem *vbase;
85 resource_size_t bar_sz;
86};
87
88struct ntb_db_cb {
89 void (*callback) (void *data, int db_num);
90 unsigned int db_num;
91 void *data;
92 struct ntb_device *ndev;
93};
94
95struct ntb_device {
96 struct pci_dev *pdev;
97 struct msix_entry *msix_entries;
98 void __iomem *reg_base;
99 struct ntb_mw mw[NTB_NUM_MW];
100 struct {
101 unsigned int max_spads;
102 unsigned int max_db_bits;
103 unsigned int msix_cnt;
104 } limits;
105 struct {
106 void __iomem *pdb;
107 void __iomem *pdb_mask;
108 void __iomem *sdb;
109 void __iomem *sbar2_xlat;
110 void __iomem *sbar4_xlat;
111 void __iomem *spad_write;
112 void __iomem *spad_read;
113 void __iomem *lnk_cntl;
114 void __iomem *lnk_stat;
115 void __iomem *spci_cmd;
116 } reg_ofs;
117 struct ntb_transport *ntb_transport;
118 void (*event_cb)(void *handle, enum ntb_hw_event event);
119
120 struct ntb_db_cb *db_cb;
121 unsigned char hw_type;
122 unsigned char conn_type;
123 unsigned char dev_type;
124 unsigned char num_msix;
125 unsigned char bits_per_vector;
126 unsigned char max_cbs;
127 unsigned char link_status;
128 struct delayed_work hb_timer;
129 unsigned long last_ts;
130};
131
132/**
133 * ntb_hw_link_status() - return the hardware link status
134 * @ndev: pointer to ntb_device instance
135 *
136 * Returns true if the hardware is connected to the remote system
137 *
138 * RETURNS: true or false based on the hardware link state
139 */
140static inline bool ntb_hw_link_status(struct ntb_device *ndev)
141{
142 return ndev->link_status == NTB_LINK_UP;
143}
144
145/**
146 * ntb_query_pdev() - return the pci_dev pointer
147 * @ndev: pointer to ntb_device instance
148 *
149 * Given the ntb pointer, return the pci_dev pointer for the NTB hardware device
150 *
151 * RETURNS: a pointer to the ntb pci_dev
152 */
153static inline struct pci_dev *ntb_query_pdev(struct ntb_device *ndev)
154{
155 return ndev->pdev;
156}
157
158struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
159 void *transport);
160void ntb_unregister_transport(struct ntb_device *ndev);
161void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr);
162int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
163 void *data, void (*db_cb_func) (void *data,
164 int db_num));
165void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
166int ntb_register_event_callback(struct ntb_device *ndev,
167 void (*event_cb_func) (void *handle,
168 enum ntb_hw_event event));
169void ntb_unregister_event_callback(struct ntb_device *ndev);
170int ntb_get_max_spads(struct ntb_device *ndev);
171int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
172int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
173int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
174int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
175void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw);
176resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw);
177void ntb_ring_sdb(struct ntb_device *ndev, unsigned int idx);
178void *ntb_find_transport(struct pci_dev *pdev);
179
180int ntb_transport_init(struct pci_dev *pdev);
181void ntb_transport_free(void *transport);
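
For orientation, here is an editor's sketch (not part of the patch) of how a transport layer might drive the scratchpad and doorbell API declared above; the "example_*" names are hypothetical, only the ntb_* calls and their signatures come from this header.

    #include <linux/pci.h>
    #include "ntb_hw.h"

    static void example_db_callback(void *data, int db_num)
    {
            /* the peer rang doorbell db_num; "data" was registered below */
    }

    static int example_attach(struct pci_dev *pdev, void *transport)
    {
            struct ntb_device *ndev;
            u32 peer_val;
            int rc;

            ndev = ntb_register_transport(pdev, transport);
            if (!ndev)
                    return -EIO;

            /* publish a value to the peer, then read back what it wrote */
            rc = ntb_write_remote_spad(ndev, 0, 0x1234);
            if (!rc)
                    rc = ntb_read_remote_spad(ndev, 0, &peer_val);

            /* be notified when the peer rings doorbell 0, then ring ours */
            if (!rc)
                    rc = ntb_register_db_callback(ndev, 0, transport,
                                                  example_db_callback);
            if (!rc)
                    ntb_ring_sdb(ndev, 0);

            if (rc)
                    ntb_unregister_transport(ndev);
            return rc;
    }
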
diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
new file mode 100644
index 000000000000..5bfa8c06c059
--- /dev/null
+++ b/drivers/ntb/ntb_regs.h
@@ -0,0 +1,139 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2012 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * BSD LICENSE
14 *
15 * Copyright(c) 2012 Intel Corporation. All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * * Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * * Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in
25 * the documentation and/or other materials provided with the
26 * distribution.
27 * * Neither the name of Intel Corporation nor the names of its
28 * contributors may be used to endorse or promote products derived
29 * from this software without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *
43 * Intel PCIe NTB Linux driver
44 *
45 * Contact Information:
46 * Jon Mason <jon.mason@intel.com>
47 */
48
49#define NTB_LINK_ENABLE 0x0000
50#define NTB_LINK_DISABLE 0x0002
51#define NTB_LINK_STATUS_ACTIVE 0x2000
52#define NTB_LINK_SPEED_MASK 0x000f
53#define NTB_LINK_WIDTH_MASK 0x03f0
54
55#define SNB_MSIX_CNT 4
56#define SNB_MAX_SPADS 16
57#define SNB_MAX_COMPAT_SPADS 8
58/* Reserve the uppermost bit for link interrupt */
59#define SNB_MAX_DB_BITS 15
60#define SNB_DB_BITS_PER_VEC 5
61
62#define SNB_DB_HW_LINK 0x8000
63
64#define SNB_PCICMD_OFFSET 0x0504
65#define SNB_DEVCTRL_OFFSET 0x0598
66#define SNB_LINK_STATUS_OFFSET 0x01A2
67
68#define SNB_PBAR2LMT_OFFSET 0x0000
69#define SNB_PBAR4LMT_OFFSET 0x0008
70#define SNB_PBAR2XLAT_OFFSET 0x0010
71#define SNB_PBAR4XLAT_OFFSET 0x0018
72#define SNB_SBAR2LMT_OFFSET 0x0020
73#define SNB_SBAR4LMT_OFFSET 0x0028
74#define SNB_SBAR2XLAT_OFFSET 0x0030
75#define SNB_SBAR4XLAT_OFFSET 0x0038
76#define SNB_SBAR0BASE_OFFSET 0x0040
77#define SNB_SBAR2BASE_OFFSET 0x0048
78#define SNB_SBAR4BASE_OFFSET 0x0050
79#define SNB_NTBCNTL_OFFSET 0x0058
80#define SNB_SBDF_OFFSET 0x005C
81#define SNB_PDOORBELL_OFFSET 0x0060
82#define SNB_PDBMSK_OFFSET 0x0062
83#define SNB_SDOORBELL_OFFSET 0x0064
84#define SNB_SDBMSK_OFFSET 0x0066
85#define SNB_USMEMMISS 0x0070
86#define SNB_SPAD_OFFSET 0x0080
87#define SNB_SPADSEMA4_OFFSET 0x00c0
88#define SNB_WCCNTRL_OFFSET 0x00e0
89#define SNB_B2B_SPAD_OFFSET 0x0100
90#define SNB_B2B_DOORBELL_OFFSET 0x0140
91#define SNB_B2B_XLAT_OFFSET 0x0144
92
93#define BWD_MSIX_CNT 34
94#define BWD_MAX_SPADS 16
95#define BWD_MAX_COMPAT_SPADS 16
96#define BWD_MAX_DB_BITS 34
97#define BWD_DB_BITS_PER_VEC 1
98
99#define BWD_PCICMD_OFFSET 0xb004
100#define BWD_MBAR23_OFFSET 0xb018
101#define BWD_MBAR45_OFFSET 0xb020
102#define BWD_DEVCTRL_OFFSET 0xb048
103#define BWD_LINK_STATUS_OFFSET 0xb052
104
105#define BWD_SBAR2XLAT_OFFSET 0x0008
106#define BWD_SBAR4XLAT_OFFSET 0x0010
107#define BWD_PDOORBELL_OFFSET 0x0020
108#define BWD_PDBMSK_OFFSET 0x0028
109#define BWD_NTBCNTL_OFFSET 0x0060
110#define BWD_EBDF_OFFSET 0x0064
111#define BWD_SPAD_OFFSET 0x0080
112#define BWD_SPADSEMA_OFFSET 0x00c0
113#define BWD_STKYSPAD_OFFSET 0x00c4
114#define BWD_PBAR2XLAT_OFFSET 0x8008
115#define BWD_PBAR4XLAT_OFFSET 0x8010
116#define BWD_B2B_DOORBELL_OFFSET 0x8020
117#define BWD_B2B_SPAD_OFFSET 0x8080
118#define BWD_B2B_SPADSEMA_OFFSET 0x80c0
119#define BWD_B2B_STKYSPAD_OFFSET 0x80c4
120
121#define NTB_CNTL_BAR23_SNOOP (1 << 2)
122#define NTB_CNTL_BAR45_SNOOP (1 << 6)
123#define BWD_CNTL_LINK_DOWN (1 << 16)
124
125#define NTB_PPD_OFFSET 0x00D4
126#define SNB_PPD_CONN_TYPE 0x0003
127#define SNB_PPD_DEV_TYPE 0x0010
128#define BWD_PPD_INIT_LINK 0x0008
129#define BWD_PPD_CONN_TYPE 0x0300
130#define BWD_PPD_DEV_TYPE 0x1000
131
132#define BWD_PBAR2XLAT_USD_ADDR 0x0000004000000000
133#define BWD_PBAR4XLAT_USD_ADDR 0x0000008000000000
134#define BWD_MBAR23_USD_ADDR 0x000000410000000C
135#define BWD_MBAR45_USD_ADDR 0x000000810000000C
136#define BWD_PBAR2XLAT_DSD_ADDR 0x0000004100000000
137#define BWD_PBAR4XLAT_DSD_ADDR 0x0000008100000000
138#define BWD_MBAR23_DSD_ADDR 0x000000400000000C
139#define BWD_MBAR45_DSD_ADDR 0x000000800000000C
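
The hardware-access code that consumes these offsets (ntb_hw.c) is not part of this excerpt, so the following is only an editor's sketch of the assumed layout: the scratchpad registers are taken to be 32 bits wide and packed back to back above SNB_SPAD_OFFSET, so an individual index is reached with a 4-byte stride from the BAR 0 mapping.

    /* Editor's sketch - assumed usage of the offsets above, not from the patch */
    #include <linux/io.h>
    #include "ntb_regs.h"

    static u32 example_read_spad(void __iomem *reg_base, unsigned int idx)
    {
            return ioread32(reg_base + SNB_SPAD_OFFSET + idx * 4);
    }

    static void example_write_spad(void __iomem *reg_base, unsigned int idx, u32 val)
    {
            iowrite32(val, reg_base + SNB_SPAD_OFFSET + idx * 4);
    }
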
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
new file mode 100644
index 000000000000..e0bdfd7f9930
--- /dev/null
+++ b/drivers/ntb/ntb_transport.c
@@ -0,0 +1,1441 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2012 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * BSD LICENSE
14 *
15 * Copyright(c) 2012 Intel Corporation. All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * * Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * * Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in
25 * the documentation and/or other materials provided with the
26 * distribution.
27 * * Neither the name of Intel Corporation nor the names of its
28 * contributors may be used to endorse or promote products derived
29 * from this software without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *
43 * Intel PCIe NTB Linux driver
44 *
45 * Contact Information:
46 * Jon Mason <jon.mason@intel.com>
47 */
48#include <linux/debugfs.h>
49#include <linux/delay.h>
50#include <linux/dma-mapping.h>
51#include <linux/errno.h>
52#include <linux/export.h>
53#include <linux/interrupt.h>
54#include <linux/module.h>
55#include <linux/pci.h>
56#include <linux/slab.h>
57#include <linux/types.h>
58#include <linux/ntb.h>
59#include "ntb_hw.h"
60
61#define NTB_TRANSPORT_VERSION 2
62
63static unsigned int transport_mtu = 0x401E;
64module_param(transport_mtu, uint, 0644);
65MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
66
67static unsigned char max_num_clients = 2;
68module_param(max_num_clients, byte, 0644);
69MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
70
71struct ntb_queue_entry {
72 /* ntb_queue list reference */
73 struct list_head entry;
74 /* pointers to data to be transferred */
75 void *cb_data;
76 void *buf;
77 unsigned int len;
78 unsigned int flags;
79};
80
81struct ntb_rx_info {
82 unsigned int entry;
83};
84
85struct ntb_transport_qp {
86 struct ntb_transport *transport;
87 struct ntb_device *ndev;
88 void *cb_data;
89
90 bool client_ready;
91 bool qp_link;
92 u8 qp_num; /* Only 64 QP's are allowed. 0-63 */
93
94 struct ntb_rx_info __iomem *rx_info;
95 struct ntb_rx_info *remote_rx_info;
96
97 void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
98 void *data, int len);
99 struct list_head tx_free_q;
100 spinlock_t ntb_tx_free_q_lock;
101 void __iomem *tx_mw;
102 unsigned int tx_index;
103 unsigned int tx_max_entry;
104 unsigned int tx_max_frame;
105
106 void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
107 void *data, int len);
108 struct tasklet_struct rx_work;
109 struct list_head rx_pend_q;
110 struct list_head rx_free_q;
111 spinlock_t ntb_rx_pend_q_lock;
112 spinlock_t ntb_rx_free_q_lock;
113 void *rx_buff;
114 unsigned int rx_index;
115 unsigned int rx_max_entry;
116 unsigned int rx_max_frame;
117
118 void (*event_handler) (void *data, int status);
119 struct delayed_work link_work;
120 struct work_struct link_cleanup;
121
122 struct dentry *debugfs_dir;
123 struct dentry *debugfs_stats;
124
125 /* Stats */
126 u64 rx_bytes;
127 u64 rx_pkts;
128 u64 rx_ring_empty;
129 u64 rx_err_no_buf;
130 u64 rx_err_oflow;
131 u64 rx_err_ver;
132 u64 tx_bytes;
133 u64 tx_pkts;
134 u64 tx_ring_full;
135};
136
137struct ntb_transport_mw {
138 size_t size;
139 void *virt_addr;
140 dma_addr_t dma_addr;
141};
142
143struct ntb_transport_client_dev {
144 struct list_head entry;
145 struct device dev;
146};
147
148struct ntb_transport {
149 struct list_head entry;
150 struct list_head client_devs;
151
152 struct ntb_device *ndev;
153 struct ntb_transport_mw mw[NTB_NUM_MW];
154 struct ntb_transport_qp *qps;
155 unsigned int max_qps;
156 unsigned long qp_bitmap;
157 bool transport_link;
158 struct delayed_work link_work;
159 struct work_struct link_cleanup;
160 struct dentry *debugfs_dir;
161};
162
163enum {
164 DESC_DONE_FLAG = 1 << 0,
165 LINK_DOWN_FLAG = 1 << 1,
166};
167
168struct ntb_payload_header {
169 unsigned int ver;
170 unsigned int len;
171 unsigned int flags;
172};
173
174enum {
175 VERSION = 0,
176 MW0_SZ,
177 MW1_SZ,
178 NUM_QPS,
179 QP_LINKS,
180 MAX_SPAD,
181};
182
183#define QP_TO_MW(qp) ((qp) % NTB_NUM_MW)
184#define NTB_QP_DEF_NUM_ENTRIES 100
185#define NTB_LINK_DOWN_TIMEOUT 10
186
187static int ntb_match_bus(struct device *dev, struct device_driver *drv)
188{
189 return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
190}
191
192static int ntb_client_probe(struct device *dev)
193{
194 const struct ntb_client *drv = container_of(dev->driver,
195 struct ntb_client, driver);
196 struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
197 int rc = -EINVAL;
198
199 get_device(dev);
200 if (drv && drv->probe)
201 rc = drv->probe(pdev);
202 if (rc)
203 put_device(dev);
204
205 return rc;
206}
207
208static int ntb_client_remove(struct device *dev)
209{
210 const struct ntb_client *drv = container_of(dev->driver,
211 struct ntb_client, driver);
212 struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
213
214 if (drv && drv->remove)
215 drv->remove(pdev);
216
217 put_device(dev);
218
219 return 0;
220}
221
222static struct bus_type ntb_bus_type = {
223 .name = "ntb_bus",
224 .match = ntb_match_bus,
225 .probe = ntb_client_probe,
226 .remove = ntb_client_remove,
227};
228
229static LIST_HEAD(ntb_transport_list);
230
231static int ntb_bus_init(struct ntb_transport *nt)
232{
233 if (list_empty(&ntb_transport_list)) {
234 int rc = bus_register(&ntb_bus_type);
235 if (rc)
236 return rc;
237 }
238
239 list_add(&nt->entry, &ntb_transport_list);
240
241 return 0;
242}
243
244static void ntb_bus_remove(struct ntb_transport *nt)
245{
246 struct ntb_transport_client_dev *client_dev, *cd;
247
248 list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
249 dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
250 dev_name(&client_dev->dev));
251 list_del(&client_dev->entry);
252 device_unregister(&client_dev->dev);
253 }
254
255 list_del(&nt->entry);
256
257 if (list_empty(&ntb_transport_list))
258 bus_unregister(&ntb_bus_type);
259}
260
261static void ntb_client_release(struct device *dev)
262{
263 struct ntb_transport_client_dev *client_dev;
264 client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
265
266 kfree(client_dev);
267}
268
269/**
270 * ntb_unregister_client_dev - Unregister NTB client device
271 * @device_name: Name of NTB client device
272 *
273 * Unregister an NTB client device with the NTB transport layer
274 */
275void ntb_unregister_client_dev(char *device_name)
276{
277 struct ntb_transport_client_dev *client, *cd;
278 struct ntb_transport *nt;
279
280 list_for_each_entry(nt, &ntb_transport_list, entry)
281 list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
282 if (!strncmp(dev_name(&client->dev), device_name,
283 strlen(device_name))) {
284 list_del(&client->entry);
285 device_unregister(&client->dev);
286 }
287}
288EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
289
290/**
291 * ntb_register_client_dev - Register NTB client device
292 * @device_name: Name of NTB client device
293 *
294 * Register an NTB client device with the NTB transport layer
295 */
296int ntb_register_client_dev(char *device_name)
297{
298 struct ntb_transport_client_dev *client_dev;
299 struct ntb_transport *nt;
300 int rc;
301
302 if (list_empty(&ntb_transport_list))
303 return -ENODEV;
304
305 list_for_each_entry(nt, &ntb_transport_list, entry) {
306 struct device *dev;
307
308 client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
309 GFP_KERNEL);
310 if (!client_dev) {
311 rc = -ENOMEM;
312 goto err;
313 }
314
315 dev = &client_dev->dev;
316
317 /* setup and register client devices */
318 dev_set_name(dev, "%s", device_name);
319 dev->bus = &ntb_bus_type;
320 dev->release = ntb_client_release;
321 dev->parent = &ntb_query_pdev(nt->ndev)->dev;
322
323 rc = device_register(dev);
324 if (rc) {
325 kfree(client_dev);
326 goto err;
327 }
328
329 list_add_tail(&client_dev->entry, &nt->client_devs);
330 }
331
332 return 0;
333
334err:
335 ntb_unregister_client_dev(device_name);
336
337 return rc;
338}
339EXPORT_SYMBOL_GPL(ntb_register_client_dev);
340
341/**
342 * ntb_register_client - Register NTB client driver
343 * @drv: NTB client driver to be registered
344 *
345 * Register an NTB client driver with the NTB transport layer
346 *
347 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
348 */
349int ntb_register_client(struct ntb_client *drv)
350{
351 drv->driver.bus = &ntb_bus_type;
352
353 if (list_empty(&ntb_transport_list))
354 return -ENODEV;
355
356 return driver_register(&drv->driver);
357}
358EXPORT_SYMBOL_GPL(ntb_register_client);
359
360/**
361 * ntb_unregister_client - Unregister NTB client driver
362 * @drv: NTB client driver to be unregistered
363 *
364 * Unregister an NTB client driver with the NTB transport layer
365 *
366 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
367 */
368void ntb_unregister_client(struct ntb_client *drv)
369{
370 driver_unregister(&drv->driver);
371}
372EXPORT_SYMBOL_GPL(ntb_unregister_client);
373
374static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
375 loff_t *offp)
376{
377 struct ntb_transport_qp *qp;
378 char *buf;
379 ssize_t ret, out_offset, out_count;
380
381 out_count = 600;
382
383 buf = kmalloc(out_count, GFP_KERNEL);
384 if (!buf)
385 return -ENOMEM;
386
387 qp = filp->private_data;
388 out_offset = 0;
389 out_offset += snprintf(buf + out_offset, out_count - out_offset,
390 "NTB QP stats\n");
391 out_offset += snprintf(buf + out_offset, out_count - out_offset,
392 "rx_bytes - \t%llu\n", qp->rx_bytes);
393 out_offset += snprintf(buf + out_offset, out_count - out_offset,
394 "rx_pkts - \t%llu\n", qp->rx_pkts);
395 out_offset += snprintf(buf + out_offset, out_count - out_offset,
396 "rx_ring_empty - %llu\n", qp->rx_ring_empty);
397 out_offset += snprintf(buf + out_offset, out_count - out_offset,
398 "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
399 out_offset += snprintf(buf + out_offset, out_count - out_offset,
400 "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
401 out_offset += snprintf(buf + out_offset, out_count - out_offset,
402 "rx_err_ver - \t%llu\n", qp->rx_err_ver);
403 out_offset += snprintf(buf + out_offset, out_count - out_offset,
404 "rx_buff - \t%p\n", qp->rx_buff);
405 out_offset += snprintf(buf + out_offset, out_count - out_offset,
406 "rx_index - \t%u\n", qp->rx_index);
407 out_offset += snprintf(buf + out_offset, out_count - out_offset,
408 "rx_max_entry - \t%u\n", qp->rx_max_entry);
409
410 out_offset += snprintf(buf + out_offset, out_count - out_offset,
411 "tx_bytes - \t%llu\n", qp->tx_bytes);
412 out_offset += snprintf(buf + out_offset, out_count - out_offset,
413 "tx_pkts - \t%llu\n", qp->tx_pkts);
414 out_offset += snprintf(buf + out_offset, out_count - out_offset,
415 "tx_ring_full - \t%llu\n", qp->tx_ring_full);
416 out_offset += snprintf(buf + out_offset, out_count - out_offset,
417 "tx_mw - \t%p\n", qp->tx_mw);
418 out_offset += snprintf(buf + out_offset, out_count - out_offset,
419 "tx_index - \t%u\n", qp->tx_index);
420 out_offset += snprintf(buf + out_offset, out_count - out_offset,
421 "tx_max_entry - \t%u\n", qp->tx_max_entry);
422
423 out_offset += snprintf(buf + out_offset, out_count - out_offset,
424 "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
425 "Up" : "Down");
426 if (out_offset > out_count)
427 out_offset = out_count;
428
429 ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
430 kfree(buf);
431 return ret;
432}
433
434static const struct file_operations ntb_qp_debugfs_stats = {
435 .owner = THIS_MODULE,
436 .open = simple_open,
437 .read = debugfs_read,
438};
439
440static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
441 struct list_head *list)
442{
443 unsigned long flags;
444
445 spin_lock_irqsave(lock, flags);
446 list_add_tail(entry, list);
447 spin_unlock_irqrestore(lock, flags);
448}
449
450static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
451 struct list_head *list)
452{
453 struct ntb_queue_entry *entry;
454 unsigned long flags;
455
456 spin_lock_irqsave(lock, flags);
457 if (list_empty(list)) {
458 entry = NULL;
459 goto out;
460 }
461 entry = list_first_entry(list, struct ntb_queue_entry, entry);
462 list_del(&entry->entry);
463out:
464 spin_unlock_irqrestore(lock, flags);
465
466 return entry;
467}
468
469static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
470 unsigned int qp_num)
471{
472 struct ntb_transport_qp *qp = &nt->qps[qp_num];
473 unsigned int rx_size, num_qps_mw;
474 u8 mw_num = QP_TO_MW(qp_num);
475 unsigned int i;
476
477 WARN_ON(nt->mw[mw_num].virt_addr == NULL);
478
479 if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
480 num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
481 else
482 num_qps_mw = nt->max_qps / NTB_NUM_MW;
483
484 rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
485 qp->remote_rx_info = nt->mw[mw_num].virt_addr +
486 (qp_num / NTB_NUM_MW * rx_size);
487 rx_size -= sizeof(struct ntb_rx_info);
488
489 qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info);
490 qp->rx_max_frame = min(transport_mtu, rx_size);
491 qp->rx_max_entry = rx_size / qp->rx_max_frame;
492 qp->rx_index = 0;
493
494 qp->remote_rx_info->entry = qp->rx_max_entry;
495
496 /* setup the hdr offsets with 0's */
497 for (i = 0; i < qp->rx_max_entry; i++) {
498 void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
499 sizeof(struct ntb_payload_header);
500 memset(offset, 0, sizeof(struct ntb_payload_header));
501 }
502
503 qp->rx_pkts = 0;
504 qp->tx_pkts = 0;
505}
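/*
 * Editor's worked example for the split above (hypothetical sizes, not
 * part of the patch): with NTB_NUM_MW = 2 and max_qps = 4, each memory
 * window serves num_qps_mw = 2 queue pairs.  For a 1 MiB window:
 *
 *   rx_size      = 1048576 / 2                   = 524288 bytes per QP
 *   rx_size     -= sizeof(struct ntb_rx_info)      (peer's consumed index)
 *   rx_max_frame = min(transport_mtu, rx_size)   = 0x401E = 16414 bytes
 *   rx_max_entry = rx_size / rx_max_frame        = 31 receive slots
 *
 * Every slot ends in a struct ntb_payload_header, which is why the loop
 * above zeroes the last sizeof(header) bytes of each frame.
 */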
506
507static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
508{
509 struct ntb_transport_mw *mw = &nt->mw[num_mw];
510 struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
511
512 /* Alloc memory for receiving data. Must be 4k aligned */
513 mw->size = ALIGN(size, 4096);
514
515 mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
516 GFP_KERNEL);
517 if (!mw->virt_addr) {
518 dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
519 (int) mw->size);
520 return -ENOMEM;
521 }
522
523 /* Notify HW the memory location of the receive buffer */
524 ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
525
526 return 0;
527}
528
529static void ntb_qp_link_cleanup(struct work_struct *work)
530{
531 struct ntb_transport_qp *qp = container_of(work,
532 struct ntb_transport_qp,
533 link_cleanup);
534 struct ntb_transport *nt = qp->transport;
535 struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
536
537 if (qp->qp_link == NTB_LINK_DOWN) {
538 cancel_delayed_work_sync(&qp->link_work);
539 return;
540 }
541
542 if (qp->event_handler)
543 qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
544
545 dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
546 qp->qp_link = NTB_LINK_DOWN;
547
548 if (nt->transport_link == NTB_LINK_UP)
549 schedule_delayed_work(&qp->link_work,
550 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
551}
552
553static void ntb_qp_link_down(struct ntb_transport_qp *qp)
554{
555 schedule_work(&qp->link_cleanup);
556}
557
558static void ntb_transport_link_cleanup(struct work_struct *work)
559{
560 struct ntb_transport *nt = container_of(work, struct ntb_transport,
561 link_cleanup);
562 int i;
563
564 if (nt->transport_link == NTB_LINK_DOWN)
565 cancel_delayed_work_sync(&nt->link_work);
566 else
567 nt->transport_link = NTB_LINK_DOWN;
568
569 /* Pass along the info to any clients */
570 for (i = 0; i < nt->max_qps; i++)
571 if (!test_bit(i, &nt->qp_bitmap))
572 ntb_qp_link_down(&nt->qps[i]);
573
574 /* The scratchpad registers keep their values if the remote side
575 * goes down; blast them now to give them a sane value the next
576 * time they are accessed
577 */
578 for (i = 0; i < MAX_SPAD; i++)
579 ntb_write_local_spad(nt->ndev, i, 0);
580}
581
582static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
583{
584 struct ntb_transport *nt = data;
585
586 switch (event) {
587 case NTB_EVENT_HW_LINK_UP:
588 schedule_delayed_work(&nt->link_work, 0);
589 break;
590 case NTB_EVENT_HW_LINK_DOWN:
591 schedule_work(&nt->link_cleanup);
592 break;
593 default:
594 BUG();
595 }
596}
597
598static void ntb_transport_link_work(struct work_struct *work)
599{
600 struct ntb_transport *nt = container_of(work, struct ntb_transport,
601 link_work.work);
602 struct ntb_device *ndev = nt->ndev;
603 struct pci_dev *pdev = ntb_query_pdev(ndev);
604 u32 val;
605 int rc, i;
606
607 /* send the local info */
608 rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
609 if (rc) {
610 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
611 0, VERSION);
612 goto out;
613 }
614
615 rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
616 if (rc) {
617 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
618 (u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
619 goto out;
620 }
621
622 rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
623 if (rc) {
624 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
625 (u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
626 goto out;
627 }
628
629 rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
630 if (rc) {
631 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
632 nt->max_qps, NUM_QPS);
633 goto out;
634 }
635
636 rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
637 if (rc) {
638 dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
639 goto out;
640 }
641
642 rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
643 if (rc) {
644 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
645 val, QP_LINKS);
646 goto out;
647 }
648
649 /* Query the remote side for its info */
650 rc = ntb_read_remote_spad(ndev, VERSION, &val);
651 if (rc) {
652 dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
653 goto out;
654 }
655
656 if (val != NTB_TRANSPORT_VERSION)
657 goto out;
658 dev_dbg(&pdev->dev, "Remote version = %d\n", val);
659
660 rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
661 if (rc) {
662 dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
663 goto out;
664 }
665
666 if (val != nt->max_qps)
667 goto out;
668 dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
669
670 rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
671 if (rc) {
672 dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
673 goto out;
674 }
675
676 if (!val)
677 goto out;
678 dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);
679
680 rc = ntb_set_mw(nt, 0, val);
681 if (rc)
682 goto out;
683
684 rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
685 if (rc) {
686 dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
687 goto out;
688 }
689
690 if (!val)
691 goto out;
692 dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
693
694 rc = ntb_set_mw(nt, 1, val);
695 if (rc)
696 goto out;
697
698 nt->transport_link = NTB_LINK_UP;
699
700 for (i = 0; i < nt->max_qps; i++) {
701 struct ntb_transport_qp *qp = &nt->qps[i];
702
703 ntb_transport_setup_qp_mw(nt, i);
704
705 if (qp->client_ready == NTB_LINK_UP)
706 schedule_delayed_work(&qp->link_work, 0);
707 }
708
709 return;
710
711out:
712 if (ntb_hw_link_status(ndev))
713 schedule_delayed_work(&nt->link_work,
714 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
715}
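/*
 * Editor's note (not part of the patch) - the scratchpad handshake above,
 * in order:
 *   1. push the local VERSION, MW0_SZ, MW1_SZ, NUM_QPS and QP_LINKS values
 *      to the peer's scratchpads;
 *   2. read the peer's values back and bail out (retrying while the HW
 *      link stays up) if the version or QP count disagrees, or if an
 *      advertised MW size is zero;
 *   3. allocate the local receive buffers with ntb_set_mw() using the
 *      sizes advertised by the peer;
 *   4. mark the transport NTB_LINK_UP and kick link_work for every QP
 *      whose client has already called ntb_transport_link_up().
 */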
716
717static void ntb_qp_link_work(struct work_struct *work)
718{
719 struct ntb_transport_qp *qp = container_of(work,
720 struct ntb_transport_qp,
721 link_work.work);
722 struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
723 struct ntb_transport *nt = qp->transport;
724 int rc, val;
725
726 WARN_ON(nt->transport_link != NTB_LINK_UP);
727
728 rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
729 if (rc) {
730 dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
731 return;
732 }
733
734 rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
735 if (rc)
736 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
737 val | 1 << qp->qp_num, QP_LINKS);
738
739 /* query remote spad for qp ready bits */
740 rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
741 if (rc)
742 dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);
743
744 dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);
745
746 /* See if the remote side is up */
747 if (1 << qp->qp_num & val) {
748 qp->qp_link = NTB_LINK_UP;
749
750 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
751 if (qp->event_handler)
752 qp->event_handler(qp->cb_data, NTB_LINK_UP);
753 } else if (nt->transport_link == NTB_LINK_UP)
754 schedule_delayed_work(&qp->link_work,
755 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
756}
757
758static void ntb_transport_init_queue(struct ntb_transport *nt,
759 unsigned int qp_num)
760{
761 struct ntb_transport_qp *qp;
762 unsigned int num_qps_mw, tx_size;
763 u8 mw_num = QP_TO_MW(qp_num);
764
765 qp = &nt->qps[qp_num];
766 qp->qp_num = qp_num;
767 qp->transport = nt;
768 qp->ndev = nt->ndev;
769 qp->qp_link = NTB_LINK_DOWN;
770 qp->client_ready = NTB_LINK_DOWN;
771 qp->event_handler = NULL;
772
773 if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
774 num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
775 else
776 num_qps_mw = nt->max_qps / NTB_NUM_MW;
777
778 tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
779 qp->rx_info = ntb_get_mw_vbase(nt->ndev, mw_num) +
780 (qp_num / NTB_NUM_MW * tx_size);
781 tx_size -= sizeof(struct ntb_rx_info);
782
783 qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info);
784 qp->tx_max_frame = min(transport_mtu, tx_size);
785 qp->tx_max_entry = tx_size / qp->tx_max_frame;
786 qp->tx_index = 0;
787
788 if (nt->debugfs_dir) {
789 char debugfs_name[4];
790
791 snprintf(debugfs_name, 4, "qp%d", qp_num);
792 qp->debugfs_dir = debugfs_create_dir(debugfs_name,
793 nt->debugfs_dir);
794
795 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
796 qp->debugfs_dir, qp,
797 &ntb_qp_debugfs_stats);
798 }
799
800 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
801 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);
802
803 spin_lock_init(&qp->ntb_rx_pend_q_lock);
804 spin_lock_init(&qp->ntb_rx_free_q_lock);
805 spin_lock_init(&qp->ntb_tx_free_q_lock);
806
807 INIT_LIST_HEAD(&qp->rx_pend_q);
808 INIT_LIST_HEAD(&qp->rx_free_q);
809 INIT_LIST_HEAD(&qp->tx_free_q);
810}
811
812int ntb_transport_init(struct pci_dev *pdev)
813{
814 struct ntb_transport *nt;
815 int rc, i;
816
817 nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
818 if (!nt)
819 return -ENOMEM;
820
821 if (debugfs_initialized())
822 nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
823 else
824 nt->debugfs_dir = NULL;
825
826 nt->ndev = ntb_register_transport(pdev, nt);
827 if (!nt->ndev) {
828 rc = -EIO;
829 goto err;
830 }
831
832 nt->max_qps = min(nt->ndev->max_cbs, max_num_clients);
833
834 nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
835 GFP_KERNEL);
836 if (!nt->qps) {
837 rc = -ENOMEM;
838 goto err1;
839 }
840
841 nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
842
843 for (i = 0; i < nt->max_qps; i++)
844 ntb_transport_init_queue(nt, i);
845
846 INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
847 INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
848
849 rc = ntb_register_event_callback(nt->ndev,
850 ntb_transport_event_callback);
851 if (rc)
852 goto err2;
853
854 INIT_LIST_HEAD(&nt->client_devs);
855 rc = ntb_bus_init(nt);
856 if (rc)
857 goto err3;
858
859 if (ntb_hw_link_status(nt->ndev))
860 schedule_delayed_work(&nt->link_work, 0);
861
862 return 0;
863
864err3:
865 ntb_unregister_event_callback(nt->ndev);
866err2:
867 kfree(nt->qps);
868err1:
869 ntb_unregister_transport(nt->ndev);
870err:
871 debugfs_remove_recursive(nt->debugfs_dir);
872 kfree(nt);
873 return rc;
874}
875
876void ntb_transport_free(void *transport)
877{
878 struct ntb_transport *nt = transport;
879 struct pci_dev *pdev;
880 int i;
881
882 nt->transport_link = NTB_LINK_DOWN;
883
884 /* verify that all the qp's are freed */
885 for (i = 0; i < nt->max_qps; i++)
886 if (!test_bit(i, &nt->qp_bitmap))
887 ntb_transport_free_queue(&nt->qps[i]);
888
889 ntb_bus_remove(nt);
890
891 cancel_delayed_work_sync(&nt->link_work);
892
893 debugfs_remove_recursive(nt->debugfs_dir);
894
895 ntb_unregister_event_callback(nt->ndev);
896
897 pdev = ntb_query_pdev(nt->ndev);
898
899 for (i = 0; i < NTB_NUM_MW; i++)
900 if (nt->mw[i].virt_addr)
901 dma_free_coherent(&pdev->dev, nt->mw[i].size,
902 nt->mw[i].virt_addr,
903 nt->mw[i].dma_addr);
904
905 kfree(nt->qps);
906 ntb_unregister_transport(nt->ndev);
907 kfree(nt);
908}
909
910static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
911 struct ntb_queue_entry *entry, void *offset)
912{
913 void *cb_data = entry->cb_data;
914 unsigned int len = entry->len;
915
916 memcpy(entry->buf, offset, entry->len);
917
918 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
919
920 if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
921 qp->rx_handler(qp, qp->cb_data, cb_data, len);
922}
923
924static int ntb_process_rxc(struct ntb_transport_qp *qp)
925{
926 struct ntb_payload_header *hdr;
927 struct ntb_queue_entry *entry;
928 void *offset;
929
930 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
931 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
932
933 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
934 if (!entry) {
935 dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
936 "no buffer - HDR ver %u, len %d, flags %x\n",
937 hdr->ver, hdr->len, hdr->flags);
938 qp->rx_err_no_buf++;
939 return -ENOMEM;
940 }
941
942 if (!(hdr->flags & DESC_DONE_FLAG)) {
943 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
944 &qp->rx_pend_q);
945 qp->rx_ring_empty++;
946 return -EAGAIN;
947 }
948
949 if (hdr->ver != (u32) qp->rx_pkts) {
950 dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
951 "qp %d: version mismatch, expected %llu - got %u\n",
952 qp->qp_num, qp->rx_pkts, hdr->ver);
953 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
954 &qp->rx_pend_q);
955 qp->rx_err_ver++;
956 return -EIO;
957 }
958
959 if (hdr->flags & LINK_DOWN_FLAG) {
960 ntb_qp_link_down(qp);
961
962 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
963 &qp->rx_pend_q);
964 goto out;
965 }
966
967 dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
968 "rx offset %u, ver %u - %d payload received, buf size %d\n",
969 qp->rx_index, hdr->ver, hdr->len, entry->len);
970
971 if (hdr->len <= entry->len) {
972 entry->len = hdr->len;
973 ntb_rx_copy_task(qp, entry, offset);
974 } else {
975 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
976 &qp->rx_pend_q);
977
978 qp->rx_err_oflow++;
979 dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
980 "RX overflow! Wanted %d got %d\n",
981 hdr->len, entry->len);
982 }
983
984 qp->rx_bytes += hdr->len;
985 qp->rx_pkts++;
986
987out:
988 /* Ensure that the data is fully copied out before clearing the flag */
989 wmb();
990 hdr->flags = 0;
991 iowrite32(qp->rx_index, &qp->rx_info->entry);
992
993 qp->rx_index++;
994 qp->rx_index %= qp->rx_max_entry;
995
996 return 0;
997}
998
999static void ntb_transport_rx(unsigned long data)
1000{
1001 struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
1002 int rc;
1003
1004 do {
1005 rc = ntb_process_rxc(qp);
1006 } while (!rc);
1007}
1008
1009static void ntb_transport_rxc_db(void *data, int db_num)
1010{
1011 struct ntb_transport_qp *qp = data;
1012
1013 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
1014 __func__, db_num);
1015
1016 tasklet_schedule(&qp->rx_work);
1017}
1018
1019static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
1020 struct ntb_queue_entry *entry,
1021 void __iomem *offset)
1022{
1023 struct ntb_payload_header __iomem *hdr;
1024
1025 memcpy_toio(offset, entry->buf, entry->len);
1026
1027 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1028 iowrite32(entry->len, &hdr->len);
1029 iowrite32((u32) qp->tx_pkts, &hdr->ver);
1030
1031 /* Ensure that the data is fully copied out before setting the flag */
1032 wmb();
1033 iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
1034
1035 ntb_ring_sdb(qp->ndev, qp->qp_num);
1036
1037 /* The entry length can only be zero if the packet is intended to be a
1038 * "link down" or similar. Since no payload is being sent in these
1039 * cases, there is nothing to add to the completion queue.
1040 */
1041 if (entry->len > 0) {
1042 qp->tx_bytes += entry->len;
1043
1044 if (qp->tx_handler)
1045 qp->tx_handler(qp, qp->cb_data, entry->cb_data,
1046 entry->len);
1047 }
1048
1049 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
1050}
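/*
 * Editor's note (not part of the patch) - layout of one tx_max_frame slot
 * in the peer-visible memory window written above:
 *
 *   offset                          offset + tx_max_frame - sizeof(hdr)
 *   |<------------- payload ------------->|<- struct ntb_payload_header ->|
 *                                             (ver, len, flags)
 *
 * The wmb() orders the DESC_DONE_FLAG write after the payload copy, so the
 * receive side never observes a "done" header over stale data; the doorbell
 * is rung only after the flag has been written.
 */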
1051
1052static int ntb_process_tx(struct ntb_transport_qp *qp,
1053 struct ntb_queue_entry *entry)
1054{
1055 void __iomem *offset;
1056
1057 offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
1058
1059 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %u, entry len %d flags %x buff %p\n",
1060 qp->tx_pkts, offset, qp->tx_index, entry->len, entry->flags,
1061 entry->buf);
1062 if (qp->tx_index == qp->remote_rx_info->entry) {
1063 qp->tx_ring_full++;
1064 return -EAGAIN;
1065 }
1066
1067 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
1068 if (qp->tx_handler)
1069 qp->tx_handler(qp->cb_data, qp, NULL, -EIO);
1070
1071 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1072 &qp->tx_free_q);
1073 return 0;
1074 }
1075
1076 ntb_tx_copy_task(qp, entry, offset);
1077
1078 qp->tx_index++;
1079 qp->tx_index %= qp->tx_max_entry;
1080
1081 qp->tx_pkts++;
1082
1083 return 0;
1084}
1085
1086static void ntb_send_link_down(struct ntb_transport_qp *qp)
1087{
1088 struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
1089 struct ntb_queue_entry *entry;
1090 int i, rc;
1091
1092 if (qp->qp_link == NTB_LINK_DOWN)
1093 return;
1094
1095 qp->qp_link = NTB_LINK_DOWN;
1096 dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
1097
1098 for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
1099 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1100 if (entry)
1101 break;
1102 msleep(100);
1103 }
1104
1105 if (!entry)
1106 return;
1107
1108 entry->cb_data = NULL;
1109 entry->buf = NULL;
1110 entry->len = 0;
1111 entry->flags = LINK_DOWN_FLAG;
1112
1113 rc = ntb_process_tx(qp, entry);
1114 if (rc)
1115 dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
1116 qp->qp_num);
1117}
1118
1119/**
1120 * ntb_transport_create_queue - Create a new NTB transport layer queue
1121 * @data: pointer for callback data
1122 * @pdev: PCI device the transport queue is created on
1123 * @handlers: receive, transmit, and event callback functions for the queue
1124 *
1125 * Create a new NTB transport layer queue and provide the queue with a callback
1126 * routine for both transmit and receive. The receive callback routine will be
1127 * used to pass up data when the transport has received it on the queue. The
1128 * transmit callback routine will be called when the transport has completed the
1129 * transmission of the data on the queue and the data is ready to be freed.
1130 *
1131 * RETURNS: pointer to newly created ntb_queue, NULL on error.
1132 */
1133struct ntb_transport_qp *
1134ntb_transport_create_queue(void *data, struct pci_dev *pdev,
1135 const struct ntb_queue_handlers *handlers)
1136{
1137 struct ntb_queue_entry *entry;
1138 struct ntb_transport_qp *qp;
1139 struct ntb_transport *nt;
1140 unsigned int free_queue;
1141 int rc, i;
1142
1143 nt = ntb_find_transport(pdev);
1144 if (!nt)
1145 goto err;
1146
1147 free_queue = ffs(nt->qp_bitmap);
1148 if (!free_queue)
1149 goto err;
1150
1151 /* decrement free_queue to make it zero based */
1152 free_queue--;
1153
1154 clear_bit(free_queue, &nt->qp_bitmap);
1155
1156 qp = &nt->qps[free_queue];
1157 qp->cb_data = data;
1158 qp->rx_handler = handlers->rx_handler;
1159 qp->tx_handler = handlers->tx_handler;
1160 qp->event_handler = handlers->event_handler;
1161
1162 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1163 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
1164 if (!entry)
1165 goto err1;
1166
1167 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
1168 &qp->rx_free_q);
1169 }
1170
1171 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
1172 entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
1173 if (!entry)
1174 goto err2;
1175
1176 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1177 &qp->tx_free_q);
1178 }
1179
1180 tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
1181
1182 rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
1183 ntb_transport_rxc_db);
1184 if (rc)
1185 goto err3;
1186
1187 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
1188
1189 return qp;
1190
1191err3:
1192 tasklet_disable(&qp->rx_work);
1193err2:
1194 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1195 kfree(entry);
1196err1:
1197 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
1198 kfree(entry);
1199 set_bit(free_queue, &nt->qp_bitmap);
1200err:
1201 return NULL;
1202}
1203EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
1204
1205/**
1206 * ntb_transport_free_queue - Frees NTB transport queue
1207 * @qp: NTB queue to be freed
1208 *
1209 * Frees NTB transport queue
1210 */
1211void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1212{
1213 struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
1214 struct ntb_queue_entry *entry;
1215
1216 if (!qp)
1217 return;
1218
1219 cancel_delayed_work_sync(&qp->link_work);
1220
1221 ntb_unregister_db_callback(qp->ndev, qp->qp_num);
1222 tasklet_disable(&qp->rx_work);
1223
1224 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
1225 kfree(entry);
1226
1227 while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
1228 dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
1229 kfree(entry);
1230 }
1231
1232 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1233 kfree(entry);
1234
1235 set_bit(qp->qp_num, &qp->transport->qp_bitmap);
1236
1237 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
1238}
1239EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
1240
1241/**
1242 * ntb_transport_rx_remove - Dequeues enqueued rx packet
1243 * @qp: NTB queue to be freed
1244 * @len: pointer to variable to write enqueued buffers length
1245 *
1246 * Dequeues unused buffers from the receive queue. Should only be used during
1247 * shutdown of the qp.
1248 *
1249 * RETURNS: NULL on error, or the per-buffer cb pointer on success.
1250 */
1251void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
1252{
1253 struct ntb_queue_entry *entry;
1254 void *buf;
1255
1256 if (!qp || qp->client_ready == NTB_LINK_UP)
1257 return NULL;
1258
1259 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
1260 if (!entry)
1261 return NULL;
1262
1263 buf = entry->cb_data;
1264 *len = entry->len;
1265
1266 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
1267
1268 return buf;
1269}
1270EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
1271
1272/**
1273 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
1274 * @qp: NTB transport layer queue the entry is to be enqueued on
1275 * @cb: per buffer pointer for callback function to use
1276 * @data: pointer to data buffer that incoming packets will be copied into
1277 * @len: length of the data buffer
1278 *
1279 * Enqueue a new receive buffer onto the transport queue into which an NTB
1280 * payload can be received.
1281 *
1282 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1283 */
1284int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1285 unsigned int len)
1286{
1287 struct ntb_queue_entry *entry;
1288
1289 if (!qp)
1290 return -EINVAL;
1291
1292 entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
1293 if (!entry)
1294 return -ENOMEM;
1295
1296 entry->cb_data = cb;
1297 entry->buf = data;
1298 entry->len = len;
1299
1300 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
1301
1302 return 0;
1303}
1304EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
1305
1306/**
1307 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
1308 * @qp: NTB transport layer queue the entry is to be enqueued on
1309 * @cb: per buffer pointer for callback function to use
1310 * @data: pointer to data buffer that will be sent
1311 * @len: length of the data buffer
1312 *
1313 * Enqueue a new transmit buffer onto the transport queue from which an NTB
1314 * payload will be transmitted. This assumes that a lock is being held to
1315 * serialize access to the qp.
1316 *
1317 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1318 */
1319int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1320 unsigned int len)
1321{
1322 struct ntb_queue_entry *entry;
1323 int rc;
1324
1325 if (!qp || qp->qp_link != NTB_LINK_UP || !len)
1326 return -EINVAL;
1327
1328 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1329 if (!entry)
1330 return -ENOMEM;
1331
1332 entry->cb_data = cb;
1333 entry->buf = data;
1334 entry->len = len;
1335 entry->flags = 0;
1336
1337 rc = ntb_process_tx(qp, entry);
1338 if (rc)
1339 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
1340 &qp->tx_free_q);
1341
1342 return rc;
1343}
1344EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
1345
1346/**
1347 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
1348 * @qp: NTB transport layer queue to be enabled
1349 *
1350 * Notify NTB transport layer of client readiness to use queue
1351 */
1352void ntb_transport_link_up(struct ntb_transport_qp *qp)
1353{
1354 if (!qp)
1355 return;
1356
1357 qp->client_ready = NTB_LINK_UP;
1358
1359 if (qp->transport->transport_link == NTB_LINK_UP)
1360 schedule_delayed_work(&qp->link_work, 0);
1361}
1362EXPORT_SYMBOL_GPL(ntb_transport_link_up);
1363
1364/**
1365 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
1366 * @qp: NTB transport layer queue to be disabled
1367 *
1368 * Notify NTB transport layer of client's desire to no longer receive data on
1369 * transport queue specified. It is the client's responsibility to ensure all
1370 * entries on the queue are purged or otherwise handled appropriately.
1371 */
1372void ntb_transport_link_down(struct ntb_transport_qp *qp)
1373{
1374 struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
1375 int rc, val;
1376
1377 if (!qp)
1378 return;
1379
1380 qp->client_ready = NTB_LINK_DOWN;
1381
1382 rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
1383 if (rc) {
1384 dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
1385 return;
1386 }
1387
1388 rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
1389 val & ~(1 << qp->qp_num));
1390 if (rc)
1391 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
1392 val & ~(1 << qp->qp_num), QP_LINKS);
1393
1394 if (qp->qp_link == NTB_LINK_UP)
1395 ntb_send_link_down(qp);
1396 else
1397 cancel_delayed_work_sync(&qp->link_work);
1398}
1399EXPORT_SYMBOL_GPL(ntb_transport_link_down);
1400
1401/**
1402 * ntb_transport_link_query - Query transport link state
1403 * @qp: NTB transport layer queue to be queried
1404 *
1405 * Query connectivity to the remote system of the NTB transport queue
1406 *
1407 * RETURNS: true for link up or false for link down
1408 */
1409bool ntb_transport_link_query(struct ntb_transport_qp *qp)
1410{
1411 return qp->qp_link == NTB_LINK_UP;
1412}
1413EXPORT_SYMBOL_GPL(ntb_transport_link_query);
1414
1415/**
1416 * ntb_transport_qp_num - Query the qp number
1417 * @qp: NTB transport layer queue to be queried
1418 *
1419 * Query qp number of the NTB transport queue
1420 *
1421 * RETURNS: a zero-based number specifying the qp number
1422 */
1423unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
1424{
1425 return qp->qp_num;
1426}
1427EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
1428
1429/**
1430 * ntb_transport_max_size - Query the max payload size of a qp
1431 * @qp: NTB transport layer queue to be queried
1432 *
1433 * Query the maximum payload size permissible on the given qp
1434 *
1435 * RETURNS: the max payload size of a qp
1436 */
1437unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
1438{
1439 return qp->tx_max_frame - sizeof(struct ntb_payload_header);
1440}
1441EXPORT_SYMBOL_GPL(ntb_transport_max_size);
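
To show how the exported calls above fit together, here is an editor's sketch of a minimal client; it assumes the struct ntb_client and struct ntb_queue_handlers definitions from include/linux/ntb.h, and every "example_*" name is hypothetical rather than something provided by the patch.

    #include <linux/module.h>
    #include <linux/pci.h>
    #include <linux/slab.h>
    #include <linux/ntb.h>

    #define EXAMPLE_BUF_LEN 2048

    static struct ntb_transport_qp *example_qp;

    static void example_rx(struct ntb_transport_qp *qp, void *qp_data,
                           void *data, int len)
    {
            /* "data" is the cb pointer posted below; consume and re-post it */
            ntb_transport_rx_enqueue(qp, data, data, EXAMPLE_BUF_LEN);
    }

    static void example_tx(struct ntb_transport_qp *qp, void *qp_data,
                           void *data, int len)
    {
            kfree(data);    /* transmit completed, the buffer may be freed */
    }

    static void example_event(void *data, int status)
    {
            /* NTB_LINK_UP / NTB_LINK_DOWN notifications arrive here */
    }

    static const struct ntb_queue_handlers example_handlers = {
            .rx_handler    = example_rx,
            .tx_handler    = example_tx,
            .event_handler = example_event,
    };

    static int example_probe(struct pci_dev *pdev)
    {
            void *buf = kmalloc(EXAMPLE_BUF_LEN, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;

            example_qp = ntb_transport_create_queue(NULL, pdev,
                                                    &example_handlers);
            if (!example_qp) {
                    kfree(buf);
                    return -EIO;
            }

            ntb_transport_rx_enqueue(example_qp, buf, buf, EXAMPLE_BUF_LEN);
            ntb_transport_link_up(example_qp);
            return 0;
    }

    static void example_remove(struct pci_dev *pdev)
    {
            /* a full client would drain buffers with ntb_transport_rx_remove() */
            ntb_transport_link_down(example_qp);
            ntb_transport_free_queue(example_qp);
    }

    static struct ntb_client example_client = {
            .driver.name = "example_ntb_client",
            .probe       = example_probe,
            .remove      = example_remove,
    };

A module built around this would register the device name and the driver from its init path, e.g. ntb_register_client_dev("example_ntb_client") followed by ntb_register_client(&example_client), which is how ntb_match_bus() pairs the client device with its driver by name.
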
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 3578e1ca97a0..519c4d6003a6 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -133,8 +133,6 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i
133 goto err_out_free_res; 133 goto err_out_free_res;
134 } 134 }
135 135
136 pci_set_drvdata(dev, &sockets[i].socket);
137
138 for (i = 0; i<socket_count; i++) { 136 for (i = 0; i<socket_count; i++) {
139 sockets[i].socket.dev.parent = &dev->dev; 137 sockets[i].socket.dev.parent = &dev->dev;
140 sockets[i].socket.ops = &i82092aa_operations; 138 sockets[i].socket.ops = &i82092aa_operations;
@@ -164,14 +162,14 @@ err_out_disable:
164 162
165static void i82092aa_pci_remove(struct pci_dev *dev) 163static void i82092aa_pci_remove(struct pci_dev *dev)
166{ 164{
167 struct pcmcia_socket *socket = pci_get_drvdata(dev); 165 int i;
168 166
169 enter("i82092aa_pci_remove"); 167 enter("i82092aa_pci_remove");
170 168
171 free_irq(dev->irq, i82092aa_interrupt); 169 free_irq(dev->irq, i82092aa_interrupt);
172 170
173 if (socket) 171 for (i = 0; i < socket_count; i++)
174 pcmcia_unregister_socket(socket); 172 pcmcia_unregister_socket(&sockets[i].socket);
175 173
176 leave("i82092aa_pci_remove"); 174 leave("i82092aa_pci_remove");
177} 175}
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index 75806be344e5..d98a08612492 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -246,6 +246,7 @@ static int pccard_init(struct pcmcia_socket *sock)
246 socket = &vrc4171_sockets[slot]; 246 socket = &vrc4171_sockets[slot];
247 socket->csc_irq = search_nonuse_irq(); 247 socket->csc_irq = search_nonuse_irq();
248 socket->io_irq = search_nonuse_irq(); 248 socket->io_irq = search_nonuse_irq();
249 spin_lock_init(&socket->lock);
249 250
250 return 0; 251 return 0;
251} 252}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 01440782feb2..270b3cf6f372 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1410,13 +1410,13 @@ enum {
1410 1410
1411static const struct hv_vmbus_device_id id_table[] = { 1411static const struct hv_vmbus_device_id id_table[] = {
1412 /* SCSI guid */ 1412 /* SCSI guid */
1413 { VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, 1413 { HV_SCSI_GUID,
1414 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) 1414 .driver_data = SCSI_GUID
1415 .driver_data = SCSI_GUID }, 1415 },
1416 /* IDE guid */ 1416 /* IDE guid */
1417 { VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, 1417 { HV_IDE_GUID,
1418 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) 1418 .driver_data = IDE_GUID
1419 .driver_data = IDE_GUID }, 1419 },
1420 { }, 1420 { },
1421}; 1421};
1422 1422
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 95a9f71d793e..5e6c7d74e19f 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -1376,6 +1376,7 @@ static int __vme_register_driver_bus(struct vme_driver *drv,
1376 return 0; 1376 return 0;
1377 1377
1378err_reg: 1378err_reg:
1379 put_device(&vdev->dev);
1379 kfree(vdev); 1380 kfree(vdev);
1380err_devalloc: 1381err_devalloc:
1381 list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) { 1382 list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 7c294f4dc0ed..96cab6ac2b4e 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16#include <linux/io.h>
16#include <linux/irq.h> 17#include <linux/irq.h>
17#include <linux/pm.h> 18#include <linux/pm.h>
18#include <linux/platform_device.h> 19#include <linux/platform_device.h>
@@ -459,43 +460,34 @@ static int ds1wm_probe(struct platform_device *pdev)
459 if (!pdev) 460 if (!pdev)
460 return -ENODEV; 461 return -ENODEV;
461 462
462 ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL); 463 ds1wm_data = devm_kzalloc(&pdev->dev, sizeof(*ds1wm_data), GFP_KERNEL);
463 if (!ds1wm_data) 464 if (!ds1wm_data)
464 return -ENOMEM; 465 return -ENOMEM;
465 466
466 platform_set_drvdata(pdev, ds1wm_data); 467 platform_set_drvdata(pdev, ds1wm_data);
467 468
468 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 469 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
469 if (!res) { 470 if (!res)
470 ret = -ENXIO; 471 return -ENXIO;
471 goto err0; 472 ds1wm_data->map = devm_ioremap(&pdev->dev, res->start,
472 } 473 resource_size(res));
473 ds1wm_data->map = ioremap(res->start, resource_size(res)); 474 if (!ds1wm_data->map)
474 if (!ds1wm_data->map) { 475 return -ENOMEM;
475 ret = -ENOMEM;
476 goto err0;
477 }
478 476
479 /* calculate bus shift from mem resource */ 477 /* calculate bus shift from mem resource */
480 ds1wm_data->bus_shift = resource_size(res) >> 3; 478 ds1wm_data->bus_shift = resource_size(res) >> 3;
481 479
482 ds1wm_data->pdev = pdev; 480 ds1wm_data->pdev = pdev;
483 ds1wm_data->cell = mfd_get_cell(pdev); 481 ds1wm_data->cell = mfd_get_cell(pdev);
484 if (!ds1wm_data->cell) { 482 if (!ds1wm_data->cell)
485 ret = -ENODEV; 483 return -ENODEV;
486 goto err1;
487 }
488 plat = pdev->dev.platform_data; 484 plat = pdev->dev.platform_data;
489 if (!plat) { 485 if (!plat)
490 ret = -ENODEV; 486 return -ENODEV;
491 goto err1;
492 }
493 487
494 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 488 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
495 if (!res) { 489 if (!res)
496 ret = -ENXIO; 490 return -ENXIO;
497 goto err1;
498 }
499 ds1wm_data->irq = res->start; 491 ds1wm_data->irq = res->start;
500 ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0); 492 ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
501 ds1wm_data->reset_recover_delay = plat->reset_recover_delay; 493 ds1wm_data->reset_recover_delay = plat->reset_recover_delay;
@@ -505,10 +497,10 @@ static int ds1wm_probe(struct platform_device *pdev)
505 if (res->flags & IORESOURCE_IRQ_LOWEDGE) 497 if (res->flags & IORESOURCE_IRQ_LOWEDGE)
506 irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); 498 irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
507 499
508 ret = request_irq(ds1wm_data->irq, ds1wm_isr, 500 ret = devm_request_irq(&pdev->dev, ds1wm_data->irq, ds1wm_isr,
509 IRQF_DISABLED | IRQF_SHARED, "ds1wm", ds1wm_data); 501 IRQF_DISABLED | IRQF_SHARED, "ds1wm", ds1wm_data);
510 if (ret) 502 if (ret)
511 goto err1; 503 return ret;
512 504
513 ds1wm_up(ds1wm_data); 505 ds1wm_up(ds1wm_data);
514 506
@@ -516,17 +508,12 @@ static int ds1wm_probe(struct platform_device *pdev)
516 508
517 ret = w1_add_master_device(&ds1wm_master); 509 ret = w1_add_master_device(&ds1wm_master);
518 if (ret) 510 if (ret)
519 goto err2; 511 goto err;
520 512
521 return 0; 513 return 0;
522 514
523err2: 515err:
524 ds1wm_down(ds1wm_data); 516 ds1wm_down(ds1wm_data);
525 free_irq(ds1wm_data->irq, ds1wm_data);
526err1:
527 iounmap(ds1wm_data->map);
528err0:
529 kfree(ds1wm_data);
530 517
531 return ret; 518 return ret;
532} 519}
@@ -560,9 +547,6 @@ static int ds1wm_remove(struct platform_device *pdev)
560 547
561 w1_remove_master_device(&ds1wm_master); 548 w1_remove_master_device(&ds1wm_master);
562 ds1wm_down(ds1wm_data); 549 ds1wm_down(ds1wm_data);
563 free_irq(ds1wm_data->irq, ds1wm_data);
564 iounmap(ds1wm_data->map);
565 kfree(ds1wm_data);
566 550
567 return 0; 551 return 0;
568} 552}
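
The ds1wm changes above move the probe path onto managed (devm_*) resources, which is what lets the err0/err1/err2 unwind labels disappear. A minimal sketch of the same pattern for a hypothetical platform driver (names such as foo_data and foo_isr are placeholders, not from the patch):

	struct foo_data {
		void __iomem *base;
	};

	static irqreturn_t foo_isr(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_data *data;
		struct resource *res;
		int irq, ret;

		data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENXIO;
		data->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
		if (!data->base)
			return -ENOMEM;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;
		ret = devm_request_irq(&pdev->dev, irq, foo_isr, 0, "foo", data);
		if (ret)
			return ret;	/* everything above is released automatically */

		platform_set_drvdata(pdev, data);
		return 0;
	}
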
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 6429b9e9fb82..e033491fe308 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -51,10 +51,10 @@
51 * The top 4 bits always read 0. 51 * The top 4 bits always read 0.
52 * To write, the top nibble must be the 1's compl. of the low nibble. 52 * To write, the top nibble must be the 1's compl. of the low nibble.
53 */ 53 */
54#define DS2482_REG_CFG_1WS 0x08 54#define DS2482_REG_CFG_1WS 0x08 /* 1-wire speed */
55#define DS2482_REG_CFG_SPU 0x04 55#define DS2482_REG_CFG_SPU 0x04 /* strong pull-up */
56#define DS2482_REG_CFG_PPM 0x02 56#define DS2482_REG_CFG_PPM 0x02 /* presence pulse masking */
57#define DS2482_REG_CFG_APU 0x01 57#define DS2482_REG_CFG_APU 0x01 /* active pull-up */
58 58
59 59
60/** 60/**
@@ -132,6 +132,17 @@ struct ds2482_data {
132 132
133 133
134/** 134/**
135 * Helper to calculate values for configuration register
136 * @param conf the raw config value
137 * @return the value w/ complements that can be written to register
138 */
139static inline u8 ds2482_calculate_config(u8 conf)
140{
141 return conf | ((~conf & 0x0f) << 4);
142}
143
144
145/**
135 * Sets the read pointer. 146 * Sets the read pointer.
136 * @param pdev The ds2482 client pointer 147 * @param pdev The ds2482 client pointer
137 * @param read_ptr see DS2482_PTR_CODE_xxx above 148 * @param read_ptr see DS2482_PTR_CODE_xxx above
(The ds2482_calculate_config() helper added above is reused by the strong pull-up path and the probe-time reset below.)
@@ -399,7 +410,7 @@ static u8 ds2482_w1_reset_bus(void *data)
399 /* If the chip did reset since detect, re-config it */ 410 /* If the chip did reset since detect, re-config it */
400 if (err & DS2482_REG_STS_RST) 411 if (err & DS2482_REG_STS_RST)
401 ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG, 412 ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
402 0xF0); 413 ds2482_calculate_config(0x00));
403 } 414 }
404 415
405 mutex_unlock(&pdev->access_lock); 416 mutex_unlock(&pdev->access_lock);
@@ -407,6 +418,32 @@ static u8 ds2482_w1_reset_bus(void *data)
407 return retval; 418 return retval;
408} 419}
409 420
421static u8 ds2482_w1_set_pullup(void *data, int delay)
422{
423 struct ds2482_w1_chan *pchan = data;
424 struct ds2482_data *pdev = pchan->pdev;
425 u8 retval = 1;
426
427 /* if delay is non-zero activate the pullup,
428 * the strong pullup will be automatically deactivated
 429 * by the master, so do not explicitly deactivate it
430 */
431 if (delay) {
432 /* both waits are crucial, otherwise devices might not be
433 * powered long enough, causing e.g. a w1_therm sensor to
434 * provide wrong conversion results
435 */
436 ds2482_wait_1wire_idle(pdev);
437 /* note: it seems like both SPU and APU have to be set! */
438 retval = ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
439 ds2482_calculate_config(DS2482_REG_CFG_SPU |
440 DS2482_REG_CFG_APU));
441 ds2482_wait_1wire_idle(pdev);
442 }
443
444 return retval;
445}
446
410 447
411static int ds2482_probe(struct i2c_client *client, 448static int ds2482_probe(struct i2c_client *client,
412 const struct i2c_device_id *id) 449 const struct i2c_device_id *id)
@@ -452,7 +489,8 @@ static int ds2482_probe(struct i2c_client *client,
452 data->w1_count = 8; 489 data->w1_count = 8;
453 490
454 /* Set all config items to 0 (off) */ 491 /* Set all config items to 0 (off) */
455 ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG, 0xF0); 492 ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG,
493 ds2482_calculate_config(0x00));
456 494
457 mutex_init(&data->access_lock); 495 mutex_init(&data->access_lock);
458 496
@@ -468,6 +506,7 @@ static int ds2482_probe(struct i2c_client *client,
468 data->w1_ch[idx].w1_bm.touch_bit = ds2482_w1_touch_bit; 506 data->w1_ch[idx].w1_bm.touch_bit = ds2482_w1_touch_bit;
469 data->w1_ch[idx].w1_bm.triplet = ds2482_w1_triplet; 507 data->w1_ch[idx].w1_bm.triplet = ds2482_w1_triplet;
470 data->w1_ch[idx].w1_bm.reset_bus = ds2482_w1_reset_bus; 508 data->w1_ch[idx].w1_bm.reset_bus = ds2482_w1_reset_bus;
509 data->w1_ch[idx].w1_bm.set_pullup = ds2482_w1_set_pullup;
471 510
472 err = w1_add_master_device(&data->w1_ch[idx].w1_bm); 511 err = w1_add_master_device(&data->w1_ch[idx].w1_bm);
473 if (err) { 512 if (err) {
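
The new ds2482_calculate_config() helper encodes the rule quoted in the register comments above: the top nibble of the configuration register must hold the one's complement of the low nibble. A standalone, illustrative check of that encoding (not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	static uint8_t calculate_config(uint8_t conf)
	{
		return conf | ((~conf & 0x0f) << 4);
	}

	int main(void)
	{
		/* all bits off: same 0xF0 value the old hard-coded write used */
		assert(calculate_config(0x00) == 0xF0);
		/* strong pull-up (0x04) + active pull-up (0x01), as set_pullup writes */
		assert(calculate_config(0x04 | 0x01) == 0xA5);
		return 0;
	}
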
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 708a25fc9961..372c8c0d54a0 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -109,34 +109,21 @@ static int mxc_w1_probe(struct platform_device *pdev)
109 struct resource *res; 109 struct resource *res;
110 int err = 0; 110 int err = 0;
111 111
112 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 112 mdev = devm_kzalloc(&pdev->dev, sizeof(struct mxc_w1_device),
113 if (!res) 113 GFP_KERNEL);
114 return -ENODEV;
115
116 mdev = kzalloc(sizeof(struct mxc_w1_device), GFP_KERNEL);
117 if (!mdev) 114 if (!mdev)
118 return -ENOMEM; 115 return -ENOMEM;
119 116
120 mdev->clk = clk_get(&pdev->dev, NULL); 117 mdev->clk = devm_clk_get(&pdev->dev, NULL);
121 if (IS_ERR(mdev->clk)) { 118 if (IS_ERR(mdev->clk))
122 err = PTR_ERR(mdev->clk); 119 return PTR_ERR(mdev->clk);
123 goto failed_clk;
124 }
125 120
126 mdev->clkdiv = (clk_get_rate(mdev->clk) / 1000000) - 1; 121 mdev->clkdiv = (clk_get_rate(mdev->clk) / 1000000) - 1;
127 122
128 res = request_mem_region(res->start, resource_size(res), 123 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
129 "mxc_w1"); 124 mdev->regs = devm_request_and_ioremap(&pdev->dev, res);
130 if (!res) { 125 if (!mdev->regs)
131 err = -EBUSY; 126 return -EBUSY;
132 goto failed_req;
133 }
134
135 mdev->regs = ioremap(res->start, resource_size(res));
136 if (!mdev->regs) {
137 dev_err(&pdev->dev, "Cannot map mxc_w1 registers\n");
138 goto failed_ioremap;
139 }
140 127
141 clk_prepare_enable(mdev->clk); 128 clk_prepare_enable(mdev->clk);
142 __raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER); 129 __raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
@@ -148,20 +135,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
148 err = w1_add_master_device(&mdev->bus_master); 135 err = w1_add_master_device(&mdev->bus_master);
149 136
150 if (err) 137 if (err)
151 goto failed_add; 138 return err;
152 139
153 platform_set_drvdata(pdev, mdev); 140 platform_set_drvdata(pdev, mdev);
154 return 0; 141 return 0;
155
156failed_add:
157 iounmap(mdev->regs);
158failed_ioremap:
159 release_mem_region(res->start, resource_size(res));
160failed_req:
161 clk_put(mdev->clk);
162failed_clk:
163 kfree(mdev);
164 return err;
165} 142}
166 143
167/* 144/*
@@ -170,16 +147,10 @@ failed_clk:
170static int mxc_w1_remove(struct platform_device *pdev) 147static int mxc_w1_remove(struct platform_device *pdev)
171{ 148{
172 struct mxc_w1_device *mdev = platform_get_drvdata(pdev); 149 struct mxc_w1_device *mdev = platform_get_drvdata(pdev);
173 struct resource *res;
174
175 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
176 150
177 w1_remove_master_device(&mdev->bus_master); 151 w1_remove_master_device(&mdev->bus_master);
178 152
179 iounmap(mdev->regs);
180 release_mem_region(res->start, resource_size(res));
181 clk_disable_unprepare(mdev->clk); 153 clk_disable_unprepare(mdev->clk);
182 clk_put(mdev->clk);
183 154
184 platform_set_drvdata(pdev, NULL); 155 platform_set_drvdata(pdev, NULL);
185 156
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index 85b363a5bd0f..d39dfa4cc235 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -72,7 +72,7 @@ static int w1_gpio_probe_dt(struct platform_device *pdev)
72 return 0; 72 return 0;
73} 73}
74 74
75static int __init w1_gpio_probe(struct platform_device *pdev) 75static int w1_gpio_probe(struct platform_device *pdev)
76{ 76{
77 struct w1_bus_master *master; 77 struct w1_bus_master *master;
78 struct w1_gpio_platform_data *pdata; 78 struct w1_gpio_platform_data *pdata;
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 92d08e7fcba2..c1a702f8c803 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -41,14 +41,18 @@ MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature famil
 41 * If it was disabled a parasite powered device might not get the required 41 * If it was disabled a parasite powered device might not get the required
42 * current to do a temperature conversion. If it is enabled parasite powered 42 * current to do a temperature conversion. If it is enabled parasite powered
43 * devices have a better chance of getting the current required. 43 * devices have a better chance of getting the current required.
44 * In case the parasite power-detection is not working (seems to be the case
45 * for some DS18S20) the strong pullup can also be forced, regardless of the
46 * power state of the devices.
47 *
48 * Summary of options:
49 * - strong_pullup = 0 Disable strong pullup completely
50 * - strong_pullup = 1 Enable automatic strong pullup detection
51 * - strong_pullup = 2 Force strong pullup
44 */ 52 */
45static int w1_strong_pullup = 1; 53static int w1_strong_pullup = 1;
46module_param_named(strong_pullup, w1_strong_pullup, int, 0); 54module_param_named(strong_pullup, w1_strong_pullup, int, 0);
47 55
48static u8 bad_roms[][9] = {
49 {0xaa, 0x00, 0x4b, 0x46, 0xff, 0xff, 0x0c, 0x10, 0x87},
50 {}
51 };
52 56
53static ssize_t w1_therm_read(struct device *device, 57static ssize_t w1_therm_read(struct device *device,
54 struct device_attribute *attr, char *buf); 58 struct device_attribute *attr, char *buf);
@@ -168,16 +172,6 @@ static inline int w1_convert_temp(u8 rom[9], u8 fid)
168 return 0; 172 return 0;
169} 173}
170 174
171static int w1_therm_check_rom(u8 rom[9])
172{
173 int i;
174
175 for (i=0; i<sizeof(bad_roms)/9; ++i)
176 if (!memcmp(bad_roms[i], rom, 9))
177 return 1;
178
179 return 0;
180}
181 175
182static ssize_t w1_therm_read(struct device *device, 176static ssize_t w1_therm_read(struct device *device,
183 struct device_attribute *attr, char *buf) 177 struct device_attribute *attr, char *buf)
@@ -194,10 +188,11 @@ static ssize_t w1_therm_read(struct device *device,
194 188
195 memset(rom, 0, sizeof(rom)); 189 memset(rom, 0, sizeof(rom));
196 190
197 verdict = 0;
198 crc = 0;
199
200 while (max_trying--) { 191 while (max_trying--) {
192
193 verdict = 0;
194 crc = 0;
195
201 if (!w1_reset_select_slave(sl)) { 196 if (!w1_reset_select_slave(sl)) {
202 int count = 0; 197 int count = 0;
203 unsigned int tm = 750; 198 unsigned int tm = 750;
@@ -210,7 +205,8 @@ static ssize_t w1_therm_read(struct device *device,
210 continue; 205 continue;
211 206
212 /* 750ms strong pullup (or delay) after the convert */ 207 /* 750ms strong pullup (or delay) after the convert */
213 if (!external_power && w1_strong_pullup) 208 if (w1_strong_pullup == 2 ||
209 (!external_power && w1_strong_pullup))
214 w1_next_pullup(dev, tm); 210 w1_next_pullup(dev, tm);
215 211
216 w1_write_8(dev, W1_CONVERT_TEMP); 212 w1_write_8(dev, W1_CONVERT_TEMP);
@@ -249,7 +245,7 @@ static ssize_t w1_therm_read(struct device *device,
249 } 245 }
250 } 246 }
251 247
252 if (!w1_therm_check_rom(rom)) 248 if (verdict)
253 break; 249 break;
254 } 250 }
255 251
@@ -260,7 +256,7 @@ static ssize_t w1_therm_read(struct device *device,
260 if (verdict) 256 if (verdict)
261 memcpy(sl->rom, rom, sizeof(sl->rom)); 257 memcpy(sl->rom, rom, sizeof(sl->rom));
262 else 258 else
263 dev_warn(device, "18S20 doesn't respond to CONVERT_TEMP.\n"); 259 dev_warn(device, "Read failed CRC check\n");
264 260
265 for (i = 0; i < 9; ++i) 261 for (i = 0; i < 9; ++i)
266 c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", sl->rom[i]); 262 c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", sl->rom[i]);
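
With the options documented above, strong_pullup now has three meanings (0 = off, 1 = automatic for parasite-powered devices, 2 = forced), and the read path tests `w1_strong_pullup == 2 || (!external_power && w1_strong_pullup)`. A small, illustrative truth-table check of that condition (userspace sketch, not kernel code):

	#include <assert.h>
	#include <stdbool.h>

	static bool needs_pullup(int strong_pullup, bool external_power)
	{
		return strong_pullup == 2 || (!external_power && strong_pullup);
	}

	int main(void)
	{
		assert(!needs_pullup(0, false));	/* disabled: never */
		assert( needs_pullup(1, false));	/* auto: parasite power only */
		assert(!needs_pullup(1, true));		/* auto: skipped when externally powered */
		assert( needs_pullup(2, true));		/* forced: always */
		return 0;
	}
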
diff --git a/include/linux/extcon/extcon_gpio.h b/include/linux/extcon/extcon-gpio.h
index 2d8307f7d67d..2d8307f7d67d 100644
--- a/include/linux/extcon/extcon_gpio.h
+++ b/include/linux/extcon/extcon-gpio.h
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index e73b852156b1..df77ba9a8166 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -325,14 +325,28 @@ struct hv_ring_buffer {
325 325
326 u32 interrupt_mask; 326 u32 interrupt_mask;
327 327
328 /* Pad it to PAGE_SIZE so that data starts on page boundary */ 328 /*
329 u8 reserved[4084]; 329 * Win8 uses some of the reserved bits to implement
330 330 * interrupt driven flow management. On the send side
331 /* NOTE: 331 * we can request that the receiver interrupt the sender
332 * The interrupt_mask field is used only for channels but since our 332 * when the ring transitions from being full to being able
333 * vmbus connection also uses this data structure and its data starts 333 * to handle a message of size "pending_send_sz".
334 * here, we commented out this field. 334 *
335 * Add necessary state for this enhancement.
335 */ 336 */
337 u32 pending_send_sz;
338
339 u32 reserved1[12];
340
341 union {
342 struct {
343 u32 feat_pending_send_sz:1;
344 };
345 u32 value;
346 } feature_bits;
347
348 /* Pad it to PAGE_SIZE so that data starts on page boundary */
349 u8 reserved2[4028];
336 350
337 /* 351 /*
338 * Ring data starts here + RingDataStartOffset 352 * Ring data starts here + RingDataStartOffset
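
An illustrative standalone check (not part of the patch) that the new Win8 flow-management fields occupy exactly the 4084 bytes freed from the old reserved[] pad, so ring data still starts on a page boundary:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	int main(void)
	{
		size_t new_bytes = sizeof(uint32_t)		/* pending_send_sz */
				 + 12 * sizeof(uint32_t)	/* reserved1[12]   */
				 + sizeof(uint32_t)		/* feature_bits    */
				 + 4028;			/* reserved2[4028] */

		assert(new_bytes == 4084);			/* old reserved[4084] */
		return 0;
	}
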
@@ -405,12 +419,22 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
405 */ 419 */
406#define HV_DRV_VERSION "3.1" 420#define HV_DRV_VERSION "3.1"
407 421
408
409/* 422/*
 410 * A revision number of vmbus that is used for ensuring both ends on a 423 * VMBUS version is a 32 bit entity broken up into
411 * partition are using compatible versions. 424 * two 16 bit quantities: major_number. minor_number.
425 *
426 * 0 . 13 (Windows Server 2008)
427 * 1 . 1 (Windows 7)
428 * 2 . 4 (Windows 8)
412 */ 429 */
413#define VMBUS_REVISION_NUMBER 13 430
431#define VERSION_WS2008 ((0 << 16) | (13))
432#define VERSION_WIN7 ((1 << 16) | (1))
433#define VERSION_WIN8 ((2 << 16) | (4))
434
435#define VERSION_INVAL -1
436
437#define VERSION_CURRENT VERSION_WIN8
414 438
415/* Make maximum size of pipe payload of 16K */ 439/* Make maximum size of pipe payload of 16K */
416#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384) 440#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
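
The protocol version is now a packed major.minor pair, as the comment above describes. An illustrative standalone check of the decomposition (VERSION_WIN8 == (2 << 16) | 4):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t version_win8 = (2 << 16) | 4;	/* VERSION_WIN8 */

		assert((version_win8 >> 16) == 2);	/* major: Windows 8 */
		assert((version_win8 & 0xffff) == 4);	/* minor */
		return 0;
	}
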
@@ -432,9 +456,13 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
432struct vmbus_channel_offer { 456struct vmbus_channel_offer {
433 uuid_le if_type; 457 uuid_le if_type;
434 uuid_le if_instance; 458 uuid_le if_instance;
435 u64 int_latency; /* in 100ns units */ 459
436 u32 if_revision; 460 /*
437 u32 server_ctx_size; /* in bytes */ 461 * These two fields are not currently used.
462 */
463 u64 reserved1;
464 u64 reserved2;
465
438 u16 chn_flags; 466 u16 chn_flags;
439 u16 mmio_megabytes; /* in bytes * 1024 * 1024 */ 467 u16 mmio_megabytes; /* in bytes * 1024 * 1024 */
440 468
@@ -456,7 +484,11 @@ struct vmbus_channel_offer {
456 unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES]; 484 unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
457 } pipe; 485 } pipe;
458 } u; 486 } u;
459 u32 padding; 487 /*
488 * The sub_channel_index is defined in win8.
489 */
490 u16 sub_channel_index;
491 u16 reserved3;
460} __packed; 492} __packed;
461 493
462/* Server Flags */ 494/* Server Flags */
@@ -652,7 +684,25 @@ struct vmbus_channel_offer_channel {
652 struct vmbus_channel_offer offer; 684 struct vmbus_channel_offer offer;
653 u32 child_relid; 685 u32 child_relid;
654 u8 monitorid; 686 u8 monitorid;
655 u8 monitor_allocated; 687 /*
688 * win7 and beyond splits this field into a bit field.
689 */
690 u8 monitor_allocated:1;
691 u8 reserved:7;
692 /*
693 * These are new fields added in win7 and later.
694 * Do not access these fields without checking the
695 * negotiated protocol.
696 *
697 * If "is_dedicated_interrupt" is set, we must not set the
698 * associated bit in the channel bitmap while sending the
699 * interrupt to the host.
700 *
701 * connection_id is to be used in signaling the host.
702 */
703 u16 is_dedicated_interrupt:1;
704 u16 reserved1:15;
705 u32 connection_id;
656} __packed; 706} __packed;
657 707
658/* Rescind Offer parameters */ 708/* Rescind Offer parameters */
@@ -683,8 +733,15 @@ struct vmbus_channel_open_channel {
683 /* GPADL for the channel's ring buffer. */ 733 /* GPADL for the channel's ring buffer. */
684 u32 ringbuffer_gpadlhandle; 734 u32 ringbuffer_gpadlhandle;
685 735
686 /* GPADL for the channel's server context save area. */ 736 /*
687 u32 server_contextarea_gpadlhandle; 737 * Starting with win8, this field will be used to specify
738 * the target virtual processor on which to deliver the interrupt for
739 * the host to guest communication.
740 * Prior to win8, incoming channel interrupts would only
741 * be delivered on cpu 0. Setting this value to 0 would
742 * preserve the earlier behavior.
743 */
744 u32 target_vp;
688 745
689 /* 746 /*
690 * The upstream ring buffer begins at offset zero in the memory 747 * The upstream ring buffer begins at offset zero in the memory
@@ -848,6 +905,27 @@ struct vmbus_close_msg {
848 struct vmbus_channel_close_channel msg; 905 struct vmbus_channel_close_channel msg;
849}; 906};
850 907
908/* Define connection identifier type. */
909union hv_connection_id {
910 u32 asu32;
911 struct {
912 u32 id:24;
913 u32 reserved:8;
914 } u;
915};
916
917/* Definition of the hv_signal_event hypercall input structure. */
918struct hv_input_signal_event {
919 union hv_connection_id connectionid;
920 u16 flag_number;
921 u16 rsvdz;
922};
923
924struct hv_input_signal_event_buffer {
925 u64 align8;
926 struct hv_input_signal_event event;
927};
928
851struct vmbus_channel { 929struct vmbus_channel {
852 struct list_head listentry; 930 struct list_head listentry;
853 931
@@ -882,8 +960,42 @@ struct vmbus_channel {
882 960
883 void (*onchannel_callback)(void *context); 961 void (*onchannel_callback)(void *context);
884 void *channel_callback_context; 962 void *channel_callback_context;
963
964 /*
965 * A channel can be marked for efficient (batched)
966 * reading:
967 * If batched_reading is set to "true", we read until the
968 * channel is empty and hold off interrupts from the host
969 * during the entire read process.
970 * If batched_reading is set to "false", the client is not
971 * going to perform batched reading.
972 *
973 * By default we will enable batched reading; specific
974 * drivers that don't want this behavior can turn it off.
975 */
976
977 bool batched_reading;
978
979 bool is_dedicated_interrupt;
980 struct hv_input_signal_event_buffer sig_buf;
981 struct hv_input_signal_event *sig_event;
982
983 /*
984 * Starting with win8, this field will be used to specify
985 * the target virtual processor on which to deliver the interrupt for
986 * the host to guest communication.
987 * Prior to win8, incoming channel interrupts would only
988 * be delivered on cpu 0. Setting this value to 0 would
989 * preserve the earlier behavior.
990 */
991 u32 target_vp;
885}; 992};
886 993
994static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
995{
996 c->batched_reading = state;
997}
998
887void vmbus_onmessage(void *context); 999void vmbus_onmessage(void *context);
888 1000
889int vmbus_request_offers(void); 1001int vmbus_request_offers(void);
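
A driver that prefers one interrupt per message can clear the batched_reading flag with the new set_channel_read_state() helper; by default batching stays enabled. A hypothetical probe snippet (the driver scaffolding is a placeholder, not from the patch):

	static int foo_probe(struct hv_device *dev,
			     const struct hv_vmbus_device_id *dev_id)
	{
		/* take an interrupt per message instead of draining the ring */
		set_channel_read_state(dev->channel, false);

		/* channel open and ring-buffer setup omitted in this sketch */
		return 0;
	}
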
@@ -1047,6 +1159,100 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver);
1047 g8, g9, ga, gb, gc, gd, ge, gf }, 1159 g8, g9, ga, gb, gc, gd, ge, gf },
1048 1160
1049/* 1161/*
1162 * GUID definitions of various offer types - services offered to the guest.
1163 */
1164
1165/*
1166 * Network GUID
1167 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
1168 */
1169#define HV_NIC_GUID \
1170 .guid = { \
1171 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, \
1172 0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e \
1173 }
1174
1175/*
1176 * IDE GUID
1177 * {32412632-86cb-44a2-9b5c-50d1417354f5}
1178 */
1179#define HV_IDE_GUID \
1180 .guid = { \
1181 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, \
1182 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 \
1183 }
1184
1185/*
1186 * SCSI GUID
1187 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
1188 */
1189#define HV_SCSI_GUID \
1190 .guid = { \
1191 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, \
1192 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f \
1193 }
1194
1195/*
1196 * Shutdown GUID
1197 * {0e0b6031-5213-4934-818b-38d90ced39db}
1198 */
1199#define HV_SHUTDOWN_GUID \
1200 .guid = { \
1201 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, \
1202 0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb \
1203 }
1204
1205/*
1206 * Time Synch GUID
1207 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
1208 */
1209#define HV_TS_GUID \
1210 .guid = { \
1211 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, \
1212 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf \
1213 }
1214
1215/*
1216 * Heartbeat GUID
1217 * {57164f39-9115-4e78-ab55-382f3bd5422d}
1218 */
1219#define HV_HEART_BEAT_GUID \
1220 .guid = { \
1221 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, \
1222 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d \
1223 }
1224
1225/*
1226 * KVP GUID
1227 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
1228 */
1229#define HV_KVP_GUID \
1230 .guid = { \
1231 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, \
1232 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6 \
1233 }
1234
1235/*
1236 * Dynamic memory GUID
1237 * {525074dc-8985-46e2-8057-a307dc18a502}
1238 */
1239#define HV_DM_GUID \
1240 .guid = { \
1241 0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46, \
1242 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 \
1243 }
1244
1245/*
1246 * Mouse GUID
1247 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
1248 */
1249#define HV_MOUSE_GUID \
1250 .guid = { \
1251 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, \
1252 0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a \
1253 }
1254
1255/*
1050 * Common header for Hyper-V ICs 1256 * Common header for Hyper-V ICs
1051 */ 1257 */
1052 1258
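
A VMBus driver advertises which of the offer types above it binds to through its device-id table. An illustrative snippet using one of the new GUID macros (the surrounding driver name is hypothetical):

	static const struct hv_vmbus_device_id foo_id_table[] = {
		{ HV_MOUSE_GUID, },
		{ },
	};
	MODULE_DEVICE_TABLE(vmbus, foo_id_table);
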
@@ -1150,5 +1356,11 @@ int hv_kvp_init(struct hv_util_service *);
1150void hv_kvp_deinit(void); 1356void hv_kvp_deinit(void);
1151void hv_kvp_onchannelcallback(void *); 1357void hv_kvp_onchannelcallback(void *);
1152 1358
1359/*
1360 * Negotiated version with the Host.
1361 */
1362
1363extern __u32 vmbus_proto_version;
1364
1153#endif /* __KERNEL__ */ 1365#endif /* __KERNEL__ */
1154#endif /* _HYPERV_H */ 1366#endif /* _HYPERV_H */
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index a580363a7d29..a710255528d7 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -75,8 +75,10 @@ enum arizona_type {
75#define ARIZONA_IRQ_DCS_HP_DONE 47 75#define ARIZONA_IRQ_DCS_HP_DONE 47
76#define ARIZONA_IRQ_FLL2_CLOCK_OK 48 76#define ARIZONA_IRQ_FLL2_CLOCK_OK 48
77#define ARIZONA_IRQ_FLL1_CLOCK_OK 49 77#define ARIZONA_IRQ_FLL1_CLOCK_OK 49
78#define ARIZONA_IRQ_MICD_CLAMP_RISE 50
79#define ARIZONA_IRQ_MICD_CLAMP_FALL 51
78 80
79#define ARIZONA_NUM_IRQ 50 81#define ARIZONA_NUM_IRQ 52
80 82
81struct snd_soc_dapm_context; 83struct snd_soc_dapm_context;
82 84
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index ec3e2a2a6d77..96d64f2b8d78 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -105,9 +105,30 @@ struct arizona_pdata {
105 */ 105 */
106 int max_channels_clocked[ARIZONA_MAX_AIF]; 106 int max_channels_clocked[ARIZONA_MAX_AIF];
107 107
108 /** GPIO5 is used for jack detection */
109 bool jd_gpio5;
110
111 /** Use the headphone detect circuit to identify the accessory */
112 bool hpdet_acc_id;
113
114 /** GPIO used for mic isolation with HPDET */
115 int hpdet_id_gpio;
116
108 /** GPIO for mic detection polarity */ 117 /** GPIO for mic detection polarity */
109 int micd_pol_gpio; 118 int micd_pol_gpio;
110 119
120 /** Mic detect ramp rate */
121 int micd_bias_start_time;
122
123 /** Mic detect sample rate */
124 int micd_rate;
125
126 /** Mic detect debounce level */
127 int micd_dbtime;
128
129 /** Force MICBIAS on for mic detect */
130 bool micd_force_micbias;
131
111 /** Headset polarity configurations */ 132 /** Headset polarity configurations */
112 struct arizona_micd_config *micd_configs; 133 struct arizona_micd_config *micd_configs;
113 int num_micd_configs; 134 int num_micd_configs;
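
A hypothetical board-file sketch showing how the new accessory-detect fields above might be populated (all values are illustrative, not from the patch):

	static struct arizona_pdata foo_board_arizona_pdata = {
		.hpdet_acc_id		= true,	/* identify accessory via HPDET */
		.hpdet_id_gpio		= 123,	/* hypothetical GPIO number */
		.micd_force_micbias	= true,	/* keep MICBIAS on during detection */
	};
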
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 1f6fe31a4d5c..188d89abd963 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -119,6 +119,8 @@
119#define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293 119#define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293
120#define ARIZONA_HEADPHONE_DETECT_1 0x29B 120#define ARIZONA_HEADPHONE_DETECT_1 0x29B
121#define ARIZONA_HEADPHONE_DETECT_2 0x29C 121#define ARIZONA_HEADPHONE_DETECT_2 0x29C
122#define ARIZONA_HP_DACVAL 0x29F
123#define ARIZONA_MICD_CLAMP_CONTROL 0x2A2
122#define ARIZONA_MIC_DETECT_1 0x2A3 124#define ARIZONA_MIC_DETECT_1 0x2A3
123#define ARIZONA_MIC_DETECT_2 0x2A4 125#define ARIZONA_MIC_DETECT_2 0x2A4
124#define ARIZONA_MIC_DETECT_3 0x2A5 126#define ARIZONA_MIC_DETECT_3 0x2A5
@@ -1194,6 +1196,14 @@
1194/* 1196/*
1195 * R64 (0x40) - Wake control 1197 * R64 (0x40) - Wake control
1196 */ 1198 */
1199#define ARIZONA_WKUP_MICD_CLAMP_FALL 0x0080 /* WKUP_MICD_CLAMP_FALL */
1200#define ARIZONA_WKUP_MICD_CLAMP_FALL_MASK 0x0080 /* WKUP_MICD_CLAMP_FALL */
1201#define ARIZONA_WKUP_MICD_CLAMP_FALL_SHIFT 7 /* WKUP_MICD_CLAMP_FALL */
1202#define ARIZONA_WKUP_MICD_CLAMP_FALL_WIDTH 1 /* WKUP_MICD_CLAMP_FALL */
1203#define ARIZONA_WKUP_MICD_CLAMP_RISE 0x0040 /* WKUP_MICD_CLAMP_RISE */
1204#define ARIZONA_WKUP_MICD_CLAMP_RISE_MASK 0x0040 /* WKUP_MICD_CLAMP_RISE */
1205#define ARIZONA_WKUP_MICD_CLAMP_RISE_SHIFT 6 /* WKUP_MICD_CLAMP_RISE */
1206#define ARIZONA_WKUP_MICD_CLAMP_RISE_WIDTH 1 /* WKUP_MICD_CLAMP_RISE */
1197#define ARIZONA_WKUP_GP5_FALL 0x0020 /* WKUP_GP5_FALL */ 1207#define ARIZONA_WKUP_GP5_FALL 0x0020 /* WKUP_GP5_FALL */
1198#define ARIZONA_WKUP_GP5_FALL_MASK 0x0020 /* WKUP_GP5_FALL */ 1208#define ARIZONA_WKUP_GP5_FALL_MASK 0x0020 /* WKUP_GP5_FALL */
1199#define ARIZONA_WKUP_GP5_FALL_SHIFT 5 /* WKUP_GP5_FALL */ 1209#define ARIZONA_WKUP_GP5_FALL_SHIFT 5 /* WKUP_GP5_FALL */
@@ -2035,6 +2045,9 @@
2035/* 2045/*
2036 * R667 (0x29B) - Headphone Detect 1 2046 * R667 (0x29B) - Headphone Detect 1
2037 */ 2047 */
2048#define ARIZONA_HP_IMPEDANCE_RANGE_MASK 0x0600 /* HP_IMPEDANCE_RANGE - [10:9] */
2049#define ARIZONA_HP_IMPEDANCE_RANGE_SHIFT 9 /* HP_IMPEDANCE_RANGE - [10:9] */
2050#define ARIZONA_HP_IMPEDANCE_RANGE_WIDTH 2 /* HP_IMPEDANCE_RANGE - [10:9] */
2038#define ARIZONA_HP_STEP_SIZE 0x0100 /* HP_STEP_SIZE */ 2051#define ARIZONA_HP_STEP_SIZE 0x0100 /* HP_STEP_SIZE */
2039#define ARIZONA_HP_STEP_SIZE_MASK 0x0100 /* HP_STEP_SIZE */ 2052#define ARIZONA_HP_STEP_SIZE_MASK 0x0100 /* HP_STEP_SIZE */
2040#define ARIZONA_HP_STEP_SIZE_SHIFT 8 /* HP_STEP_SIZE */ 2053#define ARIZONA_HP_STEP_SIZE_SHIFT 8 /* HP_STEP_SIZE */
@@ -2069,6 +2082,21 @@
2069#define ARIZONA_HP_LVL_SHIFT 0 /* HP_LVL - [6:0] */ 2082#define ARIZONA_HP_LVL_SHIFT 0 /* HP_LVL - [6:0] */
2070#define ARIZONA_HP_LVL_WIDTH 7 /* HP_LVL - [6:0] */ 2083#define ARIZONA_HP_LVL_WIDTH 7 /* HP_LVL - [6:0] */
2071 2084
2085#define ARIZONA_HP_DONE_B 0x8000 /* HP_DONE */
2086#define ARIZONA_HP_DONE_B_MASK 0x8000 /* HP_DONE */
2087#define ARIZONA_HP_DONE_B_SHIFT 15 /* HP_DONE */
2088#define ARIZONA_HP_DONE_B_WIDTH 1 /* HP_DONE */
2089#define ARIZONA_HP_LVL_B_MASK 0x7FFF /* HP_LVL - [14:0] */
2090#define ARIZONA_HP_LVL_B_SHIFT 0 /* HP_LVL - [14:0] */
2091#define ARIZONA_HP_LVL_B_WIDTH 15 /* HP_LVL - [14:0] */
2092
2093/*
2094 * R674 (0x2A2) - MICD clamp control
2095 */
2096#define ARIZONA_MICD_CLAMP_MODE_MASK 0x000F /* MICD_CLAMP_MODE - [3:0] */
2097#define ARIZONA_MICD_CLAMP_MODE_SHIFT 0 /* MICD_CLAMP_MODE - [3:0] */
2098#define ARIZONA_MICD_CLAMP_MODE_WIDTH 4 /* MICD_CLAMP_MODE - [3:0] */
2099
2072/* 2100/*
2073 * R675 (0x2A3) - Mic Detect 1 2101 * R675 (0x2A3) - Mic Detect 1
2074 */ 2102 */
@@ -5239,6 +5267,14 @@
5239/* 5267/*
5240 * R3408 (0xD50) - AOD wkup and trig 5268 * R3408 (0xD50) - AOD wkup and trig
5241 */ 5269 */
5270#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS 0x0080 /* MICD_CLAMP_FALL_TRIG_STS */
5271#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_MASK 0x0080 /* MICD_CLAMP_FALL_TRIG_STS */
5272#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_SHIFT 7 /* MICD_CLAMP_FALL_TRIG_STS */
5273#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_WIDTH 1 /* MICD_CLAMP_FALL_TRIG_STS */
5274#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS 0x0040 /* MICD_CLAMP_RISE_TRIG_STS */
5275#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_MASK 0x0040 /* MICD_CLAMP_RISE_TRIG_STS */
5276#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_SHIFT 6 /* MICD_CLAMP_RISE_TRIG_STS */
5277#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_WIDTH 1 /* MICD_CLAMP_RISE_TRIG_STS */
5242#define ARIZONA_GP5_FALL_TRIG_STS 0x0020 /* GP5_FALL_TRIG_STS */ 5278#define ARIZONA_GP5_FALL_TRIG_STS 0x0020 /* GP5_FALL_TRIG_STS */
5243#define ARIZONA_GP5_FALL_TRIG_STS_MASK 0x0020 /* GP5_FALL_TRIG_STS */ 5279#define ARIZONA_GP5_FALL_TRIG_STS_MASK 0x0020 /* GP5_FALL_TRIG_STS */
5244#define ARIZONA_GP5_FALL_TRIG_STS_SHIFT 5 /* GP5_FALL_TRIG_STS */ 5280#define ARIZONA_GP5_FALL_TRIG_STS_SHIFT 5 /* GP5_FALL_TRIG_STS */
@@ -5267,6 +5303,12 @@
5267/* 5303/*
5268 * R3409 (0xD51) - AOD IRQ1 5304 * R3409 (0xD51) - AOD IRQ1
5269 */ 5305 */
5306#define ARIZONA_MICD_CLAMP_FALL_EINT1 0x0080 /* MICD_CLAMP_FALL_EINT1 */
5307#define ARIZONA_MICD_CLAMP_FALL_EINT1_MASK 0x0080 /* MICD_CLAMP_FALL_EINT1 */
5308#define ARIZONA_MICD_CLAMP_FALL_EINT1_SHIFT 7 /* MICD_CLAMP_FALL_EINT1 */
5309#define ARIZONA_MICD_CLAMP_RISE_EINT1 0x0040 /* MICD_CLAMP_RISE_EINT1 */
5310#define ARIZONA_MICD_CLAMP_RISE_EINT1_MASK 0x0040 /* MICD_CLAMP_RISE_EINT1 */
5311#define ARIZONA_MICD_CLAMP_RISE_EINT1_SHIFT 6 /* MICD_CLAMP_RISE_EINT1 */
5270#define ARIZONA_GP5_FALL_EINT1 0x0020 /* GP5_FALL_EINT1 */ 5312#define ARIZONA_GP5_FALL_EINT1 0x0020 /* GP5_FALL_EINT1 */
5271#define ARIZONA_GP5_FALL_EINT1_MASK 0x0020 /* GP5_FALL_EINT1 */ 5313#define ARIZONA_GP5_FALL_EINT1_MASK 0x0020 /* GP5_FALL_EINT1 */
5272#define ARIZONA_GP5_FALL_EINT1_SHIFT 5 /* GP5_FALL_EINT1 */ 5314#define ARIZONA_GP5_FALL_EINT1_SHIFT 5 /* GP5_FALL_EINT1 */
@@ -5295,6 +5337,12 @@
5295/* 5337/*
5296 * R3410 (0xD52) - AOD IRQ2 5338 * R3410 (0xD52) - AOD IRQ2
5297 */ 5339 */
5340#define ARIZONA_MICD_CLAMP_FALL_EINT2 0x0080 /* MICD_CLAMP_FALL_EINT2 */
5341#define ARIZONA_MICD_CLAMP_FALL_EINT2_MASK 0x0080 /* MICD_CLAMP_FALL_EINT2 */
5342#define ARIZONA_MICD_CLAMP_FALL_EINT2_SHIFT 7 /* MICD_CLAMP_FALL_EINT2 */
5343#define ARIZONA_MICD_CLAMP_RISE_EINT2 0x0040 /* MICD_CLAMP_RISE_EINT2 */
5344#define ARIZONA_MICD_CLAMP_RISE_EINT2_MASK 0x0040 /* MICD_CLAMP_RISE_EINT2 */
5345#define ARIZONA_MICD_CLAMP_RISE_EINT2_SHIFT 6 /* MICD_CLAMP_RISE_EINT2 */
5298#define ARIZONA_GP5_FALL_EINT2 0x0020 /* GP5_FALL_EINT2 */ 5346#define ARIZONA_GP5_FALL_EINT2 0x0020 /* GP5_FALL_EINT2 */
5299#define ARIZONA_GP5_FALL_EINT2_MASK 0x0020 /* GP5_FALL_EINT2 */ 5347#define ARIZONA_GP5_FALL_EINT2_MASK 0x0020 /* GP5_FALL_EINT2 */
5300#define ARIZONA_GP5_FALL_EINT2_SHIFT 5 /* GP5_FALL_EINT2 */ 5348#define ARIZONA_GP5_FALL_EINT2_SHIFT 5 /* GP5_FALL_EINT2 */
@@ -5379,6 +5427,10 @@
5379/* 5427/*
5380 * R3413 (0xD55) - AOD IRQ Raw Status 5428 * R3413 (0xD55) - AOD IRQ Raw Status
5381 */ 5429 */
5430#define ARIZONA_MICD_CLAMP_STS 0x0008 /* MICD_CLAMP_STS */
5431#define ARIZONA_MICD_CLAMP_STS_MASK 0x0008 /* MICD_CLAMP_STS */
5432#define ARIZONA_MICD_CLAMP_STS_SHIFT 3 /* MICD_CLAMP_STS */
5433#define ARIZONA_MICD_CLAMP_STS_WIDTH 1 /* MICD_CLAMP_STS */
5382#define ARIZONA_GP5_STS 0x0004 /* GP5_STS */ 5434#define ARIZONA_GP5_STS 0x0004 /* GP5_STS */
5383#define ARIZONA_GP5_STS_MASK 0x0004 /* GP5_STS */ 5435#define ARIZONA_GP5_STS_MASK 0x0004 /* GP5_STS */
5384#define ARIZONA_GP5_STS_SHIFT 2 /* GP5_STS */ 5436#define ARIZONA_GP5_STS_SHIFT 2 /* GP5_STS */
@@ -5395,6 +5447,10 @@
5395/* 5447/*
5396 * R3414 (0xD56) - Jack detect debounce 5448 * R3414 (0xD56) - Jack detect debounce
5397 */ 5449 */
5450#define ARIZONA_MICD_CLAMP_DB 0x0008 /* MICD_CLAMP_DB */
5451#define ARIZONA_MICD_CLAMP_DB_MASK 0x0008 /* MICD_CLAMP_DB */
5452#define ARIZONA_MICD_CLAMP_DB_SHIFT 3 /* MICD_CLAMP_DB */
5453#define ARIZONA_MICD_CLAMP_DB_WIDTH 1 /* MICD_CLAMP_DB */
5398#define ARIZONA_JD2_DB 0x0002 /* JD2_DB */ 5454#define ARIZONA_JD2_DB 0x0002 /* JD2_DB */
5399#define ARIZONA_JD2_DB_MASK 0x0002 /* JD2_DB */ 5455#define ARIZONA_JD2_DB_MASK 0x0002 /* JD2_DB */
5400#define ARIZONA_JD2_DB_SHIFT 1 /* JD2_DB */ 5456#define ARIZONA_JD2_DB_SHIFT 1 /* JD2_DB */
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 1eeae5c07915..5b18ecde69b5 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -106,6 +106,92 @@ enum max77693_muic_reg {
106 MAX77693_MUIC_REG_END, 106 MAX77693_MUIC_REG_END,
107}; 107};
108 108
109/* MAX77693 MUIC - STATUS1~3 Register */
110#define STATUS1_ADC_SHIFT (0)
111#define STATUS1_ADCLOW_SHIFT (5)
112#define STATUS1_ADCERR_SHIFT (6)
113#define STATUS1_ADC1K_SHIFT (7)
114#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
115#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
116#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
117#define STATUS1_ADC1K_MASK (0x1 << STATUS1_ADC1K_SHIFT)
118
119#define STATUS2_CHGTYP_SHIFT (0)
120#define STATUS2_CHGDETRUN_SHIFT (3)
121#define STATUS2_DCDTMR_SHIFT (4)
122#define STATUS2_DXOVP_SHIFT (5)
123#define STATUS2_VBVOLT_SHIFT (6)
124#define STATUS2_VIDRM_SHIFT (7)
125#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
126#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
127#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
128#define STATUS2_DXOVP_MASK (0x1 << STATUS2_DXOVP_SHIFT)
129#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
130#define STATUS2_VIDRM_MASK (0x1 << STATUS2_VIDRM_SHIFT)
131
132#define STATUS3_OVP_SHIFT (2)
133#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
134
135/* MAX77693 CDETCTRL1~2 register */
136#define CDETCTRL1_CHGDETEN_SHIFT (0)
137#define CDETCTRL1_CHGTYPMAN_SHIFT (1)
138#define CDETCTRL1_DCDEN_SHIFT (2)
139#define CDETCTRL1_DCD2SCT_SHIFT (3)
140#define CDETCTRL1_CDDELAY_SHIFT (4)
141#define CDETCTRL1_DCDCPL_SHIFT (5)
142#define CDETCTRL1_CDPDET_SHIFT (7)
143#define CDETCTRL1_CHGDETEN_MASK (0x1 << CDETCTRL1_CHGDETEN_SHIFT)
144#define CDETCTRL1_CHGTYPMAN_MASK (0x1 << CDETCTRL1_CHGTYPMAN_SHIFT)
145#define CDETCTRL1_DCDEN_MASK (0x1 << CDETCTRL1_DCDEN_SHIFT)
146#define CDETCTRL1_DCD2SCT_MASK (0x1 << CDETCTRL1_DCD2SCT_SHIFT)
147#define CDETCTRL1_CDDELAY_MASK (0x1 << CDETCTRL1_CDDELAY_SHIFT)
148#define CDETCTRL1_DCDCPL_MASK (0x1 << CDETCTRL1_DCDCPL_SHIFT)
149#define CDETCTRL1_CDPDET_MASK (0x1 << CDETCTRL1_CDPDET_SHIFT)
150
151#define CDETCTRL2_VIDRMEN_SHIFT (1)
152#define CDETCTRL2_DXOVPEN_SHIFT (3)
153#define CDETCTRL2_VIDRMEN_MASK (0x1 << CDETCTRL2_VIDRMEN_SHIFT)
154#define CDETCTRL2_DXOVPEN_MASK (0x1 << CDETCTRL2_DXOVPEN_SHIFT)
155
156/* MAX77693 MUIC - CONTROL1~3 register */
157#define COMN1SW_SHIFT (0)
158#define COMP2SW_SHIFT (3)
159#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
160#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
161#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
162#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
163 | (1 << COMN1SW_SHIFT))
164#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
165 | (2 << COMN1SW_SHIFT))
166#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
167 | (3 << COMN1SW_SHIFT))
168#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
169 | (0 << COMN1SW_SHIFT))
170
171#define CONTROL2_LOWPWR_SHIFT (0)
172#define CONTROL2_ADCEN_SHIFT (1)
173#define CONTROL2_CPEN_SHIFT (2)
174#define CONTROL2_SFOUTASRT_SHIFT (3)
175#define CONTROL2_SFOUTORD_SHIFT (4)
176#define CONTROL2_ACCDET_SHIFT (5)
177#define CONTROL2_USBCPINT_SHIFT (6)
178#define CONTROL2_RCPS_SHIFT (7)
179#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT)
180#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT)
181#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT)
182#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT)
183#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT)
184#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT)
185#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT)
186#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT)
187
188#define CONTROL3_JIGSET_SHIFT (0)
189#define CONTROL3_BTLDSET_SHIFT (2)
190#define CONTROL3_ADCDBSET_SHIFT (4)
191#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
192#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT)
193#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT)
194
109/* Slave addr = 0x90: Haptic */ 195/* Slave addr = 0x90: Haptic */
110enum max77693_haptic_reg { 196enum max77693_haptic_reg {
111 MAX77693_HAPTIC_REG_STATUS = 0x00, 197 MAX77693_HAPTIC_REG_STATUS = 0x00,
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index fe03b2d35d4f..3109a6c5c948 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -38,6 +38,15 @@ struct max77693_reg_data {
38struct max77693_muic_platform_data { 38struct max77693_muic_platform_data {
39 struct max77693_reg_data *init_data; 39 struct max77693_reg_data *init_data;
40 int num_init_data; 40 int num_init_data;
41
42 int detcable_delay_ms;
43
44 /*
45 * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
46 * h/w path of COMP2/COMN1 on CONTROL1 register.
47 */
48 int path_usb;
49 int path_uart;
41}; 50};
42 51
43struct max77693_platform_data { 52struct max77693_platform_data {
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index 6ae21bf47d64..fb465dfbb59e 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -194,6 +194,70 @@ enum max8997_muic_reg {
194 MAX8997_MUIC_REG_END = 0xf, 194 MAX8997_MUIC_REG_END = 0xf,
195}; 195};
196 196
197/* MAX8997-MUIC STATUS1 register */
198#define STATUS1_ADC_SHIFT 0
199#define STATUS1_ADCLOW_SHIFT 5
200#define STATUS1_ADCERR_SHIFT 6
201#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
202#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
203#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
204
205/* MAX8997-MUIC STATUS2 register */
206#define STATUS2_CHGTYP_SHIFT 0
207#define STATUS2_CHGDETRUN_SHIFT 3
208#define STATUS2_DCDTMR_SHIFT 4
209#define STATUS2_DBCHG_SHIFT 5
210#define STATUS2_VBVOLT_SHIFT 6
211#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
212#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
213#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
214#define STATUS2_DBCHG_MASK (0x1 << STATUS2_DBCHG_SHIFT)
215#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
216
217/* MAX8997-MUIC STATUS3 register */
218#define STATUS3_OVP_SHIFT 2
219#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
220
221/* MAX8997-MUIC CONTROL1 register */
222#define COMN1SW_SHIFT 0
223#define COMP2SW_SHIFT 3
224#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
225#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
226#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
227
228#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
229 | (1 << COMN1SW_SHIFT))
230#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
231 | (2 << COMN1SW_SHIFT))
232#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
233 | (3 << COMN1SW_SHIFT))
234#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
235 | (0 << COMN1SW_SHIFT))
236
237#define CONTROL2_LOWPWR_SHIFT (0)
238#define CONTROL2_ADCEN_SHIFT (1)
239#define CONTROL2_CPEN_SHIFT (2)
240#define CONTROL2_SFOUTASRT_SHIFT (3)
241#define CONTROL2_SFOUTORD_SHIFT (4)
242#define CONTROL2_ACCDET_SHIFT (5)
243#define CONTROL2_USBCPINT_SHIFT (6)
244#define CONTROL2_RCPS_SHIFT (7)
245#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT)
246#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT)
247#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT)
248#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT)
249#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT)
250#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT)
251#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT)
252#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT)
253
254#define CONTROL3_JIGSET_SHIFT (0)
255#define CONTROL3_BTLDSET_SHIFT (2)
256#define CONTROL3_ADCDBSET_SHIFT (4)
257#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
258#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT)
259#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT)
260
197enum max8997_haptic_reg { 261enum max8997_haptic_reg {
198 MAX8997_HAPTIC_REG_GENERAL = 0x00, 262 MAX8997_HAPTIC_REG_GENERAL = 0x00,
199 MAX8997_HAPTIC_REG_CONF1 = 0x01, 263 MAX8997_HAPTIC_REG_CONF1 = 0x01,
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index 1d4a4fe6ac33..cf815577bd68 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -78,21 +78,6 @@ struct max8997_regulator_data {
78 struct device_node *reg_node; 78 struct device_node *reg_node;
79}; 79};
80 80
81enum max8997_muic_usb_type {
82 MAX8997_USB_HOST,
83 MAX8997_USB_DEVICE,
84};
85
86enum max8997_muic_charger_type {
87 MAX8997_CHARGER_TYPE_NONE = 0,
88 MAX8997_CHARGER_TYPE_USB,
89 MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT,
90 MAX8997_CHARGER_TYPE_DEDICATED_CHG,
91 MAX8997_CHARGER_TYPE_500MA,
92 MAX8997_CHARGER_TYPE_1A,
93 MAX8997_CHARGER_TYPE_DEAD_BATTERY = 7,
94};
95
96struct max8997_muic_reg_data { 81struct max8997_muic_reg_data {
97 u8 addr; 82 u8 addr;
98 u8 data; 83 u8 data;
@@ -107,6 +92,16 @@ struct max8997_muic_reg_data {
107struct max8997_muic_platform_data { 92struct max8997_muic_platform_data {
108 struct max8997_muic_reg_data *init_data; 93 struct max8997_muic_reg_data *init_data;
109 int num_init_data; 94 int num_init_data;
95
96 /* Check cable state after certain delay */
97 int detcable_delay_ms;
98
99 /*
100 * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
101 * h/w path of COMP2/COMN1 on CONTROL1 register.
102 */
103 int path_usb;
104 int path_uart;
110}; 105};
111 106
112enum max8997_haptic_motor_type { 107enum max8997_haptic_motor_type {
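
A hypothetical board-code sketch for the new MUIC platform-data fields; the path values reuse the CONTROL1_SW_* encodings added to max8997-private.h above, and the delay is illustrative:

	static struct max8997_muic_platform_data foo_muic_pdata = {
		.detcable_delay_ms	= 500,			/* illustrative */
		.path_usb		= CONTROL1_SW_USB,
		.path_uart		= CONTROL1_SW_UART,
	};
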
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
new file mode 100644
index 000000000000..f6a15205853b
--- /dev/null
+++ b/include/linux/ntb.h
@@ -0,0 +1,83 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2012 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * BSD LICENSE
14 *
15 * Copyright(c) 2012 Intel Corporation. All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 *
21 * * Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * * Redistributions in binary form must reproduce the above copy
24 * notice, this list of conditions and the following disclaimer in
25 * the documentation and/or other materials provided with the
26 * distribution.
27 * * Neither the name of Intel Corporation nor the names of its
28 * contributors may be used to endorse or promote products derived
29 * from this software without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *
43 * Intel PCIe NTB Linux driver
44 *
45 * Contact Information:
46 * Jon Mason <jon.mason@intel.com>
47 */
48
49struct ntb_transport_qp;
50
51struct ntb_client {
52 struct device_driver driver;
53 int (*probe) (struct pci_dev *pdev);
54 void (*remove) (struct pci_dev *pdev);
55};
56
57int ntb_register_client(struct ntb_client *drvr);
58void ntb_unregister_client(struct ntb_client *drvr);
59int ntb_register_client_dev(char *device_name);
60void ntb_unregister_client_dev(char *device_name);
61
62struct ntb_queue_handlers {
63 void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
64 void *data, int len);
65 void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
66 void *data, int len);
67 void (*event_handler) (void *data, int status);
68};
69
70unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
71unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
72struct ntb_transport_qp *
73ntb_transport_create_queue(void *data, struct pci_dev *pdev,
74 const struct ntb_queue_handlers *handlers);
75void ntb_transport_free_queue(struct ntb_transport_qp *qp);
76int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
77 unsigned int len);
78int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
79 unsigned int len);
80void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
81void ntb_transport_link_up(struct ntb_transport_qp *qp);
82void ntb_transport_link_down(struct ntb_transport_qp *qp);
83bool ntb_transport_link_query(struct ntb_transport_qp *qp);
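
A minimal sketch of an NTB client built on the API declared above (driver name and callback bodies are placeholders, not from the patch):

	static int foo_ntb_probe(struct pci_dev *pdev)
	{
		/* typically: ntb_transport_create_queue() and link-up here */
		return 0;
	}

	static void foo_ntb_remove(struct pci_dev *pdev)
	{
		/* typically: ntb_transport_free_queue() here */
	}

	static struct ntb_client foo_ntb_client = {
		.driver	= { .name = "foo_ntb_client" },
		.probe	= foo_ntb_probe,
		.remove	= foo_ntb_remove,
	};

	static int __init foo_ntb_init(void)
	{
		return ntb_register_client(&foo_ntb_client);
	}
	module_init(foo_ntb_init);

	static void __exit foo_ntb_exit(void)
	{
		ntb_unregister_client(&foo_ntb_client);
	}
	module_exit(foo_ntb_exit);
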
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
new file mode 100644
index 000000000000..023430e265fe
--- /dev/null
+++ b/include/linux/vmw_vmci_api.h
@@ -0,0 +1,82 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#ifndef __VMW_VMCI_API_H__
17#define __VMW_VMCI_API_H__
18
19#include <linux/uidgid.h>
20#include <linux/vmw_vmci_defs.h>
21
22#undef VMCI_KERNEL_API_VERSION
23#define VMCI_KERNEL_API_VERSION_1 1
24#define VMCI_KERNEL_API_VERSION_2 2
25#define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2
26
27typedef void (vmci_device_shutdown_fn) (void *device_registration,
28 void *user_data);
29
30int vmci_datagram_create_handle(u32 resource_id, u32 flags,
31 vmci_datagram_recv_cb recv_cb,
32 void *client_data,
33 struct vmci_handle *out_handle);
34int vmci_datagram_create_handle_priv(u32 resource_id, u32 flags, u32 priv_flags,
35 vmci_datagram_recv_cb recv_cb,
36 void *client_data,
37 struct vmci_handle *out_handle);
38int vmci_datagram_destroy_handle(struct vmci_handle handle);
39int vmci_datagram_send(struct vmci_datagram *msg);
40int vmci_doorbell_create(struct vmci_handle *handle, u32 flags,
41 u32 priv_flags,
42 vmci_callback notify_cb, void *client_data);
43int vmci_doorbell_destroy(struct vmci_handle handle);
44int vmci_doorbell_notify(struct vmci_handle handle, u32 priv_flags);
45u32 vmci_get_context_id(void);
46bool vmci_is_context_owner(u32 context_id, kuid_t uid);
47
48int vmci_event_subscribe(u32 event,
49 vmci_event_cb callback, void *callback_data,
50 u32 *subid);
51int vmci_event_unsubscribe(u32 subid);
52u32 vmci_context_get_priv_flags(u32 context_id);
53int vmci_qpair_alloc(struct vmci_qp **qpair,
54 struct vmci_handle *handle,
55 u64 produce_qsize,
56 u64 consume_qsize,
57 u32 peer, u32 flags, u32 priv_flags);
58int vmci_qpair_detach(struct vmci_qp **qpair);
59int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
60 u64 *producer_tail,
61 u64 *consumer_head);
62int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
63 u64 *consumer_tail,
64 u64 *producer_head);
65s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair);
66s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair);
67s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair);
68s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair);
69ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
70 const void *buf, size_t buf_size, int mode);
71ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
72 void *buf, size_t buf_size, int mode);
73ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
74 int mode);
75ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
76 void *iov, size_t iov_size, int mode);
77ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
78 void *iov, size_t iov_size, int mode);
79ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, void *iov, size_t iov_size,
80 int mode);
81
82#endif /* !__VMW_VMCI_API_H__ */
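
An illustrative sketch of allocating a queue pair toward the host and pushing a buffer through it with this API (error handling is minimal; the 0 flag/mode arguments and the 4096-byte queue sizes are assumptions for the example):

	static int foo_vmci_send(const void *buf, size_t len)
	{
		struct vmci_qp *qpair;
		struct vmci_handle handle;
		ssize_t written;
		int err;

		err = vmci_qpair_alloc(&qpair, &handle, 4096, 4096,
				       VMCI_HOST_CONTEXT_ID, 0,
				       VMCI_NO_PRIVILEGE_FLAGS);
		if (err != VMCI_SUCCESS)
			return err;

		written = vmci_qpair_enqueue(qpair, buf, len, 0);

		vmci_qpair_detach(&qpair);
		return written < 0 ? (int)written : 0;
	}
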
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
new file mode 100644
index 000000000000..65ac54c61c18
--- /dev/null
+++ b/include/linux/vmw_vmci_defs.h
@@ -0,0 +1,880 @@
1/*
2 * VMware VMCI Driver
3 *
4 * Copyright (C) 2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 */
15
16#ifndef _VMW_VMCI_DEF_H_
17#define _VMW_VMCI_DEF_H_
18
19#include <linux/atomic.h>
20
21/* Register offsets. */
22#define VMCI_STATUS_ADDR 0x00
23#define VMCI_CONTROL_ADDR 0x04
24#define VMCI_ICR_ADDR 0x08
25#define VMCI_IMR_ADDR 0x0c
26#define VMCI_DATA_OUT_ADDR 0x10
27#define VMCI_DATA_IN_ADDR 0x14
28#define VMCI_CAPS_ADDR 0x18
29#define VMCI_RESULT_LOW_ADDR 0x1c
30#define VMCI_RESULT_HIGH_ADDR 0x20
31
32/* Max number of devices. */
33#define VMCI_MAX_DEVICES 1
34
35/* Status register bits. */
36#define VMCI_STATUS_INT_ON 0x1
37
38/* Control register bits. */
39#define VMCI_CONTROL_RESET 0x1
40#define VMCI_CONTROL_INT_ENABLE 0x2
41#define VMCI_CONTROL_INT_DISABLE 0x4
42
43/* Capabilities register bits. */
44#define VMCI_CAPS_HYPERCALL 0x1
45#define VMCI_CAPS_GUESTCALL 0x2
46#define VMCI_CAPS_DATAGRAM 0x4
47#define VMCI_CAPS_NOTIFICATIONS 0x8
48
49/* Interrupt Cause register bits. */
50#define VMCI_ICR_DATAGRAM 0x1
51#define VMCI_ICR_NOTIFICATION 0x2
52
53/* Interrupt Mask register bits. */
54#define VMCI_IMR_DATAGRAM 0x1
55#define VMCI_IMR_NOTIFICATION 0x2
56
57/* Interrupt type. */
58enum {
59 VMCI_INTR_TYPE_INTX = 0,
60 VMCI_INTR_TYPE_MSI = 1,
61 VMCI_INTR_TYPE_MSIX = 2,
62};
63
64/* Maximum MSI/MSI-X interrupt vectors in the device. */
65#define VMCI_MAX_INTRS 2
66
67/*
68 * Supported interrupt vectors. There is one for each ICR value above,
69 * but here they indicate the position in the vector array/message ID.
70 */
71enum {
72 VMCI_INTR_DATAGRAM = 0,
73 VMCI_INTR_NOTIFICATION = 1,
74};
75
76/*
77 * A single VMCI device has an upper limit of 128MB on the amount of
78 * memory that can be used for queue pairs.
79 */
80#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
81
82/*
83 * Queues with pre-mapped data pages must be small, so that we don't pin
84 * too much kernel memory (especially on vmkernel). We limit a queuepair to
85 * 32 KB, or 16 KB per queue for symmetrical pairs.
86 */
87#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)
88
89/*
90 * We have a fixed set of resource IDs available in the VMX.
91 * This allows us to have a very simple implementation since we statically
92 * know how many will create datagram handles. If a new caller arrives and
93 * we have run out of slots we can manually increment the maximum size of
94 * available resource IDs.
95 *
96 * VMCI reserved hypervisor datagram resource IDs.
97 */
98enum {
99 VMCI_RESOURCES_QUERY = 0,
100 VMCI_GET_CONTEXT_ID = 1,
101 VMCI_SET_NOTIFY_BITMAP = 2,
102 VMCI_DOORBELL_LINK = 3,
103 VMCI_DOORBELL_UNLINK = 4,
104 VMCI_DOORBELL_NOTIFY = 5,
105 /*
106 * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
107 * obsoleted by the removal of VM to VM communication.
108 */
109 VMCI_DATAGRAM_REQUEST_MAP = 6,
110 VMCI_DATAGRAM_REMOVE_MAP = 7,
111 VMCI_EVENT_SUBSCRIBE = 8,
112 VMCI_EVENT_UNSUBSCRIBE = 9,
113 VMCI_QUEUEPAIR_ALLOC = 10,
114 VMCI_QUEUEPAIR_DETACH = 11,
115
116 /*
117 * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
118 * WS 7.0/7.1 and ESX 4.1
119 */
120 VMCI_HGFS_TRANSPORT = 13,
121 VMCI_UNITY_PBRPC_REGISTER = 14,
122 VMCI_RPC_PRIVILEGED = 15,
123 VMCI_RPC_UNPRIVILEGED = 16,
124 VMCI_RESOURCE_MAX = 17,
125};
126
127/*
128 * struct vmci_handle - Ownership information structure
129 * @context: The VMX context ID.
130 * @resource: The resource ID (used for locating in resource hash).
131 *
132 * The vmci_handle structure is used to track resources used within
133 * vmw_vmci.
134 */
135struct vmci_handle {
136 u32 context;
137 u32 resource;
138};
139
140#define vmci_make_handle(_cid, _rid) \
141 (struct vmci_handle){ .context = _cid, .resource = _rid }
142
143static inline bool vmci_handle_is_equal(struct vmci_handle h1,
144 struct vmci_handle h2)
145{
146 return h1.context == h2.context && h1.resource == h2.resource;
147}
148
149#define VMCI_INVALID_ID ~0
150static const struct vmci_handle VMCI_INVALID_HANDLE = {
151 .context = VMCI_INVALID_ID,
152 .resource = VMCI_INVALID_ID
153};
154
155static inline bool vmci_handle_is_invalid(struct vmci_handle h)
156{
157 return vmci_handle_is_equal(h, VMCI_INVALID_HANDLE);
158}
159
160/*
161 * The below defines can be used to send anonymous requests.
162 * This also indicates that no response is expected.
163 */
164#define VMCI_ANON_SRC_CONTEXT_ID VMCI_INVALID_ID
165#define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID
166static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
167 .context = VMCI_ANON_SRC_CONTEXT_ID,
168 .resource = VMCI_ANON_SRC_RESOURCE_ID
169};
170
171/* The lowest 16 context ids are reserved for internal use. */
172#define VMCI_RESERVED_CID_LIMIT ((u32) 16)
173
174/*
175 * Hypervisor context id, used for calling into hypervisor
176 * supplied services from the VM.
177 */
178#define VMCI_HYPERVISOR_CONTEXT_ID 0
179
180/*
181 * Well-known context id, a logical context that contains a set of
182 * well-known services. This context ID is now obsolete.
183 */
184#define VMCI_WELL_KNOWN_CONTEXT_ID 1
185
186/*
187 * Context ID used by host endpoints.
188 */
189#define VMCI_HOST_CONTEXT_ID 2
190
191#define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != (_cid) && \
192 (_cid) > VMCI_HOST_CONTEXT_ID)
193
194/*
195 * The VMCI_CONTEXT_RESOURCE_ID is used together with vmci_make_handle to make
196 * handles that refer to a specific context.
197 */
198#define VMCI_CONTEXT_RESOURCE_ID 0
199
200/*
201 * VMCI error codes.
202 */
203enum {
204 VMCI_SUCCESS_QUEUEPAIR_ATTACH = 5,
205 VMCI_SUCCESS_QUEUEPAIR_CREATE = 4,
206 VMCI_SUCCESS_LAST_DETACH = 3,
207 VMCI_SUCCESS_ACCESS_GRANTED = 2,
208 VMCI_SUCCESS_ENTRY_DEAD = 1,
209 VMCI_SUCCESS = 0,
210 VMCI_ERROR_INVALID_RESOURCE = (-1),
211 VMCI_ERROR_INVALID_ARGS = (-2),
212 VMCI_ERROR_NO_MEM = (-3),
213 VMCI_ERROR_DATAGRAM_FAILED = (-4),
214 VMCI_ERROR_MORE_DATA = (-5),
215 VMCI_ERROR_NO_MORE_DATAGRAMS = (-6),
216 VMCI_ERROR_NO_ACCESS = (-7),
217 VMCI_ERROR_NO_HANDLE = (-8),
218 VMCI_ERROR_DUPLICATE_ENTRY = (-9),
219 VMCI_ERROR_DST_UNREACHABLE = (-10),
220 VMCI_ERROR_PAYLOAD_TOO_LARGE = (-11),
221 VMCI_ERROR_INVALID_PRIV = (-12),
222 VMCI_ERROR_GENERIC = (-13),
223 VMCI_ERROR_PAGE_ALREADY_SHARED = (-14),
224 VMCI_ERROR_CANNOT_SHARE_PAGE = (-15),
225 VMCI_ERROR_CANNOT_UNSHARE_PAGE = (-16),
226 VMCI_ERROR_NO_PROCESS = (-17),
227 VMCI_ERROR_NO_DATAGRAM = (-18),
228 VMCI_ERROR_NO_RESOURCES = (-19),
229 VMCI_ERROR_UNAVAILABLE = (-20),
230 VMCI_ERROR_NOT_FOUND = (-21),
231 VMCI_ERROR_ALREADY_EXISTS = (-22),
232 VMCI_ERROR_NOT_PAGE_ALIGNED = (-23),
233 VMCI_ERROR_INVALID_SIZE = (-24),
234 VMCI_ERROR_REGION_ALREADY_SHARED = (-25),
235 VMCI_ERROR_TIMEOUT = (-26),
236 VMCI_ERROR_DATAGRAM_INCOMPLETE = (-27),
237 VMCI_ERROR_INCORRECT_IRQL = (-28),
238 VMCI_ERROR_EVENT_UNKNOWN = (-29),
239 VMCI_ERROR_OBSOLETE = (-30),
240 VMCI_ERROR_QUEUEPAIR_MISMATCH = (-31),
241 VMCI_ERROR_QUEUEPAIR_NOTSET = (-32),
242 VMCI_ERROR_QUEUEPAIR_NOTOWNER = (-33),
243 VMCI_ERROR_QUEUEPAIR_NOTATTACHED = (-34),
244 VMCI_ERROR_QUEUEPAIR_NOSPACE = (-35),
245 VMCI_ERROR_QUEUEPAIR_NODATA = (-36),
246 VMCI_ERROR_BUSMEM_INVALIDATION = (-37),
247 VMCI_ERROR_MODULE_NOT_LOADED = (-38),
248 VMCI_ERROR_DEVICE_NOT_FOUND = (-39),
249 VMCI_ERROR_QUEUEPAIR_NOT_READY = (-40),
250 VMCI_ERROR_WOULD_BLOCK = (-41),
251
252 /* VMCI clients should return error code within this range */
253 VMCI_ERROR_CLIENT_MIN = (-500),
254 VMCI_ERROR_CLIENT_MAX = (-550),
255
256 /* Internal error codes. */
257 VMCI_SHAREDMEM_ERROR_BAD_CONTEXT = (-1000),
258};
259
260/* VMCI reserved events. */
261enum {
262 /* Only applicable to guest endpoints */
263 VMCI_EVENT_CTX_ID_UPDATE = 0,
264
265 /* Applicable to guest and host */
266 VMCI_EVENT_CTX_REMOVED = 1,
267
268 /* Only applicable to guest endpoints */
269 VMCI_EVENT_QP_RESUMED = 2,
270
271 /* Applicable to guest and host */
272 VMCI_EVENT_QP_PEER_ATTACH = 3,
273
274 /* Applicable to guest and host */
275 VMCI_EVENT_QP_PEER_DETACH = 4,
276
277 /*
278 * Applicable to VMX and vmk. On vmk,
279 * this event has the Context payload type.
280 */
281 VMCI_EVENT_MEM_ACCESS_ON = 5,
282
283 /*
284 * Applicable to VMX and vmk. Same as
285 * above for the payload type.
286 */
287 VMCI_EVENT_MEM_ACCESS_OFF = 6,
288 VMCI_EVENT_MAX = 7,
289};
290
291/*
292 * Of the above events, a few are reserved for use in the VMX, and
293 * other endpoints (guest and host kernel) should not use them. For
294 * the rest of the events, we allow both host and guest endpoints to
295 * subscribe to them, to maintain the same API for host and guest
296 * endpoints.
297 */
298#define VMCI_EVENT_VALID_VMX(_event) ((_event) == VMCI_EVENT_MEM_ACCESS_ON || \
299 (_event) == VMCI_EVENT_MEM_ACCESS_OFF)
300
301#define VMCI_EVENT_VALID(_event) ((_event) < VMCI_EVENT_MAX && \
302 !VMCI_EVENT_VALID_VMX(_event))
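/*
 * Illustrative sketch (not part of the original header): how the two macros
 * above partition the event space. Guest and host endpoints may subscribe
 * to the queue pair events, while the MEM_ACCESS events are reserved for
 * the VMX.
 */
static inline bool vmci_event_valid_example(void)
{
	return VMCI_EVENT_VALID(VMCI_EVENT_QP_PEER_ATTACH) &&
	       !VMCI_EVENT_VALID(VMCI_EVENT_MEM_ACCESS_ON) &&
	       VMCI_EVENT_VALID_VMX(VMCI_EVENT_MEM_ACCESS_ON);
}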
303
304/* Reserved guest datagram resource ids. */
305#define VMCI_EVENT_HANDLER 0
306
307/*
308 * VMCI coarse-grained privileges (per context or host
309 * process/endpoint). An entity with the restricted flag is only
310 * allowed to interact with the hypervisor and trusted entities.
311 */
312enum {
313 VMCI_NO_PRIVILEGE_FLAGS = 0,
314 VMCI_PRIVILEGE_FLAG_RESTRICTED = 1,
315 VMCI_PRIVILEGE_FLAG_TRUSTED = 2,
316 VMCI_PRIVILEGE_ALL_FLAGS = (VMCI_PRIVILEGE_FLAG_RESTRICTED |
317 VMCI_PRIVILEGE_FLAG_TRUSTED),
318 VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS = VMCI_NO_PRIVILEGE_FLAGS,
319 VMCI_LEAST_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_RESTRICTED,
320 VMCI_MAX_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_TRUSTED,
321};
322
323/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
324#define VMCI_RESERVED_RESOURCE_ID_MAX 1023
325
326/*
327 * Driver version.
328 *
329 * Increment major version when you make an incompatible change.
330 * Compatibility goes both ways (old driver with new executable
331 * as well as new driver with old executable).
332 */
333
334/* Never change VMCI_VERSION_SHIFT_WIDTH */
335#define VMCI_VERSION_SHIFT_WIDTH 16
336#define VMCI_MAKE_VERSION(_major, _minor) \
337 ((_major) << VMCI_VERSION_SHIFT_WIDTH | (u16) (_minor))
338
339#define VMCI_VERSION_MAJOR(v) ((u32) (v) >> VMCI_VERSION_SHIFT_WIDTH)
340#define VMCI_VERSION_MINOR(v) ((u16) (v))
341
342/*
343 * VMCI_VERSION is always the current version. Subsequently listed
344 * versions are ways of detecting previous versions of the connecting
345 * application (i.e., VMX).
346 *
347 * VMCI_VERSION_NOVMVM: This version removed support for VM to VM
348 * communication.
349 *
350 * VMCI_VERSION_NOTIFY: This version introduced doorbell notification
351 * support.
352 *
353 * VMCI_VERSION_HOSTQP: This version introduced host end point support
354 * for hosted products.
355 *
356 * VMCI_VERSION_PREHOSTQP: This is the version prior to the adoption of
357 * support for host end-points.
358 *
359 * VMCI_VERSION_PREVERS2: This fictional version number is intended to
360 * represent the version of a VMX which doesn't call into the driver
361 * with ioctl VERSION2 and thus doesn't establish its version with the
362 * driver.
363 */
364
365#define VMCI_VERSION VMCI_VERSION_NOVMVM
366#define VMCI_VERSION_NOVMVM VMCI_MAKE_VERSION(11, 0)
367#define VMCI_VERSION_NOTIFY VMCI_MAKE_VERSION(10, 0)
368#define VMCI_VERSION_HOSTQP VMCI_MAKE_VERSION(9, 0)
369#define VMCI_VERSION_PREHOSTQP VMCI_MAKE_VERSION(8, 0)
370#define VMCI_VERSION_PREVERS2 VMCI_MAKE_VERSION(1, 0)
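/*
 * Illustrative sketch (not part of the original header): how the version
 * encoding works out numerically. VMCI_MAKE_VERSION(11, 0) packs the major
 * number into the upper 16 bits, so VMCI_VERSION_NOVMVM == (11 << 16) | 0
 * == 720896, and the accessor macros recover the two halves.
 */
static inline bool vmci_version_example(void)
{
	u32 v = VMCI_MAKE_VERSION(11, 0);	/* equals VMCI_VERSION_NOVMVM */

	return v == 720896 &&
	       VMCI_VERSION_MAJOR(v) == 11 &&
	       VMCI_VERSION_MINOR(v) == 0;
}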
371
372#define VMCI_SOCKETS_MAKE_VERSION(_p) \
373 ((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))
374
375/*
376 * The VMCI IOCTLs. We use identity code 7, as noted in ioctl-number.h, and
377 * we start at sequence 9f. This gives us the same values that our shipping
378 * products use, starting at 1951, provided we leave out the direction and
379 * structure size. Note that VMMon occupies the block following us, starting
380 * at 2001.
381 */
382#define IOCTL_VMCI_VERSION _IO(7, 0x9f) /* 1951 */
383#define IOCTL_VMCI_INIT_CONTEXT _IO(7, 0xa0)
384#define IOCTL_VMCI_QUEUEPAIR_SETVA _IO(7, 0xa4)
385#define IOCTL_VMCI_NOTIFY_RESOURCE _IO(7, 0xa5)
386#define IOCTL_VMCI_NOTIFICATIONS_RECEIVE _IO(7, 0xa6)
387#define IOCTL_VMCI_VERSION2 _IO(7, 0xa7)
388#define IOCTL_VMCI_QUEUEPAIR_ALLOC _IO(7, 0xa8)
389#define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE _IO(7, 0xa9)
390#define IOCTL_VMCI_QUEUEPAIR_DETACH _IO(7, 0xaa)
391#define IOCTL_VMCI_DATAGRAM_SEND _IO(7, 0xab)
392#define IOCTL_VMCI_DATAGRAM_RECEIVE _IO(7, 0xac)
393#define IOCTL_VMCI_CTX_ADD_NOTIFICATION _IO(7, 0xaf)
394#define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION _IO(7, 0xb0)
395#define IOCTL_VMCI_CTX_GET_CPT_STATE _IO(7, 0xb1)
396#define IOCTL_VMCI_CTX_SET_CPT_STATE _IO(7, 0xb2)
397#define IOCTL_VMCI_GET_CONTEXT_ID _IO(7, 0xb3)
398#define IOCTL_VMCI_SOCKETS_VERSION _IO(7, 0xb4)
399#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE _IO(7, 0xb8)
400#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
401#define IOCTL_VMCI_SET_NOTIFY _IO(7, 0xcb) /* 1995 */
402/*IOCTL_VMMON_START _IO(7, 0xd1)*/ /* 2001 */
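/*
 * Illustrative sketch (not part of the original header): the 1951 noted
 * above is simply the identity code and sequence number with the ioctl
 * direction and size fields left out, so (7 << 8) | 0x9f == 0x79f == 1951,
 * and IOCTL_VMCI_SET_NOTIFY works out to (7 << 8) | 0xcb == 1995.
 */
static inline bool vmci_ioctl_numbering_example(void)
{
	return ((7 << 8) | 0x9f) == 1951 && ((7 << 8) | 0xcb) == 1995;
}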
403
404/*
405 * struct vmci_queue_header - VMCI Queue Header information.
406 *
407 * A Queue cannot stand by itself as designed. Each Queue's header
408 * contains a pointer into itself (the producer_tail) and into its peer
409 * (consumer_head). The reason for the separation is one of
410 * accessibility: Each end-point can modify two things: where the next
411 * location to enqueue is within its produce_q (producer_tail); and
412 * where the next dequeue location is in its consume_q (consumer_head).
413 *
414 * An end-point cannot modify the pointers of its peer (guest to
415 * guest; NOTE that in the host both queue headers are mapped r/w).
416 * But, each end-point needs read access to both Queue header
417 * structures in order to determine how much space is used (or left)
418 * in the Queue. This is because for an end-point to know how full
419 * its produce_q is, it needs to use the consumer_head that points into
420 * the produce_q but -that- consumer_head is in the Queue header for
421 * that end-point's consume_q.
422 *
423 * Thoroughly confused? Sorry.
424 *
425 * producer_tail: the point to enqueue new entrants. When you approach
426 * a line in a store, for example, you walk up to the tail.
427 *
428 * consumer_head: the point in the queue from which the next element is
429 * dequeued. In other words, who is next in line is he who is at the
430 * head of the line.
431 *
432 * Also, producer_tail points to an empty byte in the Queue, whereas
433 * consumer_head points to a valid byte of data (unless producer_tail ==
434 * consumer_head in which case consumer_head does not point to a valid
435 * byte of data).
436 *
437 * For a queue of buffer 'size' bytes, the tail and head pointers will be in
438 * the range [0, size-1].
439 *
440 * If produce_q_header->producer_tail == consume_q_header->consumer_head
441 * then the produce_q is empty.
442 */
443struct vmci_queue_header {
444 /* All fields are 64bit and aligned. */
445 struct vmci_handle handle; /* Identifier. */
446 atomic64_t producer_tail; /* Offset in this queue. */
447 atomic64_t consumer_head; /* Offset in peer queue. */
448};
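/*
 * Illustrative sketch (not part of the original header): the fullness rule
 * described above written out for plain offsets. For a queue of 'size'
 * bytes with tail and head in [0, size-1], the bytes ready to be consumed
 * are (tail - head) mod size; e.g. size = 16, head = 14, tail = 2 gives 4
 * bytes in the queue, and tail == head means empty.
 */
static inline u64 vmci_queue_used_bytes_example(u64 tail, u64 head, u64 size)
{
	return tail >= head ? tail - head : size - (head - tail);
}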
449
450/*
451 * struct vmci_datagram - Base struct for vmci datagrams.
452 * @dst: A vmci_handle that tracks the destination of the datagram.
453 * @src: A vmci_handle that tracks the source of the datagram.
454 * @payload_size: The size of the payload.
455 *
456 * vmci_datagram structs are used when sending vmci datagrams. They include
457 * the necessary source and destination information to properly route
458 * the information along with the size of the package.
459 */
460struct vmci_datagram {
461 struct vmci_handle dst;
462 struct vmci_handle src;
463 u64 payload_size;
464};
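/*
 * Illustrative sketch (not part of the original header): filling in the
 * header of a payload-free datagram addressed to the host context's
 * context resource, with an anonymous source so that no response is
 * expected. Whether a given resource accepts such a datagram depends on
 * the receiving side and is not shown here.
 */
static inline void vmci_datagram_header_example(struct vmci_datagram *dg)
{
	dg->dst = vmci_make_handle(VMCI_HOST_CONTEXT_ID,
				   VMCI_CONTEXT_RESOURCE_ID);
	dg->src = VMCI_ANON_SRC_HANDLE;
	dg->payload_size = 0;
}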
465
466/*
467 * Second flag is for creating a well-known handle instead of a per context
468 * handle. Next flag is for deferring datagram delivery, so that the
469 * datagram callback is invoked in a delayed context (not interrupt context).
470 */
471#define VMCI_FLAG_DG_NONE 0
472#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
473#define VMCI_FLAG_ANYCID_DG_HND 0x2
474#define VMCI_FLAG_DG_DELAYED_CB 0x4
475
476/*
477 * Maximum supported size of a VMCI datagram for routable datagrams.
478 * Datagrams going to the hypervisor are allowed to be larger.
479 */
480#define VMCI_MAX_DG_SIZE (17 * 4096)
481#define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - \
482 sizeof(struct vmci_datagram))
483#define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) + \
484 sizeof(struct vmci_datagram))
485#define VMCI_DG_HEADERSIZE sizeof(struct vmci_datagram)
486#define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payload_size)
487#define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (~((size_t) 0x7)))
488#define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)
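/*
 * Illustrative sketch (not part of the original header): what the macros
 * above work out to, assuming the usual layout of struct vmci_datagram
 * (two 8-byte handles plus a 64-bit payload_size, i.e. a 24-byte header).
 * A 100-byte payload then gives VMCI_DG_SIZE() == 124, rounded up to 128
 * by VMCI_DG_SIZE_ALIGNED(); VMCI_MAX_DG_SIZE itself is 17 * 4096 = 69632.
 */
static inline bool vmci_dg_size_example(const struct vmci_datagram *dg)
{
	return dg->payload_size == 100 &&
	       VMCI_DG_SIZE(dg) == 124 &&
	       VMCI_DG_SIZE_ALIGNED(dg) == 128;
}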
489
490struct vmci_event_payload_qp {
491 struct vmci_handle handle; /* queue_pair handle. */
492 u32 peer_id; /* Context id of attaching/detaching VM. */
493 u32 _pad;
494};
495
496/* Flags for VMCI queue_pair API. */
497enum {
498 /* Fail alloc if QP not created by peer. */
499 VMCI_QPFLAG_ATTACH_ONLY = 1 << 0,
500
501 /* Only allow attaches from local context. */
502 VMCI_QPFLAG_LOCAL = 1 << 1,
503
504 /* Host won't block when guest is quiesced. */
505 VMCI_QPFLAG_NONBLOCK = 1 << 2,
506
507 /* Pin data pages in ESX. Used with NONBLOCK */
508 VMCI_QPFLAG_PINNED = 1 << 3,
509
510 /* Update the following flag when adding new flags. */
511 VMCI_QP_ALL_FLAGS = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL |
512 VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
513
514 /* Convenience flags */
515 VMCI_QP_ASYMM = (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
516 VMCI_QP_ASYMM_PEER = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM),
517};
518
519/*
520 * We allow at least 1024 more event datagrams from the hypervisor past the
521 * normally allowed datagrams pending for a given context. We define this
522 * limit on event datagrams from the hypervisor to guard against DoS attack
523 * from a malicious VM which could repeatedly attach to and detach from a queue
524 * pair, causing events to be queued at the destination VM. However, the rate
525 * at which such events can be generated is small since it requires a VM exit
526 * and handling of queue pair attach/detach call at the hypervisor. Event
527 * datagrams may be queued up at the destination VM if it has interrupts
528 * disabled or if it is not draining events for some other reason. 1024
529 * datagrams is a grossly conservative estimate of the time for which
530 * interrupts may be disabled in the destination VM, but at the same time does
531 * not exacerbate the memory pressure problem on the host by much (size of each
532 * event datagram is small).
533 */
534#define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE \
535 (VMCI_MAX_DATAGRAM_QUEUE_SIZE + \
536 1024 * (sizeof(struct vmci_datagram) + \
537 sizeof(struct vmci_event_data_max)))
538
539/*
540 * Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of
541 * hypervisor resources. Struct size is 32 bytes. All fields in struct are
542 * aligned to their natural alignment.
543 */
544struct vmci_resource_query_hdr {
545 struct vmci_datagram hdr;
546 u32 num_resources;
547 u32 _padding;
548};
549
550/*
551 * Convenience struct for negotiating vectors. Must match layout of
552 * VMCIResourceQueryHdr minus the struct vmci_datagram header.
553 */
554struct vmci_resource_query_msg {
555 u32 num_resources;
556 u32 _padding;
557 u32 resources[1];
558};
559
560/*
561 * The maximum number of resources that can be queried using
562 * VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31
563 * bits of a positive return value. Negative values are reserved for
564 * errors.
565 */
566#define VMCI_RESOURCE_QUERY_MAX_NUM 31
567
568/* Maximum size for the VMCI_RESOURCE_QUERY request. */
569#define VMCI_RESOURCE_QUERY_MAX_SIZE \
570 (sizeof(struct vmci_resource_query_hdr) + \
571 sizeof(u32) * VMCI_RESOURCE_QUERY_MAX_NUM)
572
573/*
574 * Struct used for setting the notification bitmap. All fields in
575 * struct are aligned to their natural alignment.
576 */
577struct vmci_notify_bm_set_msg {
578 struct vmci_datagram hdr;
579 u32 bitmap_ppn;
580 u32 _pad;
581};
582
583/*
584 * Struct used for linking a doorbell handle with an index in the
585 * notify bitmap. All fields in struct are aligned to their natural
586 * alignment.
587 */
588struct vmci_doorbell_link_msg {
589 struct vmci_datagram hdr;
590 struct vmci_handle handle;
591 u64 notify_idx;
592};
593
594/*
595 * Struct used for unlinking a doorbell handle from an index in the
596 * notify bitmap. All fields in struct are aligned to their natural
597 * alignment.
598 */
599struct vmci_doorbell_unlink_msg {
600 struct vmci_datagram hdr;
601 struct vmci_handle handle;
602};
603
604/*
605 * Struct used for generating a notification on a doorbell handle. All
606 * fields in struct are aligned to their natural alignment.
607 */
608struct vmci_doorbell_notify_msg {
609 struct vmci_datagram hdr;
610 struct vmci_handle handle;
611};
612
613/*
614 * This struct is used to contain data for events. Size of this struct is a
615 * multiple of 8 bytes, and all fields are aligned to their natural alignment.
616 */
617struct vmci_event_data {
618 u32 event; /* 4 bytes. */
619 u32 _pad;
620 /* Event payload is put here. */
621};
622
623/*
624 * Define the different VMCI_EVENT payload data types here. All structs must
625 * be a multiple of 8 bytes, and fields must be aligned to their natural
626 * alignment.
627 */
628struct vmci_event_payld_ctx {
629 u32 context_id; /* 4 bytes. */
630 u32 _pad;
631};
632
633struct vmci_event_payld_qp {
634 struct vmci_handle handle; /* queue_pair handle. */
635 u32 peer_id; /* Context id of attaching/detaching VM. */
636 u32 _pad;
637};
638
639/*
640 * We define the following struct to get the size of the maximum event
641 * data the hypervisor may send to the guest. If adding a new event
642 * payload type above, add it to the following struct too (inside the
643 * union).
644 */
645struct vmci_event_data_max {
646 struct vmci_event_data event_data;
647 union {
648 struct vmci_event_payld_ctx context_payload;
649 struct vmci_event_payld_qp qp_payload;
650 } ev_data_payload;
651};
652
653/*
654 * Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and
655 * VMCI_EVENT_HANDLER messages. Struct size is 32 bytes. All fields
656 * in struct are aligned to their natural alignment.
657 */
658struct vmci_event_msg {
659 struct vmci_datagram hdr;
660
661 /* Has event type and payload. */
662 struct vmci_event_data event_data;
663
664 /* Payload gets put here. */
665};
666
667/* Event with context payload. */
668struct vmci_event_ctx {
669 struct vmci_event_msg msg;
670 struct vmci_event_payld_ctx payload;
671};
672
673/* Event with QP payload. */
674struct vmci_event_qp {
675 struct vmci_event_msg msg;
676 struct vmci_event_payld_qp payload;
677};
678
679/*
680 * Structs used for queue_pair alloc and detach messages. We align fields of
681 * these structs to 64bit boundaries.
682 */
683struct vmci_qp_alloc_msg {
684 struct vmci_datagram hdr;
685 struct vmci_handle handle;
686 u32 peer;
687 u32 flags;
688 u64 produce_size;
689 u64 consume_size;
690 u64 num_ppns;
691
692 /* List of PPNs placed here. */
693};
694
695struct vmci_qp_detach_msg {
696 struct vmci_datagram hdr;
697 struct vmci_handle handle;
698};
699
700/* VMCI Doorbell API. */
701#define VMCI_FLAG_DELAYED_CB 0x01
702
703typedef void (*vmci_callback) (void *client_data);
704
705/*
706 * struct vmci_qp - A vmw_vmci queue pair handle.
707 *
708 * This structure is used as a handle to a queue pair created by
709 * VMCI. It is intentionally left opaque to clients.
710 */
711struct vmci_qp;
712
713/* Callback needed for correctly waiting on events. */
714typedef int (*vmci_datagram_recv_cb) (void *client_data,
715 struct vmci_datagram *msg);
716
717/* VMCI Event API. */
718typedef void (*vmci_event_cb) (u32 sub_id, const struct vmci_event_data *ed,
719 void *client_data);
720
721/*
722 * We use the following inline function to access the payload data
723 * associated with an event data.
724 */
725static inline const void *
726vmci_event_data_const_payload(const struct vmci_event_data *ev_data)
727{
728 return (const char *)ev_data + sizeof(*ev_data);
729}
730
731static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
732{
733 return (void *)vmci_event_data_const_payload(ev_data);
734}
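/*
 * Illustrative sketch (not part of the original header): using the accessor
 * above to reach the payload of a queue pair peer attach/detach event. It
 * assumes the message really carries a qp payload, i.e. it was built as a
 * struct vmci_event_qp.
 */
static inline u32 vmci_event_qp_peer_example(struct vmci_event_msg *e_msg)
{
	struct vmci_event_payld_qp *e_payload =
		vmci_event_data_payload(&e_msg->event_data);

	return e_payload->peer_id;
}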
735
736/*
737 * Helper to add a given offset to a head or tail pointer. Wraps the
738 * value of the pointer around the max size of the queue.
739 */
740static inline void vmci_qp_add_pointer(atomic64_t *var,
741 size_t add,
742 u64 size)
743{
744 u64 new_val = atomic64_read(var);
745
746 if (new_val >= size - add)
747 new_val -= size;
748
749 new_val += add;
750
751 atomic64_set(var, new_val);
752}
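/*
 * Illustrative sketch (not part of the original header): the wrap-around
 * behaviour of vmci_qp_add_pointer() for a 10-byte queue. Advancing a tail
 * of 9 by 3 lands on offset 2, i.e. (9 + 3) mod 10; the intermediate u64
 * subtraction above wraps and is corrected by the subsequent addition.
 */
static inline bool vmci_qp_add_pointer_example(void)
{
	atomic64_t tail;

	atomic64_set(&tail, 9);
	vmci_qp_add_pointer(&tail, 3, 10);

	return atomic64_read(&tail) == 2;
}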
753
754/*
755 * Helper routine to get the Producer Tail from the supplied queue.
756 */
757static inline u64
758vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
759{
760 struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
761 return atomic64_read(&qh->producer_tail);
762}
763
764/*
765 * Helper routine to get the Consumer Head from the supplied queue.
766 */
767static inline u64
768vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
769{
770 struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
771 return atomic64_read(&qh->consumer_head);
772}
773
774/*
775 * Helper routine to increment the Producer Tail. Fundamentally,
776 * vmci_qp_add_pointer() is used to manipulate the tail itself.
777 */
778static inline void
779vmci_q_header_add_producer_tail(struct vmci_queue_header *q_header,
780 size_t add,
781 u64 queue_size)
782{
783 vmci_qp_add_pointer(&q_header->producer_tail, add, queue_size);
784}
785
786/*
787 * Helper routine to increment the Consumer Head. Fundamentally,
788 * vmci_qp_add_pointer() is used to manipulate the head itself.
789 */
790static inline void
791vmci_q_header_add_consumer_head(struct vmci_queue_header *q_header,
792 size_t add,
793 u64 queue_size)
794{
795 vmci_qp_add_pointer(&q_header->consumer_head, add, queue_size);
796}
797
798/*
799 * Helper routine for getting the head and the tail pointer for a queue.
800 * Both the VMCIQueues are needed to get both the pointers for one queue.
801 */
802static inline void
803vmci_q_header_get_pointers(const struct vmci_queue_header *produce_q_header,
804 const struct vmci_queue_header *consume_q_header,
805 u64 *producer_tail,
806 u64 *consumer_head)
807{
808 if (producer_tail)
809 *producer_tail = vmci_q_header_producer_tail(produce_q_header);
810
811 if (consumer_head)
812 *consumer_head = vmci_q_header_consumer_head(consume_q_header);
813}
814
815static inline void vmci_q_header_init(struct vmci_queue_header *q_header,
816 const struct vmci_handle handle)
817{
818 q_header->handle = handle;
819 atomic64_set(&q_header->producer_tail, 0);
820 atomic64_set(&q_header->consumer_head, 0);
821}
822
823/*
824 * Finds available free space in a produce queue to enqueue more
825 * data or reports an error if queue pair corruption is detected.
826 */
827static inline s64
828vmci_q_header_free_space(const struct vmci_queue_header *produce_q_header,
829 const struct vmci_queue_header *consume_q_header,
830 const u64 produce_q_size)
831{
832 u64 tail;
833 u64 head;
834 u64 free_space;
835
836 tail = vmci_q_header_producer_tail(produce_q_header);
837 head = vmci_q_header_consumer_head(consume_q_header);
838
839 if (tail >= produce_q_size || head >= produce_q_size)
840 return VMCI_ERROR_INVALID_SIZE;
841
842 /*
843 * Deduct 1 to avoid tail becoming equal to head which causes
844 * ambiguity. If head and tail are equal it means that the
845 * queue is empty.
846 */
847 if (tail >= head)
848 free_space = produce_q_size - (tail - head) - 1;
849 else
850 free_space = head - tail - 1;
851
852 return free_space;
853}
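/*
 * Illustrative sketch (not part of the original header): the arithmetic of
 * vmci_q_header_free_space() for a 1024-byte produce queue. With the tail
 * at 300 and the peer's head at 100, 200 bytes are in flight and one byte
 * is kept back so that a full queue never has tail == head, leaving 823
 * bytes free; vmci_q_header_buf_ready() below then reports the
 * complementary 200. The handles are arbitrary example values.
 */
static inline bool vmci_q_free_space_example(void)
{
	struct vmci_queue_header produce_q, consume_q;

	vmci_q_header_init(&produce_q, vmci_make_handle(3, 10));
	vmci_q_header_init(&consume_q, vmci_make_handle(3, 11));
	vmci_q_header_add_producer_tail(&produce_q, 300, 1024);
	vmci_q_header_add_consumer_head(&consume_q, 100, 1024);

	return vmci_q_header_free_space(&produce_q, &consume_q, 1024) == 823;
}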
854
855/*
856 * vmci_q_header_free_space() does all the heavy lifting of
857 * determining the number of free bytes in a Queue. This routine
858 * then subtracts that size from the full size of the Queue so
859 * the caller knows how many bytes are ready to be dequeued.
860 * Results:
861 * On success, available data size in bytes (up to MAX_INT64).
862 * On failure, appropriate error code.
863 */
864static inline s64
865vmci_q_header_buf_ready(const struct vmci_queue_header *consume_q_header,
866 const struct vmci_queue_header *produce_q_header,
867 const u64 consume_q_size)
868{
869 s64 free_space;
870
871 free_space = vmci_q_header_free_space(consume_q_header,
872 produce_q_header, consume_q_size);
873 if (free_space < VMCI_SUCCESS)
874 return free_space;
875
876 return consume_q_size - free_space - 1;
877}
878
879
880#endif /* _VMW_VMCI_DEF_H_ */
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index d25a46925e61..c800ea4c8bf9 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -97,7 +97,7 @@ static struct utsname uts_buf;
  * The location of the interface configuration file.
  */
 
-#define KVP_CONFIG_LOC	"/var/opt/"
+#define KVP_CONFIG_LOC	"/var/lib/hyperv"
 
 #define MAX_FILE_NAME 100
 #define ENTRIES_PER_BLOCK 50
@@ -151,7 +151,7 @@ static void kvp_update_file(int pool)
 	 */
 	kvp_acquire_lock(pool);
 
-	filep = fopen(kvp_file_info[pool].fname, "w");
+	filep = fopen(kvp_file_info[pool].fname, "we");
 	if (!filep) {
 		kvp_release_lock(pool);
 		syslog(LOG_ERR, "Failed to open file, pool: %d", pool);
@@ -182,7 +182,7 @@ static void kvp_update_mem_state(int pool)
 
 	kvp_acquire_lock(pool);
 
-	filep = fopen(kvp_file_info[pool].fname, "r");
+	filep = fopen(kvp_file_info[pool].fname, "re");
 	if (!filep) {
 		kvp_release_lock(pool);
 		syslog(LOG_ERR, "Failed to open file, pool: %d", pool);
@@ -234,9 +234,9 @@ static int kvp_file_init(void)
 	int i;
 	int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
 
-	if (access("/var/opt/hyperv", F_OK)) {
-		if (mkdir("/var/opt/hyperv", S_IRUSR | S_IWUSR | S_IROTH)) {
-			syslog(LOG_ERR, " Failed to create /var/opt/hyperv");
+	if (access(KVP_CONFIG_LOC, F_OK)) {
+		if (mkdir(KVP_CONFIG_LOC, 0755 /* rwxr-xr-x */)) {
+			syslog(LOG_ERR, " Failed to create %s", KVP_CONFIG_LOC);
 			exit(EXIT_FAILURE);
 		}
 	}
@@ -245,14 +245,14 @@ static int kvp_file_init(void)
 		fname = kvp_file_info[i].fname;
 		records_read = 0;
 		num_blocks = 1;
-		sprintf(fname, "/var/opt/hyperv/.kvp_pool_%d", i);
-		fd = open(fname, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR | S_IROTH);
+		sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
+		fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
 
 		if (fd == -1)
 			return 1;
 
 
-		filep = fopen(fname, "r");
+		filep = fopen(fname, "re");
 		if (!filep)
 			return 1;
 
@@ -1162,16 +1162,13 @@ static int process_ip_string(FILE *f, char *ip_string, int type)
 			snprintf(str, sizeof(str), "%s", "DNS");
 			break;
 		}
-		if (i != 0) {
-			if (type != DNS) {
-				snprintf(sub_str, sizeof(sub_str),
-					"_%d", i++);
-			} else {
-				snprintf(sub_str, sizeof(sub_str),
-					"%d", ++i);
-			}
-		} else if (type == DNS) {
-			snprintf(sub_str, sizeof(sub_str), "%d", ++i);
+
+		if (type == DNS) {
+			snprintf(sub_str, sizeof(sub_str), "%d", ++i);
+		} else if (type == GATEWAY && i == 0) {
+			++i;
+		} else {
+			snprintf(sub_str, sizeof(sub_str), "%d", i++);
 		}
 
 
@@ -1191,17 +1188,13 @@ static int process_ip_string(FILE *f, char *ip_string, int type)
 			snprintf(str, sizeof(str), "%s", "DNS");
 			break;
 		}
-		if ((j != 0) || (type == DNS)) {
-			if (type != DNS) {
-				snprintf(sub_str, sizeof(sub_str),
-					"_%d", j++);
-			} else {
-				snprintf(sub_str, sizeof(sub_str),
-					"%d", ++i);
-			}
-		} else if (type == DNS) {
-			snprintf(sub_str, sizeof(sub_str),
-				"%d", ++i);
+
+		if (type == DNS) {
+			snprintf(sub_str, sizeof(sub_str), "%d", ++i);
+		} else if (j == 0) {
+			++j;
+		} else {
+			snprintf(sub_str, sizeof(sub_str), "_%d", j++);
 		}
 	} else {
 		return HV_INVALIDARG;
@@ -1244,18 +1237,19 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
 	 * Here is the format of the ip configuration file:
 	 *
 	 * HWADDR=macaddr
-	 * IF_NAME=interface name
-	 * DHCP=yes (This is optional; if yes, DHCP is configured)
+	 * DEVICE=interface name
+	 * BOOTPROTO=<protocol> (where <protocol> is "dhcp" if DHCP is configured
+	 *			or "none" if no boot-time protocol should be used)
 	 *
-	 * IPADDR=ipaddr1
-	 * IPADDR_1=ipaddr2
-	 * IPADDR_x=ipaddry (where y = x + 1)
+	 * IPADDR0=ipaddr1
+	 * IPADDR1=ipaddr2
+	 * IPADDRx=ipaddry (where y = x + 1)
 	 *
-	 * NETMASK=netmask1
-	 * NETMASK_x=netmasky (where y = x + 1)
+	 * NETMASK0=netmask1
+	 * NETMASKx=netmasky (where y = x + 1)
 	 *
 	 * GATEWAY=ipaddr1
-	 * GATEWAY_x=ipaddry (where y = x + 1)
+	 * GATEWAYx=ipaddry (where y = x + 1)
 	 *
 	 * DNSx=ipaddrx (where first DNS address is tagged as DNS1 etc)
 	 *
@@ -1271,7 +1265,7 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
 	 */
 
 	snprintf(if_file, sizeof(if_file), "%s%s%s", KVP_CONFIG_LOC,
-		"hyperv/ifcfg-", if_name);
+		"/ifcfg-", if_name);
 
 	file = fopen(if_file, "w");
 
@@ -1294,12 +1288,12 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
 	if (error)
 		goto setval_error;
 
-	error = kvp_write_file(file, "IF_NAME", "", if_name);
+	error = kvp_write_file(file, "DEVICE", "", if_name);
 	if (error)
 		goto setval_error;
 
 	if (new_val->dhcp_enabled) {
-		error = kvp_write_file(file, "DHCP", "", "yes");
+		error = kvp_write_file(file, "BOOTPROTO", "", "dhcp");
 		if (error)
 			goto setval_error;
 
@@ -1307,6 +1301,11 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
 		 * We are done!.
 		 */
 		goto setval_done;
+
+	} else {
+		error = kvp_write_file(file, "BOOTPROTO", "", "none");
+		if (error)
+			goto setval_error;
 	}
 
 	/*
diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh
index 3e9427e08d80..735aafd64a3f 100755
--- a/tools/hv/hv_set_ifconfig.sh
+++ b/tools/hv/hv_set_ifconfig.sh
@@ -20,18 +20,19 @@
 # Here is the format of the ip configuration file:
 #
 # HWADDR=macaddr
-# IF_NAME=interface name
-# DHCP=yes (This is optional; if yes, DHCP is configured)
+# DEVICE=interface name
+# BOOTPROTO=<protocol> (where <protocol> is "dhcp" if DHCP is configured
+#                       or "none" if no boot-time protocol should be used)
 #
-# IPADDR=ipaddr1
-# IPADDR_1=ipaddr2
-# IPADDR_x=ipaddry (where y = x + 1)
+# IPADDR0=ipaddr1
+# IPADDR1=ipaddr2
+# IPADDRx=ipaddry (where y = x + 1)
 #
-# NETMASK=netmask1
-# NETMASK_x=netmasky (where y = x + 1)
+# NETMASK0=netmask1
+# NETMASKx=netmasky (where y = x + 1)
 #
 # GATEWAY=ipaddr1
-# GATEWAY_x=ipaddry (where y = x + 1)
+# GATEWAYx=ipaddry (where y = x + 1)
 #
 # DNSx=ipaddrx (where first DNS address is tagged as DNS1 etc)
 #
@@ -53,11 +54,6 @@ echo "NM_CONTROLLED=no" >> $1
 echo "PEERDNS=yes" >> $1
 echo "ONBOOT=yes" >> $1
 
-dhcp=$(grep "DHCP" $1 2>/dev/null)
-if [ "$dhcp" != "" ];
-then
-echo "BOOTPROTO=dhcp" >> $1;
-fi
 
 cp $1 /etc/sysconfig/network-scripts/
 
@@ -65,4 +61,4 @@ cp $1 /etc/sysconfig/network-scripts/
 interface=$(echo $1 | awk -F - '{ print $2 }')
 
 /sbin/ifdown $interface 2>/dev/null
-/sbin/ifup $interfac 2>/dev/null
+/sbin/ifup $interface 2>/dev/null