author    Linus Torvalds <torvalds@linux-foundation.org>  2013-09-03 14:36:27 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-09-03 14:36:27 -0400
commit    1d1fdd95df681f0c065d90ffaafa215a0e8825e2 (patch)
tree      19016e131bb5c7eb280a4cc8dff864ba36e53be4 /drivers/misc
parent    b3b49114c80e799af8b08c0c6d1ff886ea843f03 (diff)
parent    3cc1f95283a125cf54ccf1e25065321d4385133b (diff)
Merge tag 'char-misc-3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc patches from Greg KH:
 "Here is the big char/misc driver pull request for 3.12-rc1

  Lots of driver updates all over the char/misc tree, full details in
  the shortlog"

* tag 'char-misc-3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (62 commits)
  drivers: uio: Kconfig: add MMU dependency for UIO
  drivers: uio: Add driver for Humusoft MF624 DAQ PCI card
  drivers: uio_pdrv_genirq: use dev_get_platdata()
  drivers: uio_pruss: use dev_get_platdata()
  drivers: uio_dmem_genirq: use dev_get_platdata()
  drivers: parport: Kconfig: exclude h8300 for PARPORT_PC
  drivers: misc: ti-st: fix potential race if st_kim_start fails
  Drivers: hv: vmbus: Do not attempt to negotiate a new version prematurely
  misc: vmw_balloon: Remove braces to fix build for clang.
  Drivers: hv: vmbus: Fix a bug in the handling of channel offers
  vme: vme_ca91cx42.c: fix to pass correct device identity to free_irq()
  VMCI: Add support for virtual IOMMU
  VMCI: Remove non-blocking/pinned queuepair support
  uio: uio_pruss: remove unnecessary platform_set_drvdata()
  parport: amiga: remove unnecessary platform_set_drvdata()
  vme: vme_vmivme7805.c: add missing __iomem annotation
  vme: vme_ca91cx42.c: add missing __iomem annotation
  vme: vme_tsi148.c: add missing __iomem annotation
  drivers/misc/hpilo: Correct panic when an AUX iLO is detected
  uio: drop unused vma_count member in uio_device struct
  ...
Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/atmel-ssc.c                    8
-rw-r--r--  drivers/misc/hpilo.c                        4
-rw-r--r--  drivers/misc/lkdtm.c                       63
-rw-r--r--  drivers/misc/mei/amthif.c                  14
-rw-r--r--  drivers/misc/mei/bus.c                      4
-rw-r--r--  drivers/misc/mei/client.c                  15
-rw-r--r--  drivers/misc/mei/client.h                   9
-rw-r--r--  drivers/misc/mei/hw-me.c                    9
-rw-r--r--  drivers/misc/mei/init.c                    11
-rw-r--r--  drivers/misc/mei/main.c                    22
-rw-r--r--  drivers/misc/sram.c                         3
-rw-r--r--  drivers/misc/ti-st/st_core.c                2
-rw-r--r--  drivers/misc/vmw_balloon.c                  2
-rw-r--r--  drivers/misc/vmw_vmci/vmci_driver.c         2
-rw-r--r--  drivers/misc/vmw_vmci/vmci_driver.h         7
-rw-r--r--  drivers/misc/vmw_vmci/vmci_guest.c         22
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.c   315
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.h    18
18 files changed, 263 insertions, 267 deletions
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index e068a76a5f6f..5be808406edc 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -19,7 +19,6 @@
 #include <linux/module.h>
 
 #include <linux/of.h>
-#include <linux/pinctrl/consumer.h>
 
 /* Serialize access to ssc_list and user count */
 static DEFINE_SPINLOCK(user_lock);
@@ -137,13 +136,6 @@ static int ssc_probe(struct platform_device *pdev)
 	struct resource *regs;
 	struct ssc_device *ssc;
 	const struct atmel_ssc_platform_data *plat_dat;
-	struct pinctrl *pinctrl;
-
-	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
-	if (IS_ERR(pinctrl)) {
-		dev_err(&pdev->dev, "Failed to request pinctrl\n");
-		return PTR_ERR(pinctrl);
-	}
 
 	ssc = devm_kzalloc(&pdev->dev, sizeof(struct ssc_device), GFP_KERNEL);
 	if (!ssc) {
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 621c7a373390..b83e3ca12a41 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -759,7 +759,7 @@ static int ilo_probe(struct pci_dev *pdev,
 
 	/* Ignore subsystem_device = 0x1979 (set by BIOS) */
 	if (pdev->subsystem_device == 0x1979)
-		goto out;
+		return 0;
 
 	if (max_ccb > MAX_CCB)
 		max_ccb = MAX_CCB;
@@ -899,7 +899,7 @@ static void __exit ilo_exit(void)
 	class_destroy(ilo_class);
 }
 
-MODULE_VERSION("1.4");
+MODULE_VERSION("1.4.1");
 MODULE_ALIAS(ILO_NAME);
 MODULE_DESCRIPTION(ILO_NAME);
 MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>");
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 08aad69c8da4..2fc0586ce3bb 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -43,6 +43,7 @@
 #include <linux/slab.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/debugfs.h>
+#include <linux/vmalloc.h>
 
 #ifdef CONFIG_IDE
 #include <linux/ide.h>
@@ -50,6 +51,7 @@
 
 #define DEFAULT_COUNT 10
 #define REC_NUM_DEFAULT 10
+#define EXEC_SIZE 64
 
 enum cname {
 	CN_INVALID,
@@ -68,6 +70,7 @@ enum ctype {
 	CT_NONE,
 	CT_PANIC,
 	CT_BUG,
+	CT_WARNING,
 	CT_EXCEPTION,
 	CT_LOOP,
 	CT_OVERFLOW,
@@ -77,7 +80,12 @@ enum ctype {
 	CT_WRITE_AFTER_FREE,
 	CT_SOFTLOCKUP,
 	CT_HARDLOCKUP,
+	CT_SPINLOCKUP,
 	CT_HUNG_TASK,
+	CT_EXEC_DATA,
+	CT_EXEC_STACK,
+	CT_EXEC_KMALLOC,
+	CT_EXEC_VMALLOC,
 };
 
 static char* cp_name[] = {
@@ -95,6 +103,7 @@ static char* cp_name[] = {
 static char* cp_type[] = {
 	"PANIC",
 	"BUG",
+	"WARNING",
 	"EXCEPTION",
 	"LOOP",
 	"OVERFLOW",
@@ -104,7 +113,12 @@ static char* cp_type[] = {
104 "WRITE_AFTER_FREE", 113 "WRITE_AFTER_FREE",
105 "SOFTLOCKUP", 114 "SOFTLOCKUP",
106 "HARDLOCKUP", 115 "HARDLOCKUP",
116 "SPINLOCKUP",
107 "HUNG_TASK", 117 "HUNG_TASK",
118 "EXEC_DATA",
119 "EXEC_STACK",
120 "EXEC_KMALLOC",
121 "EXEC_VMALLOC",
108}; 122};
109 123
110static struct jprobe lkdtm; 124static struct jprobe lkdtm;
@@ -121,6 +135,9 @@ static enum cname cpoint = CN_INVALID;
 static enum ctype cptype = CT_NONE;
 static int count = DEFAULT_COUNT;
 static DEFINE_SPINLOCK(count_lock);
+static DEFINE_SPINLOCK(lock_me_up);
+
+static u8 data_area[EXEC_SIZE];
 
 module_param(recur_count, int, 0644);
 MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
@@ -275,6 +292,19 @@ static int recursive_loop(int a)
 	return recursive_loop(a);
 }
 
+static void do_nothing(void)
+{
+	return;
+}
+
+static void execute_location(void *dst)
+{
+	void (*func)(void) = dst;
+
+	memcpy(dst, do_nothing, EXEC_SIZE);
+	func();
+}
+
 static void lkdtm_do_action(enum ctype which)
 {
 	switch (which) {
@@ -284,6 +314,9 @@ static void lkdtm_do_action(enum ctype which)
 	case CT_BUG:
 		BUG();
 		break;
+	case CT_WARNING:
+		WARN_ON(1);
+		break;
 	case CT_EXCEPTION:
 		*((int *) 0) = 0;
 		break;
@@ -295,10 +328,10 @@ static void lkdtm_do_action(enum ctype which)
 		(void) recursive_loop(0);
 		break;
 	case CT_CORRUPT_STACK: {
-		volatile u32 data[8];
-		volatile u32 *p = data;
+		/* Make sure the compiler creates and uses an 8 char array. */
+		volatile char data[8];
 
-		p[12] = 0x12345678;
+		memset((void *)data, 0, 64);
 		break;
 	}
 	case CT_UNALIGNED_LOAD_STORE_WRITE: {
@@ -340,10 +373,34 @@ static void lkdtm_do_action(enum ctype which)
 		for (;;)
 			cpu_relax();
 		break;
+	case CT_SPINLOCKUP:
+		/* Must be called twice to trigger. */
+		spin_lock(&lock_me_up);
+		break;
 	case CT_HUNG_TASK:
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule();
 		break;
+	case CT_EXEC_DATA:
+		execute_location(data_area);
+		break;
+	case CT_EXEC_STACK: {
+		u8 stack_area[EXEC_SIZE];
+		execute_location(stack_area);
+		break;
+	}
+	case CT_EXEC_KMALLOC: {
+		u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
+		execute_location(kmalloc_area);
+		kfree(kmalloc_area);
+		break;
+	}
+	case CT_EXEC_VMALLOC: {
+		u32 *vmalloc_area = vmalloc(EXEC_SIZE);
+		execute_location(vmalloc_area);
+		vfree(vmalloc_area);
+		break;
+	}
 	case CT_NONE:
 	default:
 		break;
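
The CT_EXEC_* cases above all funnel through execute_location(), which copies do_nothing() into the target region and jumps to it; on a kernel enforcing NX the jump should fault. A minimal user-space sketch of how such a crash point is typically fired, assuming lkdtm's usual debugfs interface (the /sys/kernel/debug/provoke-crash/DIRECT path is an assumption based on the driver's existing layout, not part of this diff):

/* sketch: trigger lkdtm's EXEC_DATA crash point via debugfs (assumed path) */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* lkdtm copies do_nothing() into data_area and jumps to it. */
	write(fd, "EXEC_DATA", sizeof("EXEC_DATA") - 1);
	close(fd);	/* likely unreachable if the kernel oopses */
	return 0;
}
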
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 749452f8e2f6..d0fdc134068a 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -418,15 +418,23 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
 			struct file *file, poll_table *wait)
 {
 	unsigned int mask = 0;
-	mutex_unlock(&dev->device_lock);
+
 	poll_wait(file, &dev->iamthif_cl.wait, wait);
+
 	mutex_lock(&dev->device_lock);
-	if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
-	    dev->iamthif_file_object == file) {
+	if (!mei_cl_is_connected(&dev->iamthif_cl)) {
+
+		mask = POLLERR;
+
+	} else if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
+		   dev->iamthif_file_object == file) {
+
 		mask |= (POLLIN | POLLRDNORM);
 		dev_dbg(&dev->pdev->dev, "run next amthif cb\n");
 		mei_amthif_run_next_cmd(dev);
 	}
+	mutex_unlock(&dev->device_lock);
+
 	return mask;
 }
 
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 9ecd49a7be1b..a150a42ed4af 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -47,7 +47,7 @@ static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
 	id = driver->id_table;
 
 	while (id->name[0]) {
-		if (!strcmp(dev_name(dev), id->name))
+		if (!strncmp(dev_name(dev), id->name, sizeof(id->name)))
 			return 1;
 
 		id++;
@@ -71,7 +71,7 @@ static int mei_cl_device_probe(struct device *dev)
 
 	dev_dbg(dev, "Device probe\n");
 
-	strncpy(id.name, dev_name(dev), MEI_CL_NAME_SIZE);
+	strncpy(id.name, dev_name(dev), sizeof(id.name));
 
 	return driver->probe(device, &id);
 }
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 21d3f5aa8353..e0684b4d9a08 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -635,10 +635,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
 
 	dev = cl->dev;
 
-	if (cl->state != MEI_FILE_CONNECTED)
-		return -ENODEV;
-
-	if (dev->dev_state != MEI_DEV_ENABLED)
+	if (!mei_cl_is_connected(cl))
 		return -ENODEV;
 
 	if (cl->read_cb) {
@@ -892,18 +889,22 @@ void mei_cl_all_disconnect(struct mei_device *dev)
 
 
 /**
- * mei_cl_all_read_wakeup - wake up all readings so they can be interrupted
+ * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted
  *
  * @dev - mei device
  */
-void mei_cl_all_read_wakeup(struct mei_device *dev)
+void mei_cl_all_wakeup(struct mei_device *dev)
 {
 	struct mei_cl *cl, *next;
 	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
 		if (waitqueue_active(&cl->rx_wait)) {
-			dev_dbg(&dev->pdev->dev, "Waking up client!\n");
+			dev_dbg(&dev->pdev->dev, "Waking up reading client!\n");
 			wake_up_interruptible(&cl->rx_wait);
 		}
+		if (waitqueue_active(&cl->tx_wait)) {
+			dev_dbg(&dev->pdev->dev, "Waking up writing client!\n");
+			wake_up_interruptible(&cl->tx_wait);
+		}
 	}
 }
 
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 26b157d8bad5..9eb031e92070 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -84,6 +84,13 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
 /*
  * MEI input output function prototype
  */
+static inline bool mei_cl_is_connected(struct mei_cl *cl)
+{
+	return (cl->dev &&
+		cl->dev->dev_state == MEI_DEV_ENABLED &&
+		cl->state == MEI_FILE_CONNECTED);
+}
+
 bool mei_cl_is_other_connecting(struct mei_cl *cl);
 int mei_cl_disconnect(struct mei_cl *cl);
 int mei_cl_connect(struct mei_cl *cl, struct file *file);
@@ -99,7 +106,7 @@ void mei_host_client_init(struct work_struct *work);
 
 
 void mei_cl_all_disconnect(struct mei_device *dev);
-void mei_cl_all_read_wakeup(struct mei_device *dev);
+void mei_cl_all_wakeup(struct mei_device *dev);
 void mei_cl_all_write_clear(struct mei_device *dev);
 
 #endif /* _MEI_CLIENT_H_ */
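
The new inline helper is easiest to judge at a call site. Condensed from the client.c hunk above, the two open-coded state checks collapse into one test:

	/* before: device state and client state checked separately */
	if (cl->state != MEI_FILE_CONNECTED)
		return -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED)
		return -ENODEV;

	/* after: one helper covers the device pointer, its state and the client */
	if (!mei_cl_is_connected(cl))
		return -ENODEV;
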
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index b22c7e247225..3412adcdaeb0 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -176,21 +176,18 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
 	struct mei_me_hw *hw = to_me_hw(dev);
 	u32 hcsr = mei_hcsr_read(hw);
 
-	dev_dbg(&dev->pdev->dev, "before reset HCSR = 0x%08x.\n", hcsr);
-
-	hcsr |= (H_RST | H_IG);
+	hcsr |= H_RST | H_IG | H_IS;
 
 	if (intr_enable)
 		hcsr |= H_IE;
 	else
-		hcsr |= ~H_IE;
+		hcsr &= ~H_IE;
 
-	mei_hcsr_set(hw, hcsr);
+	mei_me_reg_write(hw, H_CSR, hcsr);
 
 	if (dev->dev_state == MEI_DEV_POWER_DOWN)
 		mei_me_hw_reset_release(dev);
 
-	dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", mei_hcsr_read(hw));
 	return 0;
 }
 
196 193
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index e6f16f83ecde..92c73118b13c 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -154,8 +154,14 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
 	    dev->dev_state != MEI_DEV_POWER_DOWN)
 		dev->dev_state = MEI_DEV_RESETTING;
 
+	/* remove all waiting requests */
+	mei_cl_all_write_clear(dev);
+
 	mei_cl_all_disconnect(dev);
 
+	/* wake up all readings so they can be interrupted */
+	mei_cl_all_wakeup(dev);
+
 	/* remove entry if already in list */
 	dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
 	mei_cl_unlink(&dev->wd_cl);
@@ -196,11 +202,6 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
 
 	mei_hbm_start_req(dev);
 
-	/* wake up all readings so they can be interrupted */
-	mei_cl_all_read_wakeup(dev);
-
-	/* remove all waiting requests */
-	mei_cl_all_write_clear(dev);
 }
 EXPORT_SYMBOL_GPL(mei_reset);
 
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 5e11b5b9b65d..173ff095be0d 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -625,24 +625,32 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
 	unsigned int mask = 0;
 
 	if (WARN_ON(!cl || !cl->dev))
-		return mask;
+		return POLLERR;
 
 	dev = cl->dev;
 
 	mutex_lock(&dev->device_lock);
 
-	if (dev->dev_state != MEI_DEV_ENABLED)
-		goto out;
-
-
-	if (cl == &dev->iamthif_cl) {
-		mask = mei_amthif_poll(dev, file, wait);
+	if (!mei_cl_is_connected(cl)) {
+		mask = POLLERR;
 		goto out;
 	}
 
 	mutex_unlock(&dev->device_lock);
+
+
+	if (cl == &dev->iamthif_cl)
+		return mei_amthif_poll(dev, file, wait);
+
 	poll_wait(file, &cl->tx_wait, wait);
+
 	mutex_lock(&dev->device_lock);
+
+	if (!mei_cl_is_connected(cl)) {
+		mask = POLLERR;
+		goto out;
+	}
+
 	if (MEI_WRITE_COMPLETE == cl->writing_state)
 		mask |= (POLLIN | POLLRDNORM);
 
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index d87cc91bc016..afe66571ce0b 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -68,7 +68,8 @@ static int sram_probe(struct platform_device *pdev)
 	ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base,
 				res->start, size, -1);
 	if (ret < 0) {
-		gen_pool_destroy(sram->pool);
+		if (sram->clk)
+			clk_disable_unprepare(sram->clk);
 		return ret;
 	}
 
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 0a1428016350..8d64b681dd93 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -562,7 +562,9 @@ long st_register(struct st_proto_s *new_proto)
 	if ((st_gdata->protos_registered != ST_EMPTY) &&
 	    (test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
 		pr_err(" KIM failure complete callback ");
+		spin_lock_irqsave(&st_gdata->lock, flags);
 		st_reg_complete(st_gdata, err);
+		spin_unlock_irqrestore(&st_gdata->lock, flags);
 		clear_bit(ST_REG_PENDING, &st_gdata->st_state);
 	}
 	return -EINVAL;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index cb56e270da11..2421835d5daf 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -133,7 +133,7 @@ MODULE_LICENSE("GPL");
 #define VMWARE_BALLOON_CMD(cmd, data, result)		\
 ({							\
 	unsigned long __stat, __dummy1, __dummy2;	\
-	__asm__ __volatile__ ("inl (%%dx)" :		\
+	__asm__ __volatile__ ("inl %%dx" :		\
 		"=a"(__stat),				\
 		"=c"(__dummy1),				\
 		"=d"(__dummy2),				\
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 7b3fce2da6c3..3dee7ae123e7 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.0.0.0-k");
+MODULE_VERSION("1.1.0.0-k");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
index f69156a1f30c..cee9e977d318 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.h
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -35,6 +35,13 @@ struct vmci_obj {
 	enum vmci_obj_type type;
 };
 
+/*
+ * Needed by other components of this module.  It's okay to have one global
+ * instance of this because there can only ever be one VMCI device.  Our
+ * virtual hardware enforces this.
+ */
+extern struct pci_dev *vmci_pdev;
+
 u32 vmci_get_context_id(void);
 int vmci_send_datagram(struct vmci_datagram *dg);
 
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 60c01999f489..b3a2b763ecf2 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -65,9 +65,11 @@ struct vmci_guest_device {
 
 	void *data_buffer;
 	void *notification_bitmap;
+	dma_addr_t notification_base;
 };
 
 /* vmci_dev singleton device and supporting data*/
+struct pci_dev *vmci_pdev;
 static struct vmci_guest_device *vmci_dev_g;
 static DEFINE_SPINLOCK(vmci_dev_spinlock);
 
@@ -528,7 +530,9 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 	 * well.
 	 */
 	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
-		vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE);
+		vmci_dev->notification_bitmap = dma_alloc_coherent(
+			&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
+			GFP_KERNEL);
 		if (!vmci_dev->notification_bitmap) {
 			dev_warn(&pdev->dev,
 				 "Unable to allocate notification bitmap\n");
@@ -546,6 +550,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 	/* Set up global device so that we can start sending datagrams */
 	spin_lock_irq(&vmci_dev_spinlock);
 	vmci_dev_g = vmci_dev;
+	vmci_pdev = pdev;
 	spin_unlock_irq(&vmci_dev_spinlock);
 
 	/*
@@ -553,9 +558,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 	 * used.
 	 */
 	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
-		struct page *page =
-			vmalloc_to_page(vmci_dev->notification_bitmap);
-		unsigned long bitmap_ppn = page_to_pfn(page);
+		unsigned long bitmap_ppn =
+			vmci_dev->notification_base >> PAGE_SHIFT;
 		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
 			dev_warn(&pdev->dev,
 				 "VMCI device unable to register notification bitmap with PPN 0x%x\n",
@@ -665,11 +669,14 @@ err_remove_bitmap:
 	if (vmci_dev->notification_bitmap) {
 		iowrite32(VMCI_CONTROL_RESET,
 			  vmci_dev->iobase + VMCI_CONTROL_ADDR);
-		vfree(vmci_dev->notification_bitmap);
+		dma_free_coherent(&pdev->dev, PAGE_SIZE,
+				  vmci_dev->notification_bitmap,
+				  vmci_dev->notification_base);
 	}
 
 err_remove_vmci_dev_g:
 	spin_lock_irq(&vmci_dev_spinlock);
+	vmci_pdev = NULL;
 	vmci_dev_g = NULL;
 	spin_unlock_irq(&vmci_dev_spinlock);
 
@@ -699,6 +706,7 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
 
 	spin_lock_irq(&vmci_dev_spinlock);
 	vmci_dev_g = NULL;
+	vmci_pdev = NULL;
 	spin_unlock_irq(&vmci_dev_spinlock);
 
 	dev_dbg(&pdev->dev, "Resetting vmci device\n");
@@ -727,7 +735,9 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
 		 * device, so we can safely free it here.
 		 */
 
-		vfree(vmci_dev->notification_bitmap);
+		dma_free_coherent(&pdev->dev, PAGE_SIZE,
+				  vmci_dev->notification_bitmap,
+				  vmci_dev->notification_base);
 	}
 
 	vfree(vmci_dev->data_buffer);
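
The common thread in the vmci_guest.c hunks: once the device can sit behind a virtual IOMMU, the notification bitmap's device-visible address must come from the DMA API instead of page_to_pfn() on a vmalloc'd page. A hedged sketch of the allocate-register-free cycle (names mirror the driver; error handling trimmed):

	dma_addr_t base;
	void *bitmap = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &base,
					  GFP_KERNEL);

	if (!bitmap)
		return -ENOMEM;

	/* The PPN handed to the hypervisor is now derived from the DMA
	 * address, which the IOMMU translates, rather than from
	 * vmalloc_to_page()/page_to_pfn(). */
	if (!vmci_dbell_register_notification_bitmap(base >> PAGE_SHIFT))
		dma_free_coherent(&pdev->dev, PAGE_SIZE, bitmap, base);
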
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 8ff2e5ee8fb8..a0515a6d6ebd 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/pagemap.h>
+#include <linux/pci.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
@@ -146,14 +147,20 @@ typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
 
 /* The Kernel specific component of the struct vmci_queue structure. */
 struct vmci_queue_kern_if {
-	struct page **page;
-	struct page **header_page;
-	void *va;
 	struct mutex __mutex;	/* Protects the queue. */
 	struct mutex *mutex;	/* Shared by producer and consumer queues. */
-	bool host;
-	size_t num_pages;
-	bool mapped;
+	size_t num_pages;	/* Number of pages incl. header. */
+	bool host;		/* Host or guest? */
+	union {
+		struct {
+			dma_addr_t *pas;
+			void **vas;
+		} g;		/* Used by the guest. */
+		struct {
+			struct page **page;
+			struct page **header_page;
+		} h;		/* Used by the host. */
+	} u;
 };
 
 /*
@@ -265,76 +272,65 @@ static void qp_free_queue(void *q, u64 size)
 	struct vmci_queue *queue = q;
 
 	if (queue) {
-		u64 i = DIV_ROUND_UP(size, PAGE_SIZE);
+		u64 i;
 
-		if (queue->kernel_if->mapped) {
-			vunmap(queue->kernel_if->va);
-			queue->kernel_if->va = NULL;
+		/* Given size does not include header, so add in a page here. */
+		for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
+			dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
+					  queue->kernel_if->u.g.vas[i],
+					  queue->kernel_if->u.g.pas[i]);
 		}
 
-		while (i)
-			__free_page(queue->kernel_if->page[--i]);
-
-		vfree(queue->q_header);
+		vfree(queue);
 	}
 }
 
 /*
- * Allocates kernel VA space of specified size, plus space for the
- * queue structure/kernel interface and the queue header.  Allocates
- * physical pages for the queue data pages.
- *
- * PAGE m:      struct vmci_queue_header (struct vmci_queue->q_header)
- * PAGE m+1:    struct vmci_queue
- * PAGE m+1+q:  struct vmci_queue_kern_if (struct vmci_queue->kernel_if)
- * PAGE n-size: Data pages (struct vmci_queue->kernel_if->page[])
+ * Allocates kernel queue pages of specified size with IOMMU mappings,
+ * plus space for the queue structure/kernel interface and the queue
+ * header.
  */
 static void *qp_alloc_queue(u64 size, u32 flags)
 {
 	u64 i;
 	struct vmci_queue *queue;
-	struct vmci_queue_header *q_header;
-	const u64 num_data_pages = DIV_ROUND_UP(size, PAGE_SIZE);
-	const uint queue_size =
-		PAGE_SIZE +
-		sizeof(*queue) + sizeof(*(queue->kernel_if)) +
-		num_data_pages * sizeof(*(queue->kernel_if->page));
+	const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+	const size_t pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
+	const size_t vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
+	const size_t queue_size =
+		sizeof(*queue) + sizeof(*queue->kernel_if) +
+		pas_size + vas_size;
 
-	q_header = vmalloc(queue_size);
-	if (!q_header)
+	queue = vmalloc(queue_size);
+	if (!queue)
 		return NULL;
 
-	queue = (void *)q_header + PAGE_SIZE;
-	queue->q_header = q_header;
+	queue->q_header = NULL;
 	queue->saved_header = NULL;
 	queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
-	queue->kernel_if->header_page = NULL;	/* Unused in guest. */
-	queue->kernel_if->page = (struct page **)(queue->kernel_if + 1);
+	queue->kernel_if->mutex = NULL;
+	queue->kernel_if->num_pages = num_pages;
+	queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
+	queue->kernel_if->u.g.vas =
+		(void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
 	queue->kernel_if->host = false;
-	queue->kernel_if->va = NULL;
-	queue->kernel_if->mapped = false;
-
-	for (i = 0; i < num_data_pages; i++) {
-		queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0);
-		if (!queue->kernel_if->page[i])
-			goto fail;
-	}
 
-	if (vmci_qp_pinned(flags)) {
-		queue->kernel_if->va =
-			vmap(queue->kernel_if->page, num_data_pages, VM_MAP,
-			     PAGE_KERNEL);
-		if (!queue->kernel_if->va)
-			goto fail;
-
-		queue->kernel_if->mapped = true;
+	for (i = 0; i < num_pages; i++) {
+		queue->kernel_if->u.g.vas[i] =
+			dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
+					   &queue->kernel_if->u.g.pas[i],
+					   GFP_KERNEL);
+		if (!queue->kernel_if->u.g.vas[i]) {
+			/* Size excl. the header. */
+			qp_free_queue(queue, i * PAGE_SIZE);
+			return NULL;
+		}
 	}
 
-	return (void *)queue;
+	/* Queue header is the first page. */
+	queue->q_header = queue->kernel_if->u.g.vas[0];
 
- fail:
-	qp_free_queue(queue, i * PAGE_SIZE);
-	return NULL;
+	return queue;
 }
 
 /*
@@ -353,17 +349,18 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
 	size_t bytes_copied = 0;
 
 	while (bytes_copied < size) {
-		u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
-		size_t page_offset =
+		const u64 page_index =
+			(queue_offset + bytes_copied) / PAGE_SIZE;
+		const size_t page_offset =
 			(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
 		void *va;
 		size_t to_copy;
 
-		if (!kernel_if->mapped)
-			va = kmap(kernel_if->page[page_index]);
+		if (kernel_if->host)
+			va = kmap(kernel_if->u.h.page[page_index]);
 		else
-			va = (void *)((u8 *)kernel_if->va +
-				      (page_index * PAGE_SIZE));
+			va = kernel_if->u.g.vas[page_index + 1];
+			/* Skip header. */
 
 		if (size - bytes_copied > PAGE_SIZE - page_offset)
 			/* Enough payload to fill up from this page. */
@@ -379,7 +376,8 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
 			err = memcpy_fromiovec((u8 *)va + page_offset,
 					       iov, to_copy);
 			if (err != 0) {
-				kunmap(kernel_if->page[page_index]);
+				if (kernel_if->host)
+					kunmap(kernel_if->u.h.page[page_index]);
 				return VMCI_ERROR_INVALID_ARGS;
 			}
 		} else {
@@ -388,8 +386,8 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
 		}
 
 		bytes_copied += to_copy;
-		if (!kernel_if->mapped)
-			kunmap(kernel_if->page[page_index]);
+		if (kernel_if->host)
+			kunmap(kernel_if->u.h.page[page_index]);
 	}
 
 	return VMCI_SUCCESS;
@@ -411,17 +409,18 @@ static int __qp_memcpy_from_queue(void *dest,
 	size_t bytes_copied = 0;
 
 	while (bytes_copied < size) {
-		u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
-		size_t page_offset =
+		const u64 page_index =
+			(queue_offset + bytes_copied) / PAGE_SIZE;
+		const size_t page_offset =
 			(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
 		void *va;
 		size_t to_copy;
 
-		if (!kernel_if->mapped)
-			va = kmap(kernel_if->page[page_index]);
+		if (kernel_if->host)
+			va = kmap(kernel_if->u.h.page[page_index]);
 		else
-			va = (void *)((u8 *)kernel_if->va +
-				      (page_index * PAGE_SIZE));
+			va = kernel_if->u.g.vas[page_index + 1];
+			/* Skip header. */
 
 		if (size - bytes_copied > PAGE_SIZE - page_offset)
 			/* Enough payload to fill up this page. */
@@ -437,7 +436,8 @@ static int __qp_memcpy_from_queue(void *dest,
 			err = memcpy_toiovec(iov, (u8 *)va + page_offset,
 					     to_copy);
 			if (err != 0) {
-				kunmap(kernel_if->page[page_index]);
+				if (kernel_if->host)
+					kunmap(kernel_if->u.h.page[page_index]);
 				return VMCI_ERROR_INVALID_ARGS;
 			}
 		} else {
@@ -446,8 +446,8 @@ static int __qp_memcpy_from_queue(void *dest,
 		}
 
 		bytes_copied += to_copy;
-		if (!kernel_if->mapped)
-			kunmap(kernel_if->page[page_index]);
+		if (kernel_if->host)
+			kunmap(kernel_if->u.h.page[page_index]);
 	}
 
 	return VMCI_SUCCESS;
@@ -489,12 +489,11 @@ static int qp_alloc_ppn_set(void *prod_q,
 		return VMCI_ERROR_NO_MEM;
 	}
 
-	produce_ppns[0] = page_to_pfn(vmalloc_to_page(produce_q->q_header));
-	for (i = 1; i < num_produce_pages; i++) {
+	for (i = 0; i < num_produce_pages; i++) {
 		unsigned long pfn;
 
 		produce_ppns[i] =
-			page_to_pfn(produce_q->kernel_if->page[i - 1]);
+			produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
 		pfn = produce_ppns[i];
 
 		/* Fail allocation if PFN isn't supported by hypervisor. */
@@ -503,12 +502,11 @@ static int qp_alloc_ppn_set(void *prod_q,
 			goto ppn_error;
 	}
 
-	consume_ppns[0] = page_to_pfn(vmalloc_to_page(consume_q->q_header));
-	for (i = 1; i < num_consume_pages; i++) {
+	for (i = 0; i < num_consume_pages; i++) {
 		unsigned long pfn;
 
 		consume_ppns[i] =
-			page_to_pfn(consume_q->kernel_if->page[i - 1]);
+			consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
 		pfn = consume_ppns[i];
 
 		/* Fail allocation if PFN isn't supported by hypervisor. */
@@ -619,23 +617,20 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
 	const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
 	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
 	const size_t queue_page_size =
-		num_pages * sizeof(*queue->kernel_if->page);
+		num_pages * sizeof(*queue->kernel_if->u.h.page);
 
 	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
 	if (queue) {
 		queue->q_header = NULL;
 		queue->saved_header = NULL;
-		queue->kernel_if =
-			(struct vmci_queue_kern_if *)((u8 *)queue +
-						      sizeof(*queue));
+		queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
 		queue->kernel_if->host = true;
 		queue->kernel_if->mutex = NULL;
 		queue->kernel_if->num_pages = num_pages;
-		queue->kernel_if->header_page =
+		queue->kernel_if->u.h.header_page =
 			(struct page **)((u8 *)queue + queue_size);
-		queue->kernel_if->page = &queue->kernel_if->header_page[1];
-		queue->kernel_if->va = NULL;
-		queue->kernel_if->mapped = false;
+		queue->kernel_if->u.h.page =
+			&queue->kernel_if->u.h.header_page[1];
 	}
 
 	return queue;
@@ -742,11 +737,12 @@ static int qp_host_get_user_memory(u64 produce_uva,
 				current->mm,
 				(uintptr_t) produce_uva,
 				produce_q->kernel_if->num_pages,
-				1, 0, produce_q->kernel_if->header_page, NULL);
+				1, 0,
+				produce_q->kernel_if->u.h.header_page, NULL);
 	if (retval < produce_q->kernel_if->num_pages) {
 		pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
-		qp_release_pages(produce_q->kernel_if->header_page, retval,
-				 false);
+		qp_release_pages(produce_q->kernel_if->u.h.header_page,
+				 retval, false);
 		err = VMCI_ERROR_NO_MEM;
 		goto out;
 	}
@@ -755,12 +751,13 @@ static int qp_host_get_user_memory(u64 produce_uva,
 				current->mm,
 				(uintptr_t) consume_uva,
 				consume_q->kernel_if->num_pages,
-				1, 0, consume_q->kernel_if->header_page, NULL);
+				1, 0,
+				consume_q->kernel_if->u.h.header_page, NULL);
 	if (retval < consume_q->kernel_if->num_pages) {
 		pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
-		qp_release_pages(consume_q->kernel_if->header_page, retval,
-				 false);
-		qp_release_pages(produce_q->kernel_if->header_page,
+		qp_release_pages(consume_q->kernel_if->u.h.header_page,
+				 retval, false);
+		qp_release_pages(produce_q->kernel_if->u.h.header_page,
 				 produce_q->kernel_if->num_pages, false);
 		err = VMCI_ERROR_NO_MEM;
 	}
@@ -803,15 +800,15 @@ static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
 static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
 					   struct vmci_queue *consume_q)
 {
-	qp_release_pages(produce_q->kernel_if->header_page,
+	qp_release_pages(produce_q->kernel_if->u.h.header_page,
 			 produce_q->kernel_if->num_pages, true);
-	memset(produce_q->kernel_if->header_page, 0,
-	       sizeof(*produce_q->kernel_if->header_page) *
+	memset(produce_q->kernel_if->u.h.header_page, 0,
+	       sizeof(*produce_q->kernel_if->u.h.header_page) *
 	       produce_q->kernel_if->num_pages);
-	qp_release_pages(consume_q->kernel_if->header_page,
+	qp_release_pages(consume_q->kernel_if->u.h.header_page,
 			 consume_q->kernel_if->num_pages, true);
-	memset(consume_q->kernel_if->header_page, 0,
-	       sizeof(*consume_q->kernel_if->header_page) *
+	memset(consume_q->kernel_if->u.h.header_page, 0,
+	       sizeof(*consume_q->kernel_if->u.h.header_page) *
 	       consume_q->kernel_if->num_pages);
 }
 
@@ -834,12 +831,12 @@ static int qp_host_map_queues(struct vmci_queue *produce_q,
 	if (produce_q->q_header != consume_q->q_header)
 		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
 
-	if (produce_q->kernel_if->header_page == NULL ||
-	    *produce_q->kernel_if->header_page == NULL)
+	if (produce_q->kernel_if->u.h.header_page == NULL ||
+	    *produce_q->kernel_if->u.h.header_page == NULL)
 		return VMCI_ERROR_UNAVAILABLE;
 
-	headers[0] = *produce_q->kernel_if->header_page;
-	headers[1] = *consume_q->kernel_if->header_page;
+	headers[0] = *produce_q->kernel_if->u.h.header_page;
+	headers[1] = *consume_q->kernel_if->u.h.header_page;
 
 	produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
 	if (produce_q->q_header != NULL) {
@@ -1720,21 +1717,6 @@ static int qp_broker_attach(struct qp_broker_entry *entry,
 		if (result < VMCI_SUCCESS)
 			return result;
 
-		/*
-		 * Preemptively load in the headers if non-blocking to
-		 * prevent blocking later.
-		 */
-		if (entry->qp.flags & VMCI_QPFLAG_NONBLOCK) {
-			result = qp_host_map_queues(entry->produce_q,
-						    entry->consume_q);
-			if (result < VMCI_SUCCESS) {
-				qp_host_unregister_user_memory(
-					entry->produce_q,
-					entry->consume_q);
-				return result;
-			}
-		}
-
 		entry->state = VMCIQPB_ATTACHED_MEM;
 	} else {
 		entry->state = VMCIQPB_ATTACHED_NO_MEM;
@@ -1749,24 +1731,6 @@ static int qp_broker_attach(struct qp_broker_entry *entry,
 
 		return VMCI_ERROR_UNAVAILABLE;
 	} else {
-		/*
-		 * For non-blocking queue pairs, we cannot rely on
-		 * enqueue/dequeue to map in the pages on the
-		 * host-side, since it may block, so we make an
-		 * attempt here.
-		 */
-
-		if (flags & VMCI_QPFLAG_NONBLOCK) {
-			result =
-			    qp_host_map_queues(entry->produce_q,
-					       entry->consume_q);
-			if (result < VMCI_SUCCESS)
-				return result;
-
-			entry->qp.flags |= flags &
-			    (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED);
-		}
-
 		/* The host side has successfully attached to a queue pair. */
 		entry->state = VMCIQPB_ATTACHED_MEM;
 	}
@@ -2543,24 +2507,19 @@ void vmci_qp_guest_endpoints_exit(void)
  * Since non-blocking isn't yet implemented on the host personality we
  * have no reason to acquire a spin lock.  So to avoid the use of an
  * unnecessary lock only acquire the mutex if we can block.
- * Note: It is assumed that QPFLAG_PINNED implies QPFLAG_NONBLOCK.  Therefore
- * we can use the same locking function for access to both the queue
- * and the queue headers as it is the same logic.  Assert this behvior.
  */
 static void qp_lock(const struct vmci_qp *qpair)
 {
-	if (vmci_can_block(qpair->flags))
-		qp_acquire_queue_mutex(qpair->produce_q);
+	qp_acquire_queue_mutex(qpair->produce_q);
 }
 
 /*
  * Helper routine that unlocks the queue pair after calling
- * qp_lock.  Respects non-blocking and pinning flags.
+ * qp_lock.
  */
 static void qp_unlock(const struct vmci_qp *qpair)
 {
-	if (vmci_can_block(qpair->flags))
-		qp_release_queue_mutex(qpair->produce_q);
+	qp_release_queue_mutex(qpair->produce_q);
 }
 
 /*
@@ -2568,17 +2527,12 @@ static void qp_unlock(const struct vmci_qp *qpair)
  * currently not mapped, it will be attempted to do so.
  */
 static int qp_map_queue_headers(struct vmci_queue *produce_q,
-				struct vmci_queue *consume_q,
-				bool can_block)
+				struct vmci_queue *consume_q)
 {
 	int result;
 
 	if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
-		if (can_block)
-			result = qp_host_map_queues(produce_q, consume_q);
-		else
-			result = VMCI_ERROR_QUEUEPAIR_NOT_READY;
-
+		result = qp_host_map_queues(produce_q, consume_q);
 		if (result < VMCI_SUCCESS)
 			return (produce_q->saved_header &&
 				consume_q->saved_header) ?
@@ -2601,8 +2555,7 @@ static int qp_get_queue_headers(const struct vmci_qp *qpair,
 {
 	int result;
 
-	result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q,
-				      vmci_can_block(qpair->flags));
+	result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
 	if (result == VMCI_SUCCESS) {
 		*produce_q_header = qpair->produce_q->q_header;
 		*consume_q_header = qpair->consume_q->q_header;
@@ -2645,9 +2598,6 @@ static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
 {
 	unsigned int generation;
 
-	if (qpair->flags & VMCI_QPFLAG_NONBLOCK)
-		return false;
-
 	qpair->blocked++;
 	generation = qpair->generation;
 	qp_unlock(qpair);
@@ -2674,15 +2624,14 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
 				 const u64 produce_q_size,
 				 const void *buf,
 				 size_t buf_size,
-				 vmci_memcpy_to_queue_func memcpy_to_queue,
-				 bool can_block)
+				 vmci_memcpy_to_queue_func memcpy_to_queue)
 {
 	s64 free_space;
 	u64 tail;
 	size_t written;
 	ssize_t result;
 
-	result = qp_map_queue_headers(produce_q, consume_q, can_block);
+	result = qp_map_queue_headers(produce_q, consume_q);
 	if (unlikely(result != VMCI_SUCCESS))
 		return result;
 
@@ -2737,15 +2686,14 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
 				 void *buf,
 				 size_t buf_size,
 				 vmci_memcpy_from_queue_func memcpy_from_queue,
-				 bool update_consumer,
-				 bool can_block)
+				 bool update_consumer)
 {
 	s64 buf_ready;
 	u64 head;
 	size_t read;
 	ssize_t result;
 
-	result = qp_map_queue_headers(produce_q, consume_q, can_block);
+	result = qp_map_queue_headers(produce_q, consume_q);
 	if (unlikely(result != VMCI_SUCCESS))
 		return result;
 
@@ -2842,32 +2790,11 @@ int vmci_qpair_alloc(struct vmci_qp **qpair,
 	route = vmci_guest_code_active() ?
 	    VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
 
-	/* If NONBLOCK or PINNED is set, we better be the guest personality. */
-	if ((!vmci_can_block(flags) || vmci_qp_pinned(flags)) &&
-	    VMCI_ROUTE_AS_GUEST != route) {
-		pr_devel("Not guest personality w/ NONBLOCK OR PINNED set");
+	if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
+		pr_devel("NONBLOCK OR PINNED set");
 		return VMCI_ERROR_INVALID_ARGS;
 	}
 
-	/*
-	 * Limit the size of pinned QPs and check sanity.
-	 *
-	 * Pinned pages implies non-blocking mode.  Mutexes aren't acquired
-	 * when the NONBLOCK flag is set in qpair code; and also should not be
-	 * acquired when the PINNED flagged is set.  Since pinning pages
-	 * implies we want speed, it makes no sense not to have NONBLOCK
-	 * set if PINNED is set.  Hence enforce this implication.
-	 */
-	if (vmci_qp_pinned(flags)) {
-		if (vmci_can_block(flags)) {
-			pr_err("Attempted to enable pinning w/o non-blocking");
-			return VMCI_ERROR_INVALID_ARGS;
-		}
-
-		if (produce_qsize + consume_qsize > VMCI_MAX_PINNED_QP_MEMORY)
-			return VMCI_ERROR_NO_RESOURCES;
-	}
-
 	my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
 	if (!my_qpair)
 		return VMCI_ERROR_NO_MEM;
@@ -3195,8 +3122,7 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
 				   qpair->consume_q,
 				   qpair->produce_q_size,
 				   buf, buf_size,
-				   qp_memcpy_to_queue,
-				   vmci_can_block(qpair->flags));
+				   qp_memcpy_to_queue);
 
 	if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
 	    !qp_wait_for_ready_queue(qpair))
@@ -3237,8 +3163,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
 				   qpair->consume_q,
 				   qpair->consume_q_size,
 				   buf, buf_size,
-				   qp_memcpy_from_queue, true,
-				   vmci_can_block(qpair->flags));
+				   qp_memcpy_from_queue, true);
 
 	if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
 	    !qp_wait_for_ready_queue(qpair))
@@ -3280,8 +3205,7 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
 				   qpair->consume_q,
 				   qpair->consume_q_size,
 				   buf, buf_size,
-				   qp_memcpy_from_queue, false,
-				   vmci_can_block(qpair->flags));
+				   qp_memcpy_from_queue, false);
 
 	if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
 	    !qp_wait_for_ready_queue(qpair))
@@ -3323,8 +3247,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
 				    qpair->consume_q,
 				    qpair->produce_q_size,
 				    iov, iov_size,
-				    qp_memcpy_to_queue_iov,
-				    vmci_can_block(qpair->flags));
+				    qp_memcpy_to_queue_iov);
 
 	if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
 	    !qp_wait_for_ready_queue(qpair))
@@ -3367,7 +3290,7 @@ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
 				    qpair->consume_q_size,
 				    iov, iov_size,
 				    qp_memcpy_from_queue_iov,
-				    true, vmci_can_block(qpair->flags));
+				    true);
 
 	if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
 	    !qp_wait_for_ready_queue(qpair))
@@ -3411,7 +3334,7 @@ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
 				    qpair->consume_q_size,
 				    iov, iov_size,
 				    qp_memcpy_from_queue_iov,
-				    false, vmci_can_block(qpair->flags));
+				    false);
 
 	if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
 	    !qp_wait_for_ready_queue(qpair))
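
Summing up the queue-pair rework: per-page alloc_pages() plus the optional vmap()/pinning path gives way to one dma_alloc_coherent() call per page, tracked in parallel pas[]/vas[] arrays, with the queue header simply being page 0. A rough annotated sketch of the guest-side layout implied by qp_alloc_queue() above:

	/*
	 * One vmalloc'd control block followed by num_pages coherent DMA pages:
	 *
	 *   [struct vmci_queue][struct vmci_queue_kern_if][pas[0..n-1]][vas[0..n-1]]
	 *
	 *   vas[0]/pas[0]   queue header page (queue->q_header)
	 *   vas[i]/pas[i]   data page i - 1, hence the "page_index + 1"
	 *                   indexing in __qp_memcpy_to_queue()/__qp_memcpy_from_queue()
	 *
	 * The PPN reported to the hypervisor for page i is pas[i] >> PAGE_SHIFT.
	 */
	const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;	/* +1: header */
	const size_t pas_size  = num_pages * sizeof(dma_addr_t);	/* device addresses */
	const size_t vas_size  = num_pages * sizeof(void *);		/* kernel mappings */
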
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
index 58c6959f6b6d..ed177f04ef24 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.h
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -146,24 +146,6 @@ VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
 	return page_store->len >= 2;
 }
 
-/*
- * Helper function to check if the non-blocking flag
- * is set for a given queue pair.
- */
-static inline bool vmci_can_block(u32 flags)
-{
-	return !(flags & VMCI_QPFLAG_NONBLOCK);
-}
-
-/*
- * Helper function to check if the queue pair is pinned
- * into memory.
- */
-static inline bool vmci_qp_pinned(u32 flags)
-{
-	return flags & VMCI_QPFLAG_PINNED;
-}
-
 void vmci_qp_broker_exit(void);
 int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
 			 u32 flags, u32 priv_flags,