Diffstat (limited to 'drivers')
 drivers/xen/Kconfig              | 10
 drivers/xen/cpu_hotplug.c        | 15
 drivers/xen/events/events_base.c |  2
 drivers/xen/gntdev.c             | 26
 drivers/xen/manage.c             |  6
 drivers/xen/mem-reservation.c    |  4
 drivers/xen/xen-balloon.c        |  3
 7 files changed, 42 insertions(+), 24 deletions(-)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index b459edfacff3..90d387b50ab7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -79,15 +79,19 @@ config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
 	  This value is used to allocate enough space in internal
 	  tables needed for physical memory administration.
 
-config XEN_SCRUB_PAGES
-	bool "Scrub pages before returning them to system"
+config XEN_SCRUB_PAGES_DEFAULT
+	bool "Scrub pages before returning them to system by default"
 	depends on XEN_BALLOON
 	default y
 	help
 	  Scrub pages before returning them to the system for reuse by
 	  other domains. This makes sure that any confidential data
 	  is not accidentally visible to other domains. Is it more
-	  secure, but slightly less efficient.
+	  secure, but slightly less efficient. This can be controlled with
+	  xen_scrub_pages=0 parameter and
+	  /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+	  This option only sets the default value.
+
 	  If in doubt, say yes.
 
 config XEN_DEV_EVTCHN
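
Note on the rename: the option no longer decides at build time whether scrubbing is compiled in at all; it merely seeds the initial value of a runtime flag. core_param() registers a bare, unprefixed command-line parameter, which is why plain xen_scrub_pages=0 works at boot. The mem-reservation.c hunk below wires this up:

bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);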
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index d4265c8ebb22..b1357aa4bc55 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu)
 
 static void disable_hotplug_cpu(int cpu)
 {
-	if (cpu_online(cpu)) {
-		lock_device_hotplug();
+	if (!cpu_is_hotpluggable(cpu))
+		return;
+	lock_device_hotplug();
+	if (cpu_online(cpu))
 		device_offline(get_cpu_device(cpu));
-		unlock_device_hotplug();
-	}
-	if (cpu_present(cpu))
+	if (!cpu_online(cpu) && cpu_present(cpu)) {
 		xen_arch_unregister_cpu(cpu);
-
-	set_cpu_present(cpu, false);
+		set_cpu_present(cpu, false);
+	}
+	unlock_device_hotplug();
 }
 
 static int vcpu_online(unsigned int cpu)
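
For readability, here is the post-patch function reconstructed from the hunk above. The point of the rework is that the cpu_online() test, the offlining, and the present-bit bookkeeping all happen under lock_device_hotplug(), so the check and the action can no longer race with a concurrent hotplug operation:

static void disable_hotplug_cpu(int cpu)
{
	/* CPUs that can never be offlined need no work at all. */
	if (!cpu_is_hotpluggable(cpu))
		return;
	lock_device_hotplug();
	/* Check and act under the lock, not before taking it. */
	if (cpu_online(cpu))
		device_offline(get_cpu_device(cpu));
	/* Unregister only if the CPU really went offline. */
	if (!cpu_online(cpu) && cpu_present(cpu)) {
		xen_arch_unregister_cpu(cpu);
		set_cpu_present(cpu, false);
	}
	unlock_device_hotplug();
}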
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 08e4af04d6f2..e6c1934734b7 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
 		clear_evtchn_to_irq_row(row);
 	}
 
-	evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+	evtchn_to_irq[row][col] = irq;
 	return 0;
 }
 
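This one-liner is a behavior-neutral cleanup: the clear_evtchn_to_irq_row(row) context line shows the function has already derived row (and, presumably, col) from evtchn, so the final store can reuse the cached values instead of re-expanding the EVTCHN_ROW()/EVTCHN_COL() macros. A sketch of the presumed surrounding shape (reconstructed from the hunk; details of the allocation path may differ):

static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
{
	unsigned row = EVTCHN_ROW(evtchn);
	unsigned col = EVTCHN_COL(evtchn);

	if (evtchn_to_irq[row] == NULL) {
		/* ... allocate and initialize the row here ... */
		clear_evtchn_to_irq_row(row);
	}

	evtchn_to_irq[row][col] = irq;
	return 0;
}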
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 57390c7666e5..b0b02a501167 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -492,12 +492,19 @@ static bool in_range(struct gntdev_grant_map *map,
 	return true;
 }
 
-static void unmap_if_in_range(struct gntdev_grant_map *map,
-			      unsigned long start, unsigned long end)
+static int unmap_if_in_range(struct gntdev_grant_map *map,
+			     unsigned long start, unsigned long end,
+			     bool blockable)
 {
 	unsigned long mstart, mend;
 	int err;
 
+	if (!in_range(map, start, end))
+		return 0;
+
+	if (!blockable)
+		return -EAGAIN;
+
 	mstart = max(start, map->vma->vm_start);
 	mend = min(end, map->vma->vm_end);
 	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
@@ -508,6 +515,8 @@ static void unmap_if_in_range(struct gntdev_grant_map *map,
 			(mstart - map->vma->vm_start) >> PAGE_SHIFT,
 			(mend - mstart) >> PAGE_SHIFT);
 	WARN_ON(err);
+
+	return 0;
 }
 
 static int mn_invl_range_start(struct mmu_notifier *mn,
@@ -519,25 +528,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
 	struct gntdev_grant_map *map;
 	int ret = 0;
 
-	/* TODO do we really need a mutex here? */
 	if (blockable)
 		mutex_lock(&priv->lock);
 	else if (!mutex_trylock(&priv->lock))
 		return -EAGAIN;
 
 	list_for_each_entry(map, &priv->maps, next) {
-		if (in_range(map, start, end)) {
-			ret = -EAGAIN;
-			goto out_unlock;
-		}
-		unmap_if_in_range(map, start, end);
+		ret = unmap_if_in_range(map, start, end, blockable);
+		if (ret)
+			goto out_unlock;
 	}
 	list_for_each_entry(map, &priv->freeable_maps, next) {
-		if (in_range(map, start, end)) {
-			ret = -EAGAIN;
-			goto out_unlock;
-		}
-		unmap_if_in_range(map, start, end);
+		ret = unmap_if_in_range(map, start, end, blockable);
+		if (ret)
+			goto out_unlock;
 	}
 
 out_unlock:
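
Taken together, the three gntdev hunks move both the range check and the blockable decision into the helper. The old loops bailed out with -EAGAIN for every overlapping map, even when the notifier was allowed to block and the map could simply have been unmapped; the helper now refuses only when it genuinely must not block. Its contract after this patch, as reconstructed from the hunks:

/*
 * unmap_if_in_range() returns:
 *   0       - map does not overlap [start, end), or it does and has
 *             been unmapped;
 *   -EAGAIN - map overlaps the range but the caller must not block,
 *             so the MMU notifier has to retry.
 */
static int unmap_if_in_range(struct gntdev_grant_map *map,
			     unsigned long start, unsigned long end,
			     bool blockable);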
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c93d8ef8df34..5bb01a62f214 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
 		/*
 		 * The Xenstore watch fires directly after registering it and
 		 * after a suspend/resume cycle. So ENOENT is no error but
-		 * might happen in those cases.
+		 * might happen in those cases. ERANGE is observed when we get
+		 * an empty value (''), this happens when we acknowledge the
+		 * request by writing '\0' below.
 		 */
-		if (err != -ENOENT)
+		if (err != -ENOENT && err != -ERANGE)
 			pr_err("Error %d reading sysrq code in control/sysrq\n",
 			       err);
 		xenbus_transaction_end(xbt, 1);
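
The new comment explains why -ERANGE is expected traffic here: the handler acknowledges a processed request by writing '\0' back to control/sysrq, the watch then fires once more, and reading the now-empty node yields -ERANGE instead of a key. A minimal sketch of that sequence, under that assumption (the actual handler differs in detail):

	err = xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key);
	if (err < 0) {
		/* ENOENT and ERANGE are expected; see the comment above. */
		if (err != -ENOENT && err != -ERANGE)
			pr_err("Error %d reading sysrq code in control/sysrq\n",
			       err);
		xenbus_transaction_end(xbt, 1);
		return;
	}

	/* Acknowledge, so the next watch event reads an empty value. */
	if (sysrq_key != '\0')
		xenbus_printf(xbt, "control", "sysrq", "%c", '\0');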
diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c
index 084799c6180e..3782cf070338 100644
--- a/drivers/xen/mem-reservation.c
+++ b/drivers/xen/mem-reservation.c
@@ -14,6 +14,10 @@
 
 #include <xen/interface/memory.h>
 #include <xen/mem-reservation.h>
+#include <linux/moduleparam.h>
+
+bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
+core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
 
 /*
  * Use one extent per PAGE_SIZE to avoid to break down the page into
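
The consumer side of xen_scrub_pages lives in the header, which is outside drivers/ and therefore not shown in this diffstat. Presumably the scrub is gated on the flag at the point where a ballooned-out page is released, along these lines (a sketch assuming a helper in xen/mem-reservation.h):

static inline void xenmem_reservation_scrub_page(struct page *page)
{
	if (xen_scrub_pages)
		clear_highpage(page);
}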
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 294f35ce9e46..63c1494a8d73 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -44,6 +44,7 @@
 #include <xen/xenbus.h>
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/mem-reservation.h>
 
 #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
 
@@ -137,6 +138,7 @@ static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
 static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
 static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
 static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages);
 
 static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
 			      char *buf)
@@ -203,6 +205,7 @@ static struct attribute *balloon_attrs[] = {
 	&dev_attr_max_schedule_delay.attr.attr,
 	&dev_attr_retry_count.attr.attr,
 	&dev_attr_max_retry_count.attr.attr,
+	&dev_attr_scrub_pages.attr.attr,
 	NULL
 };
 
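
DEVICE_BOOL_ATTR() from <linux/device.h> binds the sysfs file directly to the xen_scrub_pages variable through the generic device_show_bool()/device_store_bool() helpers, and adding the attribute to balloon_attrs[] is what makes it appear as /sys/devices/system/xen_memory/xen_memory0/scrub_pages, the path promised in the Kconfig help text. Roughly, the macro expands to:

#define DEVICE_BOOL_ATTR(_name, _mode, _var)				\
	struct dev_ext_attribute dev_attr_##_name =			\
		{ __ATTR(_name, _mode, device_show_bool, device_store_bool), \
		  &(_var) }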