 Documentation/ABI/stable/sysfs-devices-system-xen_memory |  9
 Documentation/admin-guide/kernel-parameters.txt          |  6
 drivers/xen/Kconfig                                      | 10
 drivers/xen/cpu_hotplug.c                                | 15
 drivers/xen/events/events_base.c                         |  2
 drivers/xen/gntdev.c                                     | 26
 drivers/xen/manage.c                                     |  6
 drivers/xen/mem-reservation.c                            |  4
 drivers/xen/xen-balloon.c                                |  3
 include/xen/mem-reservation.h                            |  7
 10 files changed, 61 insertions(+), 27 deletions(-)
diff --git a/Documentation/ABI/stable/sysfs-devices-system-xen_memory b/Documentation/ABI/stable/sysfs-devices-system-xen_memory
index caa311d59ac1..6d83f95a8a8e 100644
--- a/Documentation/ABI/stable/sysfs-devices-system-xen_memory
+++ b/Documentation/ABI/stable/sysfs-devices-system-xen_memory
@@ -75,3 +75,12 @@ Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 Description:
 		Amount (in KiB) of low (or normal) memory in the
 		balloon.
+
+What:		/sys/devices/system/xen_memory/xen_memory0/scrub_pages
+Date:		September 2018
+KernelVersion:	4.20
+Contact:	xen-devel@lists.xenproject.org
+Description:
+		Control scrubbing pages before returning them to Xen for others domains
+		use. Can be set with xen_scrub_pages cmdline
+		parameter. Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
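
As a usage illustration (not part of the patch), the scrub_pages attribute documented above can be toggled from userspace by writing a boolean value to it. A minimal C sketch, assuming the attribute accepts the usual kernel boolean spellings ("0"/"1", "y"/"n"):

/* Illustrative only: flip the runtime scrub_pages control via sysfs. */
#include <stdio.h>
#include <stdlib.h>

#define SCRUB_ATTR "/sys/devices/system/xen_memory/xen_memory0/scrub_pages"

int main(int argc, char **argv)
{
	const char *val = (argc > 1) ? argv[1] : "1";	/* "1" = scrub, "0" = don't */
	FILE *f = fopen(SCRUB_ATTR, "w");

	if (!f) {
		perror("fopen " SCRUB_ATTR);
		return EXIT_FAILURE;
	}
	if (fputs(val, f) == EOF || fclose(f) == EOF) {
		perror("write " SCRUB_ATTR);
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}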
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 64a3bf54b974..92eb1f42240d 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5000,6 +5000,12 @@
 			Disables the PV optimizations forcing the HVM guest to
 			run as generic HVM guest with no PV drivers.
 
+	xen_scrub_pages=	[XEN]
+			Boolean option to control scrubbing pages before giving them back
+			to Xen, for use by other domains. Can be also changed at runtime
+			with /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+			Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
+
 	xirc2ps_cs=	[NET,PCMCIA]
 			Format:
 			<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index b459edfacff3..90d387b50ab7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -79,15 +79,19 @@ config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
 	  This value is used to allocate enough space in internal
 	  tables needed for physical memory administration.
 
-config XEN_SCRUB_PAGES
-	bool "Scrub pages before returning them to system"
+config XEN_SCRUB_PAGES_DEFAULT
+	bool "Scrub pages before returning them to system by default"
 	depends on XEN_BALLOON
 	default y
 	help
 	  Scrub pages before returning them to the system for reuse by
 	  other domains. This makes sure that any confidential data
 	  is not accidentally visible to other domains. Is it more
-	  secure, but slightly less efficient.
+	  secure, but slightly less efficient. This can be controlled with
+	  xen_scrub_pages=0 parameter and
+	  /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+	  This option only sets the default value.
+
 	  If in doubt, say yes.
 
 config XEN_DEV_EVTCHN
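
The help text's point that the Kconfig symbol only seeds a default, while xen_scrub_pages= and the sysfs attribute override it later, follows a common kernel idiom: initialise a boolean from IS_ENABLED() and register it with core_param(). The concrete wiring for xen_scrub_pages is in the mem-reservation.c hunk below; the following is only a hypothetical sketch of the idiom (CONFIG_EXAMPLE_FEATURE_DEFAULT and example_feature are placeholder names, not part of this patch):

/* Hypothetical sketch of "Kconfig sets only the default" for a bool knob. */
#include <linux/kconfig.h>
#include <linux/cache.h>
#include <linux/moduleparam.h>

/*
 * Compile-time default from Kconfig, overridable with example_feature=<bool>
 * on the kernel command line (and later via whatever runtime interface
 * exposes the variable).
 */
bool __read_mostly example_feature = IS_ENABLED(CONFIG_EXAMPLE_FEATURE_DEFAULT);
core_param(example_feature, example_feature, bool, 0);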
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index d4265c8ebb22..b1357aa4bc55 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu)
 
 static void disable_hotplug_cpu(int cpu)
 {
-	if (cpu_online(cpu)) {
-		lock_device_hotplug();
+	if (!cpu_is_hotpluggable(cpu))
+		return;
+	lock_device_hotplug();
+	if (cpu_online(cpu))
 		device_offline(get_cpu_device(cpu));
-		unlock_device_hotplug();
-	}
-	if (cpu_present(cpu))
+	if (!cpu_online(cpu) && cpu_present(cpu)) {
 		xen_arch_unregister_cpu(cpu);
-
-	set_cpu_present(cpu, false);
+		set_cpu_present(cpu, false);
+	}
+	unlock_device_hotplug();
 }
 
 static int vcpu_online(unsigned int cpu)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 08e4af04d6f2..e6c1934734b7 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
 		clear_evtchn_to_irq_row(row);
 	}
 
-	evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+	evtchn_to_irq[row][col] = irq;
 	return 0;
 }
 
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 57390c7666e5..b0b02a501167 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -492,12 +492,19 @@ static bool in_range(struct gntdev_grant_map *map,
 	return true;
 }
 
-static void unmap_if_in_range(struct gntdev_grant_map *map,
-			      unsigned long start, unsigned long end)
+static int unmap_if_in_range(struct gntdev_grant_map *map,
+			      unsigned long start, unsigned long end,
+			      bool blockable)
 {
 	unsigned long mstart, mend;
 	int err;
 
+	if (!in_range(map, start, end))
+		return 0;
+
+	if (!blockable)
+		return -EAGAIN;
+
 	mstart = max(start, map->vma->vm_start);
 	mend = min(end, map->vma->vm_end);
 	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
@@ -508,6 +515,8 @@ static void unmap_if_in_range(struct gntdev_grant_map *map,
 			(mstart - map->vma->vm_start) >> PAGE_SHIFT,
 			(mend - mstart) >> PAGE_SHIFT);
 	WARN_ON(err);
+
+	return 0;
 }
 
 static int mn_invl_range_start(struct mmu_notifier *mn,
@@ -519,25 +528,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
 	struct gntdev_grant_map *map;
 	int ret = 0;
 
-	/* TODO do we really need a mutex here? */
 	if (blockable)
 		mutex_lock(&priv->lock);
 	else if (!mutex_trylock(&priv->lock))
 		return -EAGAIN;
 
 	list_for_each_entry(map, &priv->maps, next) {
-		if (in_range(map, start, end)) {
-			ret = -EAGAIN;
-			goto out_unlock;
-		}
-		unmap_if_in_range(map, start, end);
+		ret = unmap_if_in_range(map, start, end, blockable);
+		if (ret)
+			goto out_unlock;
 	}
 	list_for_each_entry(map, &priv->freeable_maps, next) {
-		if (in_range(map, start, end)) {
-			ret = -EAGAIN;
-			goto out_unlock;
-		}
-		unmap_if_in_range(map, start, end);
+		ret = unmap_if_in_range(map, start, end, blockable);
+		if (ret)
+			goto out_unlock;
 	}
 
 out_unlock:
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c93d8ef8df34..5bb01a62f214 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
 	/*
 	 * The Xenstore watch fires directly after registering it and
 	 * after a suspend/resume cycle. So ENOENT is no error but
-	 * might happen in those cases.
+	 * might happen in those cases. ERANGE is observed when we get
+	 * an empty value (''), this happens when we acknowledge the
+	 * request by writing '\0' below.
 	 */
-	if (err != -ENOENT)
+	if (err != -ENOENT && err != -ERANGE)
 		pr_err("Error %d reading sysrq code in control/sysrq\n",
 		       err);
 	xenbus_transaction_end(xbt, 1);
diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c
index 084799c6180e..3782cf070338 100644
--- a/drivers/xen/mem-reservation.c
+++ b/drivers/xen/mem-reservation.c
@@ -14,6 +14,10 @@
 
 #include <xen/interface/memory.h>
 #include <xen/mem-reservation.h>
+#include <linux/moduleparam.h>
+
+bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
+core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
 
 /*
  * Use one extent per PAGE_SIZE to avoid to break down the page into
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 294f35ce9e46..63c1494a8d73 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -44,6 +44,7 @@
 #include <xen/xenbus.h>
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/mem-reservation.h>
 
 #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
 
@@ -137,6 +138,7 @@ static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
 static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
 static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
 static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages);
 
 static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
 			      char *buf)
@@ -203,6 +205,7 @@ static struct attribute *balloon_attrs[] = {
 	&dev_attr_max_schedule_delay.attr.attr,
 	&dev_attr_retry_count.attr.attr,
 	&dev_attr_max_retry_count.attr.attr,
+	&dev_attr_scrub_pages.attr.attr,
 	NULL
 };
 
diff --git a/include/xen/mem-reservation.h b/include/xen/mem-reservation.h
index 80b52b4945e9..a2ab516fcd2c 100644
--- a/include/xen/mem-reservation.h
+++ b/include/xen/mem-reservation.h
@@ -17,11 +17,12 @@
 
 #include <xen/page.h>
 
+extern bool xen_scrub_pages;
+
 static inline void xenmem_reservation_scrub_page(struct page *page)
 {
-#ifdef CONFIG_XEN_SCRUB_PAGES
-	clear_highpage(page);
-#endif
+	if (xen_scrub_pages)
+		clear_highpage(page);
 }
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
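
With the #ifdef replaced by a runtime test, any caller of xenmem_reservation_scrub_page() now honours the xen_scrub_pages setting automatically. A minimal caller-side sketch, assuming a hypothetical release path (only the scrub helper and the flag come from this patch):

/* Hypothetical caller: scrub (when enabled) before giving a page back to Xen. */
#include <linux/mm.h>
#include <xen/mem-reservation.h>

static void __maybe_unused example_return_page(struct page *page)
{
	/* Zeroes the page only if xen_scrub_pages is true at this moment. */
	xenmem_reservation_scrub_page(page);

	/*
	 * ... the real code would now hand the underlying frame back to Xen,
	 * e.g. via the XENMEM_decrease_reservation path ...
	 */
}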