author     Linus Torvalds <torvalds@linux-foundation.org>  2013-12-20 12:34:54 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-12-20 12:34:54 -0500
commit     4203d0eb3acc459d1e7737193b5684e71185dca7 (patch)
tree       357734dc414c22e87cd676dd1ff3feead64defc9
parent     5263f0a88076ab32b3120356645734918bdc1700 (diff)
parent     c1d15f5c8bc1170dafe16e988e55437245966dfe (diff)
Merge tag 'stable/for-linus-3.13-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull Xen bugfixes from Konrad Rzeszutek Wilk:

 - Fix balloon driver for auto-translate guests (PVHVM, ARM) to not use
   scratch pages.
 - Fix block API header for ARM32 and ARM64 to have proper layout
 - On ARM when mapping guests, stick on PTE_SPECIAL
 - When using SWIOTLB under ARM, don't call swiotlb functions twice
 - When unmapping guests memory and if we fail, don't return pages which
   failed to be unmapped.
 - Grant driver was using the wrong address on ARM.

* tag 'stable/for-linus-3.13-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/balloon: Seperate the auto-translate logic properly (v2)
  xen/block: Correctly define structures in public headers on ARM32 and ARM64
  arm: xen: foreign mapping PTEs are special.
  xen/arm64: do not call the swiotlb functions twice
  xen: privcmd: do not return pages which we have failed to unmap
  XEN: Grant table address, xen_hvm_resume_frames, is a phys_addr not a pfn
-rw-r--r--  arch/arm/xen/enlighten.c                    |  6
-rw-r--r--  arch/arm64/include/asm/xen/page-coherent.h  |  4
-rw-r--r--  drivers/xen/balloon.c                       | 63
-rw-r--r--  drivers/xen/grant-table.c                   |  3
-rw-r--r--  drivers/xen/privcmd.c                       |  9
-rw-r--r--  include/xen/interface/io/blkif.h            | 10
6 files changed, 51 insertions(+), 44 deletions(-)
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 83e4f959ee47..85501238b425 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 	struct remap_data *info = data;
 	struct page *page = info->pages[info->index++];
 	unsigned long pfn = page_to_pfn(page);
-	pte_t pte = pfn_pte(pfn, info->prot);
+	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
 
 	if (map_foreign_page(pfn, info->fgmfn, info->domid))
 		return -EFAULT;
@@ -224,10 +224,10 @@ static int __init xen_guest_init(void)
 	}
 	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
 		return 0;
-	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
+	xen_hvm_resume_frames = res.start;
 	xen_events_irq = irq_of_parse_and_map(node, 0);
 	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-		version, xen_events_irq, xen_hvm_resume_frames);
+		version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
 	xen_domain_type = XEN_HVM_DOMAIN;
 
 	xen_setup_features();
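
Note on the change above: xen_hvm_resume_frames now holds the grant-table base as a physical address (res.start) rather than a page frame number, and the log line derives the PFN by shifting right by PAGE_SHIFT. A minimal stand-alone sketch of that phys_addr/pfn relation follows; the PAGE_SHIFT value and sample address are illustrative, not taken from the kernel sources or a real device tree.

	/* phys_addr <-> pfn relation, assuming 4 KiB pages */
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		uint64_t grant_table_phys = 0xb0000000ULL;              /* analogous to res.start */
		uint64_t gnttab_frame_pfn = grant_table_phys >> PAGE_SHIFT;

		printf("phys=0x%llx pfn=0x%llx\n",
		       (unsigned long long)grant_table_phys,
		       (unsigned long long)gnttab_frame_pfn);
		return 0;
	}
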
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2820f1a6eebe..dde3fc9c49f0 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 				     unsigned long offset, size_t size, enum dma_data_direction dir,
 				     struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
 }
 
 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_sync_single_for_device(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
 }
 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 55ea73f7c70b..4c02e2b94103 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 
 		pfn = page_to_pfn(page);
 
-		set_phys_to_machine(pfn, frame_list[i]);
-
 #ifdef CONFIG_XEN_HAVE_PVMMU
-		/* Link back into the page tables if not highmem. */
-		if (xen_pv_domain() && !PageHighMem(page)) {
-			int ret;
-			ret = HYPERVISOR_update_va_mapping(
-				(unsigned long)__va(pfn << PAGE_SHIFT),
-				mfn_pte(frame_list[i], PAGE_KERNEL),
-				0);
-			BUG_ON(ret);
+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			set_phys_to_machine(pfn, frame_list[i]);
+
+			/* Link back into the page tables if not highmem. */
+			if (!PageHighMem(page)) {
+				int ret;
+				ret = HYPERVISOR_update_va_mapping(
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					mfn_pte(frame_list[i], PAGE_KERNEL),
+					0);
+				BUG_ON(ret);
+			}
 		}
 #endif
 
@@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	enum bp_state state = BP_DONE;
 	unsigned long pfn, i;
 	struct page *page;
-	struct page *scratch_page;
 	int ret;
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
@@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
 		scrub_page(page);
 
+#ifdef CONFIG_XEN_HAVE_PVMMU
 		/*
 		 * Ballooned out frames are effectively replaced with
 		 * a scratch frame. Ensure direct mappings and the
 		 * p2m are consistent.
 		 */
-		scratch_page = get_balloon_scratch_page();
-#ifdef CONFIG_XEN_HAVE_PVMMU
-		if (xen_pv_domain() && !PageHighMem(page)) {
-			ret = HYPERVISOR_update_va_mapping(
-				(unsigned long)__va(pfn << PAGE_SHIFT),
-				pfn_pte(page_to_pfn(scratch_page),
-					PAGE_KERNEL_RO), 0);
-			BUG_ON(ret);
-		}
-#endif
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 			unsigned long p;
+			struct page *scratch_page = get_balloon_scratch_page();
+
+			if (!PageHighMem(page)) {
+				ret = HYPERVISOR_update_va_mapping(
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					pfn_pte(page_to_pfn(scratch_page),
+						PAGE_KERNEL_RO), 0);
+				BUG_ON(ret);
+			}
 			p = page_to_pfn(scratch_page);
 			__set_phys_to_machine(pfn, pfn_to_mfn(p));
+
+			put_balloon_scratch_page();
 		}
-		put_balloon_scratch_page();
+#endif
 
 		balloon_append(pfn_to_page(pfn));
 	}
@@ -627,15 +630,17 @@ static int __init balloon_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	for_each_online_cpu(cpu)
-	{
-		per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
-		if (per_cpu(balloon_scratch_page, cpu) == NULL) {
-			pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
-			return -ENOMEM;
+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+		for_each_online_cpu(cpu)
+		{
+			per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+			if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+				pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+				return -ENOMEM;
+			}
 		}
+		register_cpu_notifier(&balloon_cpu_notifier);
 	}
-	register_cpu_notifier(&balloon_cpu_notifier);
 
 	pr_info("Initialising balloon driver\n");
 
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 028387192b60..aa846a48f400 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1176,7 +1176,8 @@ static int gnttab_setup(void)
 		gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
 						PAGE_SIZE * max_nr_gframes);
 		if (gnttab_shared.addr == NULL) {
-			pr_warn("Failed to ioremap gnttab share frames!\n");
+			pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
+					xen_hvm_resume_frames);
 			return -ENOMEM;
 		}
 	}
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8e74590fa1bb..569a13b9e856 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma)
 {
 	struct page **pages = vma->vm_private_data;
 	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	int rc;
 
 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
 		return;
 
-	xen_unmap_domain_mfn_range(vma, numpgs, pages);
-	free_xenballooned_pages(numpgs, pages);
+	rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+	if (rc == 0)
+		free_xenballooned_pages(numpgs, pages);
+	else
+		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
+			numpgs, rc);
 	kfree(pages);
 }
 
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 65e12099ef89..ae665ac59c36 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -146,7 +146,7 @@ struct blkif_request_segment_aligned {
 struct blkif_request_rw {
 	uint8_t        nr_segments;  /* number of segments                   */
 	blkif_vdev_t   handle;       /* only for read/write requests         */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t       _pad1;        /* offsetof(blkif_request,u.rw.id) == 8 */
 #endif
 	uint64_t       id;           /* private guest value, echoed in resp  */
@@ -163,7 +163,7 @@ struct blkif_request_discard {
 	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero.        */
 #define BLKIF_DISCARD_SECURE (1<<0)  /* ignored if discard-secure=0          */
 	blkif_vdev_t   _pad1;        /* only for read/write requests         */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t       _pad2;        /* offsetof(blkif_req..,u.discard.id)==8*/
 #endif
 	uint64_t       id;           /* private guest value, echoed in resp  */
@@ -175,7 +175,7 @@ struct blkif_request_discard {
 struct blkif_request_other {
 	uint8_t        _pad1;
 	blkif_vdev_t   _pad2;        /* only for read/write requests         */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t       _pad3;        /* offsetof(blkif_req..,u.other.id)==8*/
 #endif
 	uint64_t       id;           /* private guest value, echoed in resp  */
@@ -184,7 +184,7 @@ struct blkif_request_other {
 struct blkif_request_indirect {
 	uint8_t        indirect_op;
 	uint16_t       nr_segments;
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
 #endif
 	uint64_t       id;
@@ -192,7 +192,7 @@ struct blkif_request_indirect {
 	blkif_vdev_t   handle;
 	uint16_t       _pad2;
 	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t       _pad3;        /* make it 64 byte aligned */
 #else
 	uint64_t       _pad3;        /* make it 64 byte aligned */
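
Note on the blkif.h changes above: the padding comments pin `id` at byte offset 8 of the 64-bit request layout, and switching the guards from CONFIG_X86_64 to "not CONFIG_X86_32" gives ARM32 and ARM64 that same layout. As a hedged illustration of how such an ABI constraint can be checked at build time, here is a simplified stand-in struct (field names and types abbreviated, not the kernel header itself) with an offsetof/_Static_assert check:

	/* Build-time layout check in the spirit of the "offsetof(...) == 8" comments. */
	#include <stddef.h>
	#include <stdint.h>

	struct rw_req_head {
		uint8_t  nr_segments;   /* offset 0 */
		uint16_t handle;        /* offset 2 (one byte of implicit padding before it) */
		uint32_t _pad1;         /* offset 4: explicit pad so id lands at 8 even where
		                           uint64_t only needs 4-byte alignment */
		uint64_t id;            /* offset 8, matching the 64-bit protocol layout */
	};

	_Static_assert(offsetof(struct rw_req_head, id) == 8,
		       "64-bit blkif layout expects id at offset 8");

	int main(void) { return 0; }
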