author     Linus Torvalds <torvalds@linux-foundation.org>    2016-06-24 20:57:37 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2016-06-24 20:57:37 -0400
commit     032fd3e58c428c98538cc69113dd11b2801bf35a (patch)
tree       bcc9a77117490fcf77d90a5d08fb958575dad831
parent     d05be0d7e87b7808525e6bc000a87e095cc80040 (diff)
parent     d2bd05d88d245c13b64c3bf9c8927a1c56453d8c (diff)
Merge tag 'for-linus-4.7b-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen bug fixes from David Vrabel:
- fix x86 PV dom0 crash during early boot on some hardware
- fix two pciback bugs affecting certain devices
- fix potential overflow when clearing page tables in x86 PV
* tag 'for-linus-4.7b-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen-pciback: return proper values during BAR sizing
x86/xen: avoid m2p lookup when setting early page table entries
xen/pciback: Fix conf_space read/write overlap check.
x86/xen: fix upper bound of pmd loop in xen_cleanhighmap()
xen/balloon: Fix declared-but-not-defined warning
-rw-r--r--  arch/x86/xen/mmu.c                           | 74
-rw-r--r--  drivers/xen/balloon.c                        | 28
-rw-r--r--  drivers/xen/xen-pciback/conf_space.c         |  6
-rw-r--r--  drivers/xen/xen-pciback/conf_space_header.c  | 18
4 files changed, 58 insertions(+), 68 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 478a2de543a5..67433714b791 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1113,7 +1113,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
 
 	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
 	 * We include the PMD passed in on _both_ boundaries. */
-	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
 			pmd++, vaddr += PMD_SIZE) {
 		if (pmd_none(*pmd))
 			continue;
@@ -1551,41 +1551,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
-	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
-		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
-			       pte_val_ma(pte));
-
-	return pte;
-}
-#else /* CONFIG_X86_64 */
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-	unsigned long pfn;
-
-	if (xen_feature(XENFEAT_writable_page_tables) ||
-	    xen_feature(XENFEAT_auto_translated_physmap) ||
-	    xen_start_info->mfn_list >= __START_KERNEL_map)
-		return pte;
-
-	/*
-	 * Pages belonging to the initial p2m list mapped outside the default
-	 * address range must be mapped read-only. This region contains the
-	 * page tables for mapping the p2m list, too, and page tables MUST be
-	 * mapped read-only.
-	 */
-	pfn = pte_pfn(pte);
-	if (pfn >= xen_start_info->first_p2m_pfn &&
-	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
-		pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
-
-	return pte;
-}
-#endif /* CONFIG_X86_64 */
-
 /*
  * Init-time set_pte while constructing initial pagetables, which
  * doesn't allow RO page table pages to be remapped RW.
@@ -1600,13 +1565,37 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
  * so always write the PTE directly and rely on Xen trapping and
  * emulating any updates as necessary.
  */
-static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+__visible pte_t xen_make_pte_init(pteval_t pte)
 {
-	if (pte_mfn(pte) != INVALID_P2M_ENTRY)
-		pte = mask_rw_pte(ptep, pte);
-	else
-		pte = __pte_ma(0);
+#ifdef CONFIG_X86_64
+	unsigned long pfn;
+
+	/*
+	 * Pages belonging to the initial p2m list mapped outside the default
+	 * address range must be mapped read-only. This region contains the
+	 * page tables for mapping the p2m list, too, and page tables MUST be
+	 * mapped read-only.
+	 */
+	pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+	if (xen_start_info->mfn_list < __START_KERNEL_map &&
+	    pfn >= xen_start_info->first_p2m_pfn &&
+	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
+		pte &= ~_PAGE_RW;
+#endif
+	pte = pte_pfn_to_mfn(pte);
+	return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
 
+static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+#ifdef CONFIG_X86_32
+	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
+	if (pte_mfn(pte) != INVALID_P2M_ENTRY
+	    && pte_val_ma(*ptep) & _PAGE_PRESENT)
+		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+			       pte_val_ma(pte));
+#endif
 	native_set_pte(ptep, pte);
 }
 
@@ -2407,6 +2396,7 @@ static void __init xen_post_allocator_init(void)
 	pv_mmu_ops.alloc_pud = xen_alloc_pud;
 	pv_mmu_ops.release_pud = xen_release_pud;
 #endif
+	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
 
 #ifdef CONFIG_X86_64
 	pv_mmu_ops.write_cr3 = &xen_write_cr3;
@@ -2455,7 +2445,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
 	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
 
-	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
+	.make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
 	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
 
 #ifdef CONFIG_X86_PAE
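The xen_cleanhighmap() hunk above is the "potential overflow when clearing page tables" fix from the pull summary: the loop bound is pointer arithmetic on a pmd_t *, so the count is in entries rather than bytes, and PAGE_SIZE (4096) lets the loop run eight pages past the single page that holds the PMD table, while PTRS_PER_PMD (512) stops exactly at its end. A minimal standalone sketch of that scaling (plain C, not kernel code; the pmd_t typedef and the constants are stand-ins):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pmd_t;                          /* stand-in for the kernel's pmd_t */

#define PAGE_SIZE    4096UL
#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t)) /* 512 entries per PMD page */

int main(void)
{
	/*
	 * "pmd < level2_kernel_pgt + N" compares pmd_t pointers, so N counts
	 * entries and the bound lands N * sizeof(pmd_t) bytes into the table.
	 */
	printf("PAGE_SIZE bound:    %lu entries = %lu bytes (spans 8 pages)\n",
	       PAGE_SIZE, PAGE_SIZE * sizeof(pmd_t));
	printf("PTRS_PER_PMD bound: %lu entries = %lu bytes (one PMD page)\n",
	       (unsigned long)PTRS_PER_PMD,
	       (unsigned long)(PTRS_PER_PMD * sizeof(pmd_t)));
	return 0;
}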
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index d46839f51e73..e4db19e88ab1 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -151,8 +151,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
 static void balloon_process(struct work_struct *work);
 static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 
-static void release_memory_resource(struct resource *resource);
-
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -248,6 +246,19 @@ static enum bp_state update_schedule(enum bp_state state)
 }
 
 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+static void release_memory_resource(struct resource *resource)
+{
+	if (!resource)
+		return;
+
+	/*
+	 * No need to reset region to identity mapped since we now
+	 * know that no I/O can be in this region
+	 */
+	release_resource(resource);
+	kfree(resource);
+}
+
 static struct resource *additional_memory_resource(phys_addr_t size)
 {
 	struct resource *res;
@@ -286,19 +297,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
 	return res;
 }
 
-static void release_memory_resource(struct resource *resource)
-{
-	if (!resource)
-		return;
-
-	/*
-	 * No need to reset region to identity mapped since we now
-	 * know that no I/O can be in this region
-	 */
-	release_resource(resource);
-	kfree(resource);
-}
-
 static enum bp_state reserve_additional_memory(void)
 {
 	long credit;
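The balloon.c change is pure code motion: release_memory_resource() is only used by the memory-hotplug path, so its definition moves inside the CONFIG_XEN_BALLOON_MEMORY_HOTPLUG block and the unconditional forward declaration is dropped, silencing the declared-but-not-defined warning on builds without that option. A sketch of the pattern that typically triggers the gcc warning (hypothetical FEATURE macro, not the driver code):

/* gcc -Wall -c sketch.c, with FEATURE left undefined */
static void helper(void);               /* unconditional forward declaration */

#ifdef FEATURE
static void helper(void)
{
}

void feature_entry_point(void)
{
	helper();                       /* the only caller lives under FEATURE */
}
#endif

/*
 * With FEATURE unset, gcc warns that 'helper' is declared 'static' but
 * never defined; keeping declaration and definition together under the
 * #ifdef, as the balloon patch does, avoids it.
 */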
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 8e67336f8ddd..6a25533da237 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
 		field_start = OFFSET(cfg_entry);
 		field_end = OFFSET(cfg_entry) + field->size;
 
-		if ((req_start >= field_start && req_start < field_end)
-		    || (req_end > field_start && req_end <= field_end)) {
+		if (req_end > field_start && field_end > req_start) {
 			err = conf_space_read(dev, cfg_entry, field_start,
 					      &tmp_val);
 			if (err)
@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
 		field_start = OFFSET(cfg_entry);
 		field_end = OFFSET(cfg_entry) + field->size;
 
-		if ((req_start >= field_start && req_start < field_end)
-		    || (req_end > field_start && req_end <= field_end)) {
+		if (req_end > field_start && field_end > req_start) {
 			tmp_val = 0;
 
 			err = xen_pcibk_config_read(dev, field_start,
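Both conf_space.c hunks replace a two-clause range test that missed a request fully covering a field (req_start < field_start && req_end > field_end) with the canonical overlap check for half-open intervals. A standalone illustration of the case the old test got wrong (plain C with hypothetical offsets, not the pciback code):

#include <stdbool.h>
#include <stdio.h>

static bool old_check(int req_start, int req_end, int field_start, int field_end)
{
	return (req_start >= field_start && req_start < field_end)
	       || (req_end > field_start && req_end <= field_end);
}

static bool new_check(int req_start, int req_end, int field_start, int field_end)
{
	return req_end > field_start && field_end > req_start;
}

int main(void)
{
	/* A 4-byte request at offset 0 against a 2-byte field at offset 1. */
	printf("old: %d, new: %d\n",
	       old_check(0, 4, 1, 3), new_check(0, 4, 1, 3)); /* old: 0, new: 1 */
	return 0;
}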
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index ad3d17d29c81..9ead1c2ff1dd 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -145,7 +145,7 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
 	/* A write to obtain the length must happen as a 32-bit write.
 	 * This does not (yet) support writing individual bytes
 	 */
-	if (value == ~PCI_ROM_ADDRESS_ENABLE)
+	if ((value | ~PCI_ROM_ADDRESS_MASK) == ~0U)
 		bar->which = 1;
 	else {
 		u32 tmpval;
@@ -225,38 +225,42 @@ static inline void read_dev_bar(struct pci_dev *dev,
 		    (PCI_BASE_ADDRESS_SPACE_MEMORY |
 		     PCI_BASE_ADDRESS_MEM_TYPE_64))) {
 			bar_info->val = res[pos - 1].start >> 32;
-			bar_info->len_val = res[pos - 1].end >> 32;
+			bar_info->len_val = -resource_size(&res[pos - 1]) >> 32;
 			return;
 		}
 	}
 
+	if (!res[pos].flags ||
+	    (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET |
+			       IORESOURCE_BUSY)))
+		return;
+
 	bar_info->val = res[pos].start |
 			(res[pos].flags & PCI_REGION_FLAG_MASK);
-	bar_info->len_val = resource_size(&res[pos]);
+	bar_info->len_val = -resource_size(&res[pos]) |
+			    (res[pos].flags & PCI_REGION_FLAG_MASK);
 }
 
 static void *bar_init(struct pci_dev *dev, int offset)
 {
-	struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+	struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
 
 	if (!bar)
 		return ERR_PTR(-ENOMEM);
 
 	read_dev_bar(dev, bar, offset, ~0);
-	bar->which = 0;
 
 	return bar;
 }
 
 static void *rom_init(struct pci_dev *dev, int offset)
 {
-	struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+	struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
 
 	if (!bar)
 		return ERR_PTR(-ENOMEM);
 
 	read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
-	bar->which = 0;
 
 	return bar;
 }
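Both conf_space_header.c fixes concern BAR sizing. rom_write() now accepts any sizing write that sets all address bits of the ROM BAR rather than only the exact value ~PCI_ROM_ADDRESS_ENABLE, and read_dev_bar() stores -resource_size() so that the value a guest reads back after writing all-ones encodes the region size the way hardware does: for a power-of-two region, -size equals ~(size - 1), i.e. the size mask, with the low flag bits preserved. A small sketch of that encoding (plain C, not the pciback code; the flag-mask constant is a stand-in):

#include <stdint.h>
#include <stdio.h>

#define REGION_FLAG_MASK 0x0fU          /* low BAR bits carrying flags (stand-in) */

/* Value a sized 32-bit memory BAR should read back after an all-ones write. */
static uint32_t sizing_value(uint32_t size, uint32_t flags)
{
	return -size | (flags & REGION_FLAG_MASK);
}

int main(void)
{
	/* 64 KiB memory BAR; the guest decodes the size as ~(val & ~0xf) + 1. */
	uint32_t val = sizing_value(0x10000, 0x0);

	printf("read-back 0x%08x -> decoded size 0x%x\n",
	       val, ~(val & ~0xfU) + 1);
	return 0;
}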