Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/intel-iommu.c   56
-rw-r--r--  drivers/pci/iov.c            1
-rw-r--r--  drivers/pci/pci.h           37
-rw-r--r--  drivers/pci/setup-bus.c      4
4 files changed, 47 insertions, 51 deletions
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 505c1c7075f0..6af6b628175b 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -39,6 +39,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/tboot.h>
 #include <linux/dmi.h>
+#include <linux/pci-ats.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"
@@ -1299,7 +1300,7 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 static struct iova_domain reserved_iova_list;
 static struct lock_class_key reserved_rbtree_key;
 
-static void dmar_init_reserved_ranges(void)
+static int dmar_init_reserved_ranges(void)
 {
 	struct pci_dev *pdev = NULL;
 	struct iova *iova;
@@ -1313,8 +1314,10 @@ static void dmar_init_reserved_ranges(void)
 	/* IOAPIC ranges shouldn't be accessed by DMA */
 	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
 		IOVA_PFN(IOAPIC_RANGE_END));
-	if (!iova)
+	if (!iova) {
 		printk(KERN_ERR "Reserve IOAPIC range failed\n");
+		return -ENODEV;
+	}
 
 	/* Reserve all PCI MMIO to avoid peer-to-peer access */
 	for_each_pci_dev(pdev) {
@@ -1327,11 +1330,13 @@ static void dmar_init_reserved_ranges(void)
 			iova = reserve_iova(&reserved_iova_list,
 					IOVA_PFN(r->start),
 					IOVA_PFN(r->end));
-			if (!iova)
+			if (!iova) {
 				printk(KERN_ERR "Reserve iova failed\n");
+				return -ENODEV;
+			}
 		}
 	}
-
+	return 0;
 }
 
 static void domain_reserve_special_ranges(struct dmar_domain *domain)
@@ -1835,7 +1840,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 
 	ret = iommu_attach_domain(domain, iommu);
 	if (ret) {
-		domain_exit(domain);
+		free_domain_mem(domain);
 		goto error;
 	}
 
@@ -2213,7 +2218,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
 	return 0;
 }
 
-int __init init_dmars(void)
+static int __init init_dmars(int force_on)
 {
 	struct dmar_drhd_unit *drhd;
 	struct dmar_rmrr_unit *rmrr;
@@ -2393,8 +2398,15 @@ int __init init_dmars(void)
 	 * enable translation
 	 */
 	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
+		if (drhd->ignored) {
+			/*
+			 * we always have to disable PMRs or DMA may fail on
+			 * this device
+			 */
+			if (force_on)
+				iommu_disable_protect_mem_regions(drhd->iommu);
 			continue;
+		}
 		iommu = drhd->iommu;
 
 		iommu_flush_write_buffer(iommu);
@@ -3240,9 +3252,15 @@ static int device_notifier(struct notifier_block *nb,
 	if (!domain)
 		return 0;
 
-	if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
+	if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
 		domain_remove_one_dev_info(domain, pdev);
 
+		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
+		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+		    list_empty(&domain->devices))
+			domain_exit(domain);
+	}
+
 	return 0;
 }
 
@@ -3277,12 +3295,21 @@ int __init intel_iommu_init(void)
 	if (no_iommu || dmar_disabled)
 		return -ENODEV;
 
-	iommu_init_mempool();
-	dmar_init_reserved_ranges();
+	if (iommu_init_mempool()) {
+		if (force_on)
+			panic("tboot: Failed to initialize iommu memory\n");
+		return -ENODEV;
+	}
+
+	if (dmar_init_reserved_ranges()) {
+		if (force_on)
+			panic("tboot: Failed to reserve iommu ranges\n");
+		return -ENODEV;
+	}
 
 	init_no_remapping_devices();
 
-	ret = init_dmars();
+	ret = init_dmars(force_on);
 	if (ret) {
 		if (force_on)
 			panic("tboot: Failed to initialize DMARs\n");
@@ -3391,6 +3418,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
 		domain->iommu_count--;
 		domain_update_iommu_cap(domain);
 		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
+
+		spin_lock_irqsave(&iommu->lock, tmp_flags);
+		clear_bit(domain->id, iommu->domain_ids);
+		iommu->domains[domain->id] = NULL;
+		spin_unlock_irqrestore(&iommu->lock, tmp_flags);
 	}
 
 	spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -3607,9 +3639,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 
 		pte = dmar_domain->pgd;
 		if (dma_pte_present(pte)) {
-			free_pgtable_page(dmar_domain->pgd);
 			dmar_domain->pgd = (struct dma_pte *)
 				phys_to_virt(dma_pte_addr(pte));
+			free_pgtable_page(pte);
 		}
 		dmar_domain->agaw--;
 	}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 553d8ee55c1c..42fae4776515 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -13,6 +13,7 @@
 #include <linux/mutex.h>
 #include <linux/string.h>
 #include <linux/delay.h>
+#include <linux/pci-ats.h>
 #include "pci.h"
 
 #define VIRTFN_ID_LEN	16
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4ee9e8a2607f..731e20265ace 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -249,15 +249,6 @@ struct pci_sriov {
 	u8 __iomem *mstate;	/* VF Migration State Array */
 };
 
-/* Address Translation Service */
-struct pci_ats {
-	int pos;	/* capability position */
-	int stu;	/* Smallest Translation Unit */
-	int qdep;	/* Invalidate Queue Depth */
-	int ref_cnt;	/* Physical Function reference count */
-	unsigned int is_enabled:1;	/* Enable bit is set */
-};
-
 #ifdef CONFIG_PCI_IOV
 extern int pci_iov_init(struct pci_dev *dev);
 extern void pci_iov_release(struct pci_dev *dev);
@@ -268,19 +259,6 @@ extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
 extern void pci_restore_iov_state(struct pci_dev *dev);
 extern int pci_iov_bus_range(struct pci_bus *bus);
 
-extern int pci_enable_ats(struct pci_dev *dev, int ps);
-extern void pci_disable_ats(struct pci_dev *dev);
-extern int pci_ats_queue_depth(struct pci_dev *dev);
-/**
- * pci_ats_enabled - query the ATS status
- * @dev: the PCI device
- *
- * Returns 1 if ATS capability is enabled, or 0 if not.
- */
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
-	return dev->ats && dev->ats->is_enabled;
-}
 #else
 static inline int pci_iov_init(struct pci_dev *dev)
 {
@@ -303,21 +281,6 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
 	return 0;
 }
 
-static inline int pci_enable_ats(struct pci_dev *dev, int ps)
-{
-	return -ENODEV;
-}
-static inline void pci_disable_ats(struct pci_dev *dev)
-{
-}
-static inline int pci_ats_queue_depth(struct pci_dev *dev)
-{
-	return -ENODEV;
-}
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
-	return 0;
-}
 #endif /* CONFIG_PCI_IOV */
 
 static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 7a65db400253..1e9e5a5b8c81 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -579,7 +579,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
 	}
 	size0 = calculate_iosize(size, min_size, size1,
 			resource_size(b_res), 4096);
-	size1 = !add_size? size0:
+	size1 = (!add_head || (add_head && !add_size)) ? size0 :
 		calculate_iosize(size, min_size+add_size, size1,
 			resource_size(b_res), 4096);
 	if (!size0 && !size1) {
@@ -677,7 +677,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
 		align += aligns[order];
 	}
 	size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
-	size1 = !add_size ? size :
+	size1 = (!add_head || (add_head && !add_size)) ? size0 :
 		calculate_memsize(size, min_size+add_size, 0,
 			resource_size(b_res), min_align);
 	if (!size0 && !size1) {