 arch/sparc/kernel/chmc.c              |  1
 drivers/md/raid1.c                    |  3
 drivers/md/raid10.c                   | 19
 drivers/net/wireless/iwlwifi/iwl-tx.c |  8
 drivers/pci/dmar.c                    | 73
 drivers/pci/intr_remapping.c          | 21
 include/linux/intel-iommu.h           |  3
 7 files changed, 87 insertions(+), 41 deletions(-)
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index 3b9f4d6e14a9..e1a9598e2a4d 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -306,6 +306,7 @@ static int jbusmc_print_dimm(int syndrome_code,
 		buf[1] = '?';
 		buf[2] = '?';
 		buf[3] = '\0';
+		return 0;
 	}
 	p = dp->controller;
 	prop = &p->layout;
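
Note on the hunk above: the unknown-syndrome path filled buf with "???" but had no early return, so the fallback string was built and then control fell straight through into the normal decode path below. A minimal sketch of the bug class, with hypothetical names:

	/* An error path that builds a fallback result must also return it;
	 * without the return, control falls through into the success path. */
	if (!syndrome_decodable(code)) {
		strcpy(buf, "???");
		return 0;	/* the missing early return */
	}
	/* normal decode continues here */
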
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 01e3cffd03b8..e2466425d9ca 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1237,8 +1237,9 @@ static void end_sync_write(struct bio *bio, int error)
 	update_head_pos(mirror, r1_bio);
 
 	if (atomic_dec_and_test(&r1_bio->remaining)) {
-		md_done_sync(mddev, r1_bio->sectors, uptodate);
+		sector_t s = r1_bio->sectors;
 		put_buf(r1_bio);
+		md_done_sync(mddev, s, uptodate);
 	}
 }
 
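The reordering above reads as a teardown-race fix: md_done_sync() may mark the resync complete and let the array be stopped, after which r1_bio must not be touched, so the buffer is released first and the sector count is copied out while it is still valid. A minimal sketch of the discipline, under that assumption and with hypothetical names:

	/* Cache what we still need, release the shared buffer, and only
	 * then announce completion; announcing first can let a waiter
	 * tear the buffer pool down underneath us. */
	sector_t s = unit->sectors;	/* copy before release       */
	release_buf(unit);		/* hand the buffer back      */
	report_done(ctx, s);		/* may wake the pool's owner */
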
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6736d6dff981..7301631abe04 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1236,6 +1236,7 @@ static void end_sync_read(struct bio *bio, int error)
 	/* for reconstruct, we always reschedule after a read.
 	 * for resync, only after all reads
 	 */
+	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
 	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
 	    atomic_dec_and_test(&r10_bio->remaining)) {
 		/* we have read all the blocks,
@@ -1243,7 +1244,6 @@ static void end_sync_read(struct bio *bio, int error)
 		 */
 		reschedule_retry(r10_bio);
 	}
-	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
 }
 
 static void end_sync_write(struct bio *bio, int error)
@@ -1264,11 +1264,13 @@ static void end_sync_write(struct bio *bio, int error)
 
 	update_head_pos(i, r10_bio);
 
+	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
 	while (atomic_dec_and_test(&r10_bio->remaining)) {
 		if (r10_bio->master_bio == NULL) {
 			/* the primary of several recovery bios */
-			md_done_sync(mddev, r10_bio->sectors, 1);
+			sector_t s = r10_bio->sectors;
 			put_buf(r10_bio);
+			md_done_sync(mddev, s, 1);
 			break;
 		} else {
 			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
@@ -1276,7 +1278,6 @@ static void end_sync_write(struct bio *bio, int error)
 			r10_bio = r10_bio2;
 		}
 	}
-	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
 }
 
 /*
@@ -1749,8 +1750,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 	if (!go_faster && conf->nr_waiting)
 		msleep_interruptible(1000);
 
-	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
-
 	/* Again, very different code for resync and recovery.
 	 * Both must result in an r10bio with a list of bios that
 	 * have bi_end_io, bi_sector, bi_bdev set,
@@ -1886,6 +1885,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 		/* resync. Schedule a read for every block at this virt offset */
 		int count = 0;
 
+		bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+
 		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
 				       &sync_blocks, mddev->degraded) &&
 		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
@@ -2010,13 +2011,13 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 	/* There is nowhere to write, so all non-sync
 	 * drives must be failed, so try the next chunk...
 	 */
-	{
-	sector_t sec = max_sector - sector_nr;
-	sectors_skipped += sec;
+	if (sector_nr + max_sync < max_sector)
+		max_sector = sector_nr + max_sync;
+
+	sectors_skipped += (max_sector - sector_nr);
 	chunks_skipped ++;
 	sector_nr = max_sector;
 	goto skipped;
-	}
 }
 
 static int run(mddev_t *mddev)
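
The end_sync_read()/end_sync_write() hunks apply the same rule as the raid1 change: drop the per-device reference and copy any needed fields before the final atomic_dec_and_test() can hand the r10_bio to a context that may free it. The sync_request() hunks additionally defer bitmap_cond_end_sync() to the resync path and clamp the nowhere-to-write skip to one max_sync window per pass. A minimal sketch of the reference-count hazard (hypothetical names):

	/* Racy: once dec-and-test succeeds, here or on another CPU, the
	 * shared state may be freed, making the later access unsafe. */
	if (atomic_dec_and_test(&shared->remaining))
		finish(shared);		/* may free the shared state */
	put_ref(shared->dev);		/* potential use-after-free  */

	/* Safe: drop the reference first, then run the completion test. */
	put_ref(dev);
	if (atomic_dec_and_test(&shared->remaining))
		finish(shared);
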
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index b0ee86c62685..ab13ff22a8c0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -148,7 +148,7 @@ static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 	pci_unmap_single(dev,
 			pci_unmap_addr(&txq->cmd[index]->meta, mapping),
 			pci_unmap_len(&txq->cmd[index]->meta, len),
-			PCI_DMA_TODEVICE);
+			PCI_DMA_BIDIRECTIONAL);
 
 	/* Unmap chunks, if any. */
 	for (i = 1; i < num_tbs; i++) {
@@ -964,7 +964,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	 * within command buffer array. */
 	txcmd_phys = pci_map_single(priv->pci_dev,
 				    out_cmd, sizeof(struct iwl_cmd),
-				    PCI_DMA_TODEVICE);
+				    PCI_DMA_BIDIRECTIONAL);
 	pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
 	pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
@@ -1115,7 +1115,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
 
 	phys_addr = pci_map_single(priv->pci_dev, out_cmd,
-				   len, PCI_DMA_TODEVICE);
+				   len, PCI_DMA_BIDIRECTIONAL);
 	pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
 	pci_unmap_len_set(&out_cmd->meta, len, len);
 	phys_addr += offsetof(struct iwl_cmd, hdr);
@@ -1212,7 +1212,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
 	pci_unmap_single(priv->pci_dev,
 		pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
 		pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
-		PCI_DMA_TODEVICE);
+		PCI_DMA_BIDIRECTIONAL);
 
 	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
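
All four hunks switch the command-buffer DMA mappings from PCI_DMA_TODEVICE to PCI_DMA_BIDIRECTIONAL. Two constraints are in play: the unmap direction must match the one used at map time, and a TODEVICE mapping does not permit device writes to the buffer (an IOMMU can fault them), so a buffer the hardware also writes into needs a bidirectional mapping. A minimal sketch of the matched pair, using the legacy pci_* DMA API and a hypothetical buffer:

	/* The device both reads the command and writes back into the same
	 * buffer, so map it bidirectionally, and unmap with the very same
	 * direction that was used at map time. */
	dma_addr_t dma = pci_map_single(pdev, cmd, len, PCI_DMA_BIDIRECTIONAL);
	/* ... hand dma to the device and wait for it to finish ... */
	pci_unmap_single(pdev, dma, len, PCI_DMA_BIDIRECTIONAL);
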
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index f5a662a50acb..26c536b51c5a 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -330,6 +330,14 @@ parse_dmar_table(void)
 	entry_header = (struct acpi_dmar_header *)(dmar + 1);
 	while (((unsigned long)entry_header) <
 			(((unsigned long)dmar) + dmar_tbl->length)) {
+		/* Avoid looping forever on bad ACPI tables */
+		if (entry_header->length == 0) {
+			printk(KERN_WARNING PREFIX
+				"Invalid 0-length structure\n");
+			ret = -EINVAL;
+			break;
+		}
+
 		dmar_table_print_dmar_entry(entry_header);
 
 		switch (entry_header->type) {
@@ -491,7 +499,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	int map_size;
 	u32 ver;
 	static int iommu_allocated = 0;
-	int agaw;
+	int agaw = 0;
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -507,6 +515,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+#ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
@@ -514,6 +523,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 			iommu->seq_id);
 		goto error;
 	}
+#endif
 	iommu->agaw = agaw;
 
 	/* the registers might be more than one page */
@@ -571,19 +581,49 @@ static inline void reclaim_free_desc(struct q_inval *qi)
 	}
 }
 
+static int qi_check_fault(struct intel_iommu *iommu, int index)
+{
+	u32 fault;
+	int head;
+	struct q_inval *qi = iommu->qi;
+	int wait_index = (index + 1) % QI_LENGTH;
+
+	fault = readl(iommu->reg + DMAR_FSTS_REG);
+
+	/*
+	 * If IQE happens, the head points to the descriptor associated
+	 * with the error. No new descriptors are fetched until the IQE
+	 * is cleared.
+	 */
+	if (fault & DMA_FSTS_IQE) {
+		head = readl(iommu->reg + DMAR_IQH_REG);
+		if ((head >> 4) == index) {
+			memcpy(&qi->desc[index], &qi->desc[wait_index],
+					sizeof(struct qi_desc));
+			__iommu_flush_cache(iommu, &qi->desc[index],
+					sizeof(struct qi_desc));
+			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 /*
  * Submit the queued invalidation descriptor to the remapping
  * hardware unit and wait for its completion.
  */
-void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
+	int rc = 0;
 	struct q_inval *qi = iommu->qi;
 	struct qi_desc *hw, wait_desc;
 	int wait_index, index;
 	unsigned long flags;
 
 	if (!qi)
-		return;
+		return 0;
 
 	hw = qi->desc;
 
@@ -601,7 +641,8 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
 	hw[index] = *desc;
 
-	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
 	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
 
 	hw[wait_index] = wait_desc;
@@ -612,13 +653,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
 
-	spin_lock(&iommu->register_lock);
 	/*
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
 	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-	spin_unlock(&iommu->register_lock);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
@@ -628,15 +667,21 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 		 * a deadlock where the interrupt context can wait indefinitely
 		 * for free slots in the queue.
 		 */
+		rc = qi_check_fault(iommu, index);
+		if (rc)
+			goto out;
+
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
 	}
-
-	qi->desc_status[index] = QI_DONE;
+out:
+	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
 
 	reclaim_free_desc(qi);
 	spin_unlock_irqrestore(&qi->q_lock, flags);
+
+	return rc;
 }
 
 /*
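
qi_check_fault() recovers from an Invalidation Queue Error by overwriting the faulting descriptor (the one at the hardware head) with the already-valid wait descriptor and then clearing the IQE bit so the hardware resumes fetching; the submitter sees -EINVAL. The shift by 4 works because IQH/IQT hold byte offsets while a qi_desc is 16 bytes (two u64 halves):

	/* IQH/IQT hold byte offsets into the invalidation queue; with
	 * 16-byte descriptors, offset = index << 4 and index = offset >> 4. */
	int head_index = readl(iommu->reg + DMAR_IQH_REG) >> 4;
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
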
@@ -649,13 +694,13 @@ void qi_global_iec(struct intel_iommu *iommu)
 	desc.low = QI_IEC_TYPE;
 	desc.high = 0;
 
+	/* should never fail */
 	qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		     u64 type, int non_present_entry_flush)
 {
-
 	struct qi_desc desc;
 
 	if (non_present_entry_flush) {
@@ -669,10 +714,7 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -702,10 +744,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
 		| QI_IOTLB_AM(size_order);
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 /*
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index f78371b22529..45effc5726c0 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -207,7 +207,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	return index;
 }
 
-static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 {
 	struct qi_desc desc;
 
@@ -215,7 +215,7 @@ static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 		   | QI_IEC_SELECTIVE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
+	return qi_submit_sync(&desc, iommu);
 }
 
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
@@ -283,6 +283,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 
 int modify_irte(int irq, struct irte *irte_modified)
 {
+	int rc;
 	int index;
 	struct irte *irte;
 	struct intel_iommu *iommu;
@@ -303,14 +304,15 @@ int modify_irte(int irq, struct irte *irte_modified)
 	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
 	__iommu_flush_cache(iommu, irte, sizeof(*irte));
 
-	qi_flush_iec(iommu, index, 0);
-
+	rc = qi_flush_iec(iommu, index, 0);
 	spin_unlock(&irq_2_ir_lock);
-	return 0;
+
+	return rc;
 }
 
 int flush_irte(int irq)
 {
+	int rc;
 	int index;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
@@ -326,10 +328,10 @@ int flush_irte(int irq)
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	spin_unlock(&irq_2_ir_lock);
 
-	return 0;
+	return rc;
 }
 
 struct intel_iommu *map_ioapic_to_ir(int apic)
@@ -355,6 +357,7 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
 
 int free_irte(int irq)
 {
+	int rc = 0;
 	int index, i;
 	struct irte *irte;
 	struct intel_iommu *iommu;
@@ -375,7 +378,7 @@ int free_irte(int irq)
 	if (!irq_iommu->sub_handle) {
 		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
 			set_64bit((unsigned long *)irte, 0);
-		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	}
 
 	irq_iommu->iommu = NULL;
@@ -385,7 +388,7 @@ int free_irte(int irq)
 
 	spin_unlock(&irq_2_ir_lock);
 
-	return 0;
+	return rc;
 }
 
 static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
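
Every converted caller above follows the same propagation shape: capture the qi_flush_iec() result while irq_2_ir_lock is held, drop the lock, then return the result rather than a hard-coded 0. A minimal sketch of that shape (hypothetical names):

	int rc;

	spin_lock(&lock);
	/* ... update the table entry ... */
	rc = flush_hw(iommu, index);	/* may report a queue fault */
	spin_unlock(&lock);

	return rc;			/* propagate, do not assume success */
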
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index c4f6c101dbcd..d2e3cbfba14f 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -194,6 +194,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 /* FSTS_REG */
 #define DMA_FSTS_PPF ((u32)2)
 #define DMA_FSTS_PFO ((u32)1)
+#define DMA_FSTS_IQE (1 << 4)
 #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
 
 /* FRCD_REG, 32 bits access */
@@ -328,7 +329,7 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 			  unsigned int size_order, u64 type,
 			  int non_present_entry_flush);
 
-extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
 extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);