Diffstat (limited to 'drivers/iommu/dmar.c')
 -rw-r--r--  drivers/iommu/dmar.c | 248
 1 file changed, 49 insertions(+), 199 deletions(-)
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 3dc9befa5aec..35c1e17fce1d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -46,7 +46,7 @@
  */
 LIST_HEAD(dmar_drhd_units);
 
-static struct acpi_table_header * __initdata dmar_tbl;
+struct acpi_table_header * __initdata dmar_tbl;
 static acpi_size dmar_tbl_size;
 
 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
@@ -118,8 +118,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
         return 0;
 }
 
-static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
+int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
                                        struct pci_dev ***devices, u16 segment)
 {
         struct acpi_dmar_device_scope *scope;
         void * tmp = start;
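
Note: with static dropped from dmar_tbl and dmar_parse_dev_scope() in the two hunks above, both symbols become visible outside dmar.c. A sketch of the companion extern declarations the rest of the tree would then need; the signatures are copied from the hunks, but the exact header they land in (likely include/linux/dmar.h) and its guards are outside this diffstat and are an assumption here:

    /* Assumed companion declarations, not shown in this diffstat. */
    extern struct acpi_table_header *dmar_tbl;
    extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
                                    struct pci_dev ***devices, u16 segment);
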
@@ -217,133 +217,6 @@ static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
         return ret;
 }
 
-#ifdef CONFIG_DMAR
-LIST_HEAD(dmar_rmrr_units);
-
-static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
-{
-        list_add(&rmrr->list, &dmar_rmrr_units);
-}
-
-
-static int __init
-dmar_parse_one_rmrr(struct acpi_dmar_header *header)
-{
-        struct acpi_dmar_reserved_memory *rmrr;
-        struct dmar_rmrr_unit *rmrru;
-
-        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
-        if (!rmrru)
-                return -ENOMEM;
-
-        rmrru->hdr = header;
-        rmrr = (struct acpi_dmar_reserved_memory *)header;
-        rmrru->base_address = rmrr->base_address;
-        rmrru->end_address = rmrr->end_address;
-
-        dmar_register_rmrr_unit(rmrru);
-        return 0;
-}
-
-static int __init
-rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
-{
-        struct acpi_dmar_reserved_memory *rmrr;
-        int ret;
-
-        rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
-        ret = dmar_parse_dev_scope((void *)(rmrr + 1),
-                ((void *)rmrr) + rmrr->header.length,
-                &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
-
-        if (ret || (rmrru->devices_cnt == 0)) {
-                list_del(&rmrru->list);
-                kfree(rmrru);
-        }
-        return ret;
-}
-
-static LIST_HEAD(dmar_atsr_units);
-
-static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
-{
-        struct acpi_dmar_atsr *atsr;
-        struct dmar_atsr_unit *atsru;
-
-        atsr = container_of(hdr, struct acpi_dmar_atsr, header);
-        atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
-        if (!atsru)
-                return -ENOMEM;
-
-        atsru->hdr = hdr;
-        atsru->include_all = atsr->flags & 0x1;
-
-        list_add(&atsru->list, &dmar_atsr_units);
-
-        return 0;
-}
-
-static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
-{
-        int rc;
-        struct acpi_dmar_atsr *atsr;
-
-        if (atsru->include_all)
-                return 0;
-
-        atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
-        rc = dmar_parse_dev_scope((void *)(atsr + 1),
-                                (void *)atsr + atsr->header.length,
-                                &atsru->devices_cnt, &atsru->devices,
-                                atsr->segment);
-        if (rc || !atsru->devices_cnt) {
-                list_del(&atsru->list);
-                kfree(atsru);
-        }
-
-        return rc;
-}
-
-int dmar_find_matched_atsr_unit(struct pci_dev *dev)
-{
-        int i;
-        struct pci_bus *bus;
-        struct acpi_dmar_atsr *atsr;
-        struct dmar_atsr_unit *atsru;
-
-        dev = pci_physfn(dev);
-
-        list_for_each_entry(atsru, &dmar_atsr_units, list) {
-                atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
-                if (atsr->segment == pci_domain_nr(dev->bus))
-                        goto found;
-        }
-
-        return 0;
-
-found:
-        for (bus = dev->bus; bus; bus = bus->parent) {
-                struct pci_dev *bridge = bus->self;
-
-                if (!bridge || !pci_is_pcie(bridge) ||
-                    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
-                        return 0;
-
-                if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
-                        for (i = 0; i < atsru->devices_cnt; i++)
-                                if (atsru->devices[i] == bridge)
-                                        return 1;
-                        break;
-                }
-        }
-
-        if (atsru->include_all)
-                return 1;
-
-        return 0;
-}
-#endif
-
 #ifdef CONFIG_ACPI_NUMA
 static int __init
 dmar_parse_one_rhsa(struct acpi_dmar_header *header)
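
Note: the deleted block is the CONFIG_DMAR-only RMRR and ATSR bookkeeping. parse_dmar_table() below still calls dmar_parse_one_rmrr() and dmar_parse_one_atsr(), and dmar_dev_scope_init() now calls dmar_parse_rmrr_atsr_dev(), so this code moves out of dmar.c rather than disappearing (presumably into intel-iommu.c, which is outside this diffstat). A sketch of what the consolidated device-scope hook would look like, reassembled from the two loops deleted from dmar_dev_scope_init() further down; this is an illustrative reconstruction, not the actual intel-iommu.c code:

    int __init dmar_parse_rmrr_atsr_dev(void)
    {
            struct dmar_rmrr_unit *rmrr, *rmrr_n;
            struct dmar_atsr_unit *atsr, *atsr_n;
            int ret;

            /* Same walk the old #ifdef CONFIG_DMAR block did in place. */
            list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
                    ret = rmrr_parse_dev(rmrr);
                    if (ret)
                            return ret;
            }

            list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
                    ret = atsr_parse_dev(atsr);
                    if (ret)
                            return ret;
            }

            return 0;
    }
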
@@ -484,14 +357,10 @@ parse_dmar_table(void)
                 ret = dmar_parse_one_drhd(entry_header);
                 break;
         case ACPI_DMAR_TYPE_RESERVED_MEMORY:
-#ifdef CONFIG_DMAR
                 ret = dmar_parse_one_rmrr(entry_header);
-#endif
                 break;
         case ACPI_DMAR_TYPE_ATSR:
-#ifdef CONFIG_DMAR
                 ret = dmar_parse_one_atsr(entry_header);
-#endif
                 break;
         case ACPI_DMAR_HARDWARE_AFFINITY:
 #ifdef CONFIG_ACPI_NUMA
@@ -557,34 +426,31 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
 
 int __init dmar_dev_scope_init(void)
 {
+        static int dmar_dev_scope_initialized;
         struct dmar_drhd_unit *drhd, *drhd_n;
         int ret = -ENODEV;
 
+        if (dmar_dev_scope_initialized)
+                return dmar_dev_scope_initialized;
+
+        if (list_empty(&dmar_drhd_units))
+                goto fail;
+
         list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
                 ret = dmar_parse_dev(drhd);
                 if (ret)
-                        return ret;
+                        goto fail;
         }
 
-#ifdef CONFIG_DMAR
-        {
-                struct dmar_rmrr_unit *rmrr, *rmrr_n;
-                struct dmar_atsr_unit *atsr, *atsr_n;
-
-                list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
-                        ret = rmrr_parse_dev(rmrr);
-                        if (ret)
-                                return ret;
-                }
+        ret = dmar_parse_rmrr_atsr_dev();
+        if (ret)
+                goto fail;
 
-                list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
-                        ret = atsr_parse_dev(atsr);
-                        if (ret)
-                                return ret;
-                }
-        }
-#endif
+        dmar_dev_scope_initialized = 1;
+        return 0;
 
+fail:
+        dmar_dev_scope_initialized = ret;
         return ret;
 }
 
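
Note: dmar_dev_scope_init() is now idempotent. The static dmar_dev_scope_initialized latches 1 on success or the negative errno on failure, and later calls return the cached value instead of re-walking the DRHD list. The idiom in isolation, as a sketch with hypothetical names; note that a repeat call after success returns 1, not 0, so callers must treat only negative values as errors:

    static int do_init(void);          /* placeholder for the expensive parse */

    static int init_once_example(void) /* hypothetical wrapper */
    {
            static int status;         /* 0 = never run, 1 = done, <0 = -errno */
            int ret;

            if (status)
                    return status;     /* 1 (not 0) after an earlier success */

            ret = do_init();
            status = ret ? ret : 1;
            return ret;
    }
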
@@ -611,14 +477,6 @@ int __init dmar_table_init(void)
                 return -ENODEV;
         }
 
-#ifdef CONFIG_DMAR
-        if (list_empty(&dmar_rmrr_units))
-                printk(KERN_INFO PREFIX "No RMRR found\n");
-
-        if (list_empty(&dmar_atsr_units))
-                printk(KERN_INFO PREFIX "No ATSR found\n");
-#endif
-
         return 0;
 }
 
@@ -682,9 +540,6 @@ int __init check_zero_address(void)
                 return 1;
 
 failed:
-#ifdef CONFIG_DMAR
-        dmar_disabled = 1;
-#endif
         return 0;
 }
 
@@ -696,22 +551,21 @@ int __init detect_intel_iommu(void)
         if (ret)
                 ret = check_zero_address();
         {
-#ifdef CONFIG_INTR_REMAP
                 struct acpi_table_dmar *dmar;
 
                 dmar = (struct acpi_table_dmar *) dmar_tbl;
-                if (ret && cpu_has_x2apic && dmar->flags & 0x1)
+
+                if (ret && intr_remapping_enabled && cpu_has_x2apic &&
+                    dmar->flags & 0x1)
                         printk(KERN_INFO
-                               "Queued invalidation will be enabled to support "
-                               "x2apic and Intr-remapping.\n");
-#endif
-#ifdef CONFIG_DMAR
+                               "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+
                 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                         iommu_detected = 1;
                         /* Make sure ACS will be enabled */
                         pci_request_acs();
                 }
-#endif
+
 #ifdef CONFIG_X86
                 if (ret)
                         x86_init.iommu.iommu_init = intel_iommu_init;
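
Note: two things change here beyond dropping the #ifdefs. The x2apic message is now gated on the runtime intr_remapping_enabled flag rather than on CONFIG_INTR_REMAP at build time, and the split string literal is joined so the message greps as one line. The dmar->flags & 0x1 test reads bit 0 of the ACPI DMAR table's flags field, which the VT-d spec defines as the interrupt-remapping support bit; ACPICA names it ACPI_DMAR_INTR_REMAP (in include/acpi/actbl2.h, to my understanding), so an equivalent, more self-documenting form of the test would be:

    /* Sketch: same predicate as above, using the named flag bit. */
    if (ret && intr_remapping_enabled && cpu_has_x2apic &&
        (dmar->flags & ACPI_DMAR_INTR_REMAP))
            printk(KERN_INFO
                   "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
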
@@ -758,7 +612,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
                 goto err_unmap;
         }
 
-#ifdef CONFIG_DMAR
         agaw = iommu_calculate_agaw(iommu);
         if (agaw < 0) {
                 printk(KERN_ERR
@@ -773,7 +626,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
                         iommu->seq_id);
                 goto err_unmap;
         }
-#endif
         iommu->agaw = agaw;
         iommu->msagaw = msagaw;
 
@@ -800,7 +652,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
                 (unsigned long long)iommu->cap,
                 (unsigned long long)iommu->ecap);
 
-        spin_lock_init(&iommu->register_lock);
+        raw_spin_lock_init(&iommu->register_lock);
 
         drhd->iommu = iommu;
         return 0;
@@ -817,9 +669,7 @@ void free_iommu(struct intel_iommu *iommu)
         if (!iommu)
                 return;
 
-#ifdef CONFIG_DMAR
         free_dmar_iommu(iommu);
-#endif
 
         if (iommu->reg)
                 iounmap(iommu->reg);
@@ -921,11 +771,11 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 restart:
         rc = 0;
 
-        spin_lock_irqsave(&qi->q_lock, flags);
+        raw_spin_lock_irqsave(&qi->q_lock, flags);
         while (qi->free_cnt < 3) {
-                spin_unlock_irqrestore(&qi->q_lock, flags);
+                raw_spin_unlock_irqrestore(&qi->q_lock, flags);
                 cpu_relax();
-                spin_lock_irqsave(&qi->q_lock, flags);
+                raw_spin_lock_irqsave(&qi->q_lock, flags);
         }
 
         index = qi->free_head;
@@ -965,15 +815,15 @@ restart:
                 if (rc)
                         break;
 
-                spin_unlock(&qi->q_lock);
+                raw_spin_unlock(&qi->q_lock);
                 cpu_relax();
-                spin_lock(&qi->q_lock);
+                raw_spin_lock(&qi->q_lock);
         }
 
         qi->desc_status[index] = QI_DONE;
 
         reclaim_free_desc(qi);
-        spin_unlock_irqrestore(&qi->q_lock, flags);
+        raw_spin_unlock_irqrestore(&qi->q_lock, flags);
 
         if (rc == -EAGAIN)
                 goto restart;
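
Note: every spin_lock_* call on iommu->register_lock and qi->q_lock in the rest of the file converts to the raw_spin_lock_* family the same way. The point of the conversion: on PREEMPT_RT, plain spinlock_t becomes a sleeping lock, but these two locks are taken from contexts that must not sleep, such as the hard-irq fault path and the busy-wait above that polls for queued-invalidation slots while repeatedly dropping and retaking the lock. raw_spinlock_t keeps them true spinning locks in every kernel configuration. A self-contained sketch of the declaration side and the poll idiom, with an abbreviated stand-in struct:

    #include <linux/spinlock.h>

    struct qi_example {                    /* stand-in, not struct q_inval */
            raw_spinlock_t q_lock;         /* was spinlock_t before this series */
            int free_cnt;
    };

    static void qi_wait_for_slots(struct qi_example *qi)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&qi->q_lock, flags);
            while (qi->free_cnt < 3) {
                    /* drop the lock so the consumer can make progress */
                    raw_spin_unlock_irqrestore(&qi->q_lock, flags);
                    cpu_relax();
                    raw_spin_lock_irqsave(&qi->q_lock, flags);
            }
            raw_spin_unlock_irqrestore(&qi->q_lock, flags);
    }
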
@@ -1062,7 +912,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
         if (!ecap_qis(iommu->ecap))
                 return;
 
-        spin_lock_irqsave(&iommu->register_lock, flags);
+        raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
         sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
         if (!(sts & DMA_GSTS_QIES))
@@ -1082,7 +932,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                       !(sts & DMA_GSTS_QIES), sts);
 end:
-        spin_unlock_irqrestore(&iommu->register_lock, flags);
+        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 /*
@@ -1097,7 +947,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
         qi->free_head = qi->free_tail = 0;
         qi->free_cnt = QI_LENGTH;
 
-        spin_lock_irqsave(&iommu->register_lock, flags);
+        raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
         /* write zero to the tail reg */
         writel(0, iommu->reg + DMAR_IQT_REG);
@@ -1110,7 +960,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
         /* Make sure hardware complete it */
         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
 
-        spin_unlock_irqrestore(&iommu->register_lock, flags);
+        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 /*
@@ -1159,7 +1009,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
         qi->free_head = qi->free_tail = 0;
         qi->free_cnt = QI_LENGTH;
 
-        spin_lock_init(&qi->q_lock);
+        raw_spin_lock_init(&qi->q_lock);
 
         __dmar_enable_qi(iommu);
 
@@ -1225,11 +1075,11 @@ void dmar_msi_unmask(struct irq_data *data)
         unsigned long flag;
 
         /* unmask it */
-        spin_lock_irqsave(&iommu->register_lock, flag);
+        raw_spin_lock_irqsave(&iommu->register_lock, flag);
         writel(0, iommu->reg + DMAR_FECTL_REG);
         /* Read a reg to force flush the post write */
         readl(iommu->reg + DMAR_FECTL_REG);
-        spin_unlock_irqrestore(&iommu->register_lock, flag);
+        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_mask(struct irq_data *data)
@@ -1238,11 +1088,11 @@ void dmar_msi_mask(struct irq_data *data)
         struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
 
         /* mask it */
-        spin_lock_irqsave(&iommu->register_lock, flag);
+        raw_spin_lock_irqsave(&iommu->register_lock, flag);
         writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
         /* Read a reg to force flush the post write */
         readl(iommu->reg + DMAR_FECTL_REG);
-        spin_unlock_irqrestore(&iommu->register_lock, flag);
+        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_write(int irq, struct msi_msg *msg)
@@ -1250,11 +1100,11 @@ void dmar_msi_write(int irq, struct msi_msg *msg)
         struct intel_iommu *iommu = irq_get_handler_data(irq);
         unsigned long flag;
 
-        spin_lock_irqsave(&iommu->register_lock, flag);
+        raw_spin_lock_irqsave(&iommu->register_lock, flag);
         writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
         writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
         writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
-        spin_unlock_irqrestore(&iommu->register_lock, flag);
+        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_read(int irq, struct msi_msg *msg)
@@ -1262,11 +1112,11 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
         struct intel_iommu *iommu = irq_get_handler_data(irq);
         unsigned long flag;
 
-        spin_lock_irqsave(&iommu->register_lock, flag);
+        raw_spin_lock_irqsave(&iommu->register_lock, flag);
         msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
         msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
         msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
-        spin_unlock_irqrestore(&iommu->register_lock, flag);
+        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
@@ -1303,7 +1153,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
         u32 fault_status;
         unsigned long flag;
 
-        spin_lock_irqsave(&iommu->register_lock, flag);
+        raw_spin_lock_irqsave(&iommu->register_lock, flag);
         fault_status = readl(iommu->reg + DMAR_FSTS_REG);
         if (fault_status)
                 printk(KERN_ERR "DRHD: handling fault status reg %x\n",
@@ -1342,7 +1192,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
                 writel(DMA_FRCD_F, iommu->reg + reg +
                        fault_index * PRIMARY_FAULT_REG_LEN + 12);
 
-                spin_unlock_irqrestore(&iommu->register_lock, flag);
+                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 
                 dmar_fault_do_one(iommu, type, fault_reason,
                                 source_id, guest_addr);
@@ -1350,14 +1200,14 @@
                 fault_index++;
                 if (fault_index >= cap_num_fault_regs(iommu->cap))
                         fault_index = 0;
-                spin_lock_irqsave(&iommu->register_lock, flag);
+                raw_spin_lock_irqsave(&iommu->register_lock, flag);
         }
 clear_rest:
         /* clear all the other faults */
         fault_status = readl(iommu->reg + DMAR_FSTS_REG);
         writel(fault_status, iommu->reg + DMAR_FSTS_REG);
 
-        spin_unlock_irqrestore(&iommu->register_lock, flag);
+        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
         return IRQ_HANDLED;
 }
 
@@ -1388,7 +1238,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
                 return ret;
         }
 
-        ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
+        ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
         if (ret)
                 printk(KERN_ERR "IOMMU: can't request irq\n");
         return ret;
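
Note: IRQF_NO_THREAD marks the DMAR fault interrupt as non-threadable. With forced interrupt threading (the threadirqs boot option, or PREEMPT_RT), dmar_fault() would otherwise be moved into a handler thread, which does not fit a handler that takes the now-raw register_lock and must report faults immediately from hard-irq context. A minimal self-contained sketch of the pairing, with hypothetical names standing in for the intel_iommu types:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct example_dev {                   /* stand-in for struct intel_iommu */
            raw_spinlock_t register_lock;
    };

    static irqreturn_t example_fault(int irq, void *dev_id)
    {
            struct example_dev *d = dev_id;
            unsigned long flag;

            /* raw lock: this handler must stay in hard-irq context */
            raw_spin_lock_irqsave(&d->register_lock, flag);
            /* read and acknowledge fault status registers here */
            raw_spin_unlock_irqrestore(&d->register_lock, flag);
            return IRQ_HANDLED;
    }

    /* Requested exactly as in the hunk above:
     * ret = request_irq(irq, example_fault, IRQF_NO_THREAD, "example", d);
     */
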
