Diffstat (limited to 'drivers/pci/dmar.c')
-rw-r--r--	drivers/pci/dmar.c	491
1 file changed, 454 insertions(+), 37 deletions(-)
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index f941f609dbf..691b3adeb87 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -19,15 +19,18 @@
  * Author: Shaohua Li <shaohua.li@intel.com>
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  *
- * This file implements early detection/parsing of DMA Remapping Devices
+ * This file implements early detection/parsing of Remapping Devices
  * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
  * tables.
+ *
+ * These routines are used by both DMA-remapping and Interrupt-remapping
  */
 
 #include <linux/pci.h>
 #include <linux/dmar.h>
-#include "iova.h"
-#include "intel-iommu.h"
+#include <linux/iova.h>
+#include <linux/intel-iommu.h>
+#include <linux/timer.h>
 
 #undef PREFIX
 #define PREFIX "DMAR:"
@@ -37,7 +40,6 @@
  * these units are not supported by the architecture.
  */
 LIST_HEAD(dmar_drhd_units);
-LIST_HEAD(dmar_rmrr_units);
 
 static struct acpi_table_header * __initdata dmar_tbl;
 
@@ -53,11 +55,6 @@ static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 	list_add(&drhd->list, &dmar_drhd_units);
 }
 
-static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
-{
-	list_add(&rmrr->list, &dmar_rmrr_units);
-}
-
 static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
 					   struct pci_dev **dev, u16 segment)
 {
@@ -172,19 +169,36 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 	struct acpi_dmar_hardware_unit *drhd;
 	struct dmar_drhd_unit *dmaru;
 	int ret = 0;
-	static int include_all;
 
 	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
 	if (!dmaru)
 		return -ENOMEM;
 
+	dmaru->hdr = header;
 	drhd = (struct acpi_dmar_hardware_unit *)header;
 	dmaru->reg_base_addr = drhd->address;
 	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
 
+	ret = alloc_iommu(dmaru);
+	if (ret) {
+		kfree(dmaru);
+		return ret;
+	}
+	dmar_register_drhd_unit(dmaru);
+	return 0;
+}
+
+static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
+{
+	struct acpi_dmar_hardware_unit *drhd;
+	static int include_all;
+	int ret = 0;
+
+	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
+
 	if (!dmaru->include_all)
 		ret = dmar_parse_dev_scope((void *)(drhd + 1),
-				((void *)drhd) + header->length,
+				((void *)drhd) + drhd->header.length,
 				&dmaru->devices_cnt, &dmaru->devices,
 				drhd->segment);
 	else {
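
Note: the hunk above splits dmar_parse_one_drhd() into an early phase (record
the DRHD and run alloc_iommu()) and a later phase, dmar_parse_dev(), which
resolves the device scope and therefore needs the PCI subsystem to be up. A
minimal sketch of the resulting flow; dmar_init_all() is a hypothetical helper
for illustration only, and in the kernel the two calls actually happen at
different boot stages:

	/* Hypothetical helper, not part of the patch. */
	static int __init dmar_init_all(void)
	{
		int ret = dmar_table_init();	/* early: DRHDs, alloc_iommu() */

		if (ret)
			return ret;

		/* later: bind each scope entry to a struct pci_dev */
		return dmar_dev_scope_init();
	}
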
@@ -197,37 +211,59 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 		include_all = 1;
 	}
 
-	if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all))
+	if (ret) {
+		list_del(&dmaru->list);
 		kfree(dmaru);
-	else
-		dmar_register_drhd_unit(dmaru);
+	}
 	return ret;
 }
 
+#ifdef CONFIG_DMAR
+LIST_HEAD(dmar_rmrr_units);
+
+static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
+{
+	list_add(&rmrr->list, &dmar_rmrr_units);
+}
+
+
 static int __init
 dmar_parse_one_rmrr(struct acpi_dmar_header *header)
 {
 	struct acpi_dmar_reserved_memory *rmrr;
 	struct dmar_rmrr_unit *rmrru;
-	int ret = 0;
 
 	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
 	if (!rmrru)
 		return -ENOMEM;
 
+	rmrru->hdr = header;
 	rmrr = (struct acpi_dmar_reserved_memory *)header;
 	rmrru->base_address = rmrr->base_address;
 	rmrru->end_address = rmrr->end_address;
+
+	dmar_register_rmrr_unit(rmrru);
+	return 0;
+}
+
+static int __init
+rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
+{
+	struct acpi_dmar_reserved_memory *rmrr;
+	int ret;
+
+	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
 	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
-		((void *)rmrr) + header->length,
+		((void *)rmrr) + rmrr->header.length,
 		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
 
-	if (ret || (rmrru->devices_cnt == 0))
+	if (ret || (rmrru->devices_cnt == 0)) {
+		list_del(&rmrru->list);
 		kfree(rmrru);
-	else
-		dmar_register_rmrr_unit(rmrru);
+	}
 	return ret;
 }
+#endif
 
 static void __init
 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
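
Note: the RMRR list and its registration helper move under #ifdef CONFIG_DMAR,
so an interrupt-remapping-only build keeps DRHD parsing and the queued
invalidation code below without the reserved-memory handling. Roughly, as a
sketch of the resulting split:

	/*
	 * CONFIG_DMAR:       DRHD + RMRR parsing, DMA remapping
	 * CONFIG_INTR_REMAP: DRHD parsing + queued invalidation only;
	 *                    RMRRs matter only for DMA remapping
	 */
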
@@ -240,19 +276,39 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
 		drhd = (struct acpi_dmar_hardware_unit *)header;
 		printk (KERN_INFO PREFIX
 			"DRHD (flags: 0x%08x)base: 0x%016Lx\n",
-			drhd->flags, drhd->address);
+			drhd->flags, (unsigned long long)drhd->address);
 		break;
 	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
 		rmrr = (struct acpi_dmar_reserved_memory *)header;
 
 		printk (KERN_INFO PREFIX
 			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
-			rmrr->base_address, rmrr->end_address);
+			(unsigned long long)rmrr->base_address,
+			(unsigned long long)rmrr->end_address);
 		break;
 	}
 }
 
 /**
+ * dmar_table_detect - checks to see if the platform supports DMAR devices
+ */
+static int __init dmar_table_detect(void)
+{
+	acpi_status status = AE_OK;
+
+	/* if we could find DMAR table, then there are DMAR devices */
+	status = acpi_get_table(ACPI_SIG_DMAR, 0,
+				(struct acpi_table_header **)&dmar_tbl);
+
+	if (ACPI_SUCCESS(status) && !dmar_tbl) {
+		printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+		status = AE_NOT_FOUND;
+	}
+
+	return (ACPI_SUCCESS(status) ? 1 : 0);
+}
+
+/**
  * parse_dmar_table - parses the DMA reporting table
  */
 static int __init
@@ -262,11 +318,17 @@ parse_dmar_table(void)
 	struct acpi_dmar_header *entry_header;
 	int ret = 0;
 
+	/*
+	 * Do it again, earlier dmar_tbl mapping could be mapped with
+	 * fixed map.
+	 */
+	dmar_table_detect();
+
 	dmar = (struct acpi_table_dmar *)dmar_tbl;
 	if (!dmar)
 		return -ENODEV;
 
-	if (dmar->width < PAGE_SHIFT_4K - 1) {
+	if (dmar->width < PAGE_SHIFT - 1) {
 		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
 		return -EINVAL;
 	}
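
Note: parse_dmar_table() now re-runs dmar_table_detect() because the first
detection can happen very early, when the ACPI table may only be reachable
through a temporary fixed mapping; detect_intel_iommu() (added further down)
also sets dmar_tbl back to NULL before returning. A sketch of the sequence as
this patch arranges it:

	/*
	 * detect_intel_iommu()          very early, dmar_tbl via fixed map,
	 *     dmar_table_detect()       cleared again before returning
	 * dmar_table_init()
	 *     parse_dmar_table()
	 *         dmar_table_detect()   re-acquire a stable mapping
	 */
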
@@ -284,7 +346,9 @@ parse_dmar_table(void)
 			ret = dmar_parse_one_drhd(entry_header);
 			break;
 		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
+#ifdef CONFIG_DMAR
 			ret = dmar_parse_one_rmrr(entry_header);
+#endif
 			break;
 		default:
 			printk(KERN_WARNING PREFIX
@@ -300,15 +364,77 @@ parse_dmar_table(void)
 	return ret;
 }
 
+int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
+			  struct pci_dev *dev)
+{
+	int index;
 
-int __init dmar_table_init(void)
+	while (dev) {
+		for (index = 0; index < cnt; index++)
+			if (dev == devices[index])
+				return 1;
+
+		/* Check our parent */
+		dev = dev->bus->self;
+	}
+
+	return 0;
+}
+
+struct dmar_drhd_unit *
+dmar_find_matched_drhd_unit(struct pci_dev *dev)
 {
+	struct dmar_drhd_unit *drhd = NULL;
+
+	list_for_each_entry(drhd, &dmar_drhd_units, list) {
+		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
+						drhd->devices_cnt, dev))
+			return drhd;
+	}
+
+	return NULL;
+}
+
+int __init dmar_dev_scope_init(void)
+{
+	struct dmar_drhd_unit *drhd, *drhd_n;
+	int ret = -ENODEV;
+
+	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
+		ret = dmar_parse_dev(drhd);
+		if (ret)
+			return ret;
+	}
+
+#ifdef CONFIG_DMAR
+	{
+		struct dmar_rmrr_unit *rmrr, *rmrr_n;
+		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
+			ret = rmrr_parse_dev(rmrr);
+			if (ret)
+				return ret;
+		}
+	}
+#endif
+
+	return ret;
+}
+
 
+int __init dmar_table_init(void)
+{
+	static int dmar_table_initialized;
 	int ret;
 
+	if (dmar_table_initialized)
+		return 0;
+
+	dmar_table_initialized = 1;
+
 	ret = parse_dmar_table();
 	if (ret) {
-		printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
+		if (ret != -ENODEV)
+			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
 		return ret;
 	}
 
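
Note: dmar_pci_device_match() walks up the PCI hierarchy through
dev->bus->self, so a device behind a bridge matches a DRHD whose scope lists
only the bridge. A hypothetical consumer of dmar_find_matched_drhd_unit();
iommu_for_dev() is illustrative only and not part of the patch:

	static struct intel_iommu *iommu_for_dev(struct pci_dev *pdev)
	{
		struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(pdev);

		/* drhd->iommu was set up by alloc_iommu() */
		return drhd ? drhd->iommu : NULL;
	}
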
@@ -317,29 +443,320 @@ int __init dmar_table_init(void)
 		return -ENODEV;
 	}
 
-	if (list_empty(&dmar_rmrr_units)) {
+#ifdef CONFIG_DMAR
+	if (list_empty(&dmar_rmrr_units))
 		printk(KERN_INFO PREFIX "No RMRR found\n");
-		return -ENODEV;
+#endif
+
+#ifdef CONFIG_INTR_REMAP
+	parse_ioapics_under_ir();
+#endif
+	return 0;
+}
+
+void __init detect_intel_iommu(void)
+{
+	int ret;
+
+	ret = dmar_table_detect();
+
+	{
+#ifdef CONFIG_INTR_REMAP
+		struct acpi_table_dmar *dmar;
+		/*
+		 * for now we will disable dma-remapping when interrupt
+		 * remapping is enabled.
+		 * When support for queued invalidation for IOTLB invalidation
+		 * is added, we will not need this any more.
+		 */
+		dmar = (struct acpi_table_dmar *) dmar_tbl;
+		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
+			printk(KERN_INFO
+			       "Queued invalidation will be enabled to support "
+			       "x2apic and Intr-remapping.\n");
+#endif
+#ifdef CONFIG_DMAR
+		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
+		    !dmar_disabled)
+			iommu_detected = 1;
+#endif
+	}
+	dmar_tbl = NULL;
+}
+
+
+int alloc_iommu(struct dmar_drhd_unit *drhd)
+{
+	struct intel_iommu *iommu;
+	int map_size;
+	u32 ver;
+	static int iommu_allocated = 0;
+
+	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+	if (!iommu)
+		return -ENOMEM;
+
+	iommu->seq_id = iommu_allocated++;
+
+	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
+	if (!iommu->reg) {
+		printk(KERN_ERR "IOMMU: can't map the region\n");
+		goto error;
 	}
+	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
+	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+
+	/* the registers might be more than one page */
+	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+		cap_max_fault_reg_offset(iommu->cap));
+	map_size = VTD_PAGE_ALIGN(map_size);
+	if (map_size > VTD_PAGE_SIZE) {
+		iounmap(iommu->reg);
+		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
+		if (!iommu->reg) {
+			printk(KERN_ERR "IOMMU: can't map the region\n");
+			goto error;
+		}
+	}
+
+	ver = readl(iommu->reg + DMAR_VER_REG);
+	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+		(unsigned long long)drhd->reg_base_addr,
+		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
+		(unsigned long long)iommu->cap,
+		(unsigned long long)iommu->ecap);
+
+	spin_lock_init(&iommu->register_lock);
 
+	drhd->iommu = iommu;
 	return 0;
+error:
+	kfree(iommu);
+	return -1;
 }
 
-/**
- * early_dmar_detect - checks to see if the platform supports DMAR devices
+void free_iommu(struct intel_iommu *iommu)
+{
+	if (!iommu)
+		return;
+
+#ifdef CONFIG_DMAR
+	free_dmar_iommu(iommu);
+#endif
+
+	if (iommu->reg)
+		iounmap(iommu->reg);
+	kfree(iommu);
+}
+
+/*
+ * Reclaim all the submitted descriptors which have completed its work.
  */
-int __init early_dmar_detect(void)
+static inline void reclaim_free_desc(struct q_inval *qi)
 {
-	acpi_status status = AE_OK;
+	while (qi->desc_status[qi->free_tail] == QI_DONE) {
+		qi->desc_status[qi->free_tail] = QI_FREE;
+		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
+		qi->free_cnt++;
+	}
+}
 
-	/* if we could find DMAR table, then there are DMAR devices */
-	status = acpi_get_table(ACPI_SIG_DMAR, 0,
-			(struct acpi_table_header **)&dmar_tbl);
+/*
+ * Submit the queued invalidation descriptor to the remapping
+ * hardware unit and wait for its completion.
+ */
+void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+{
+	struct q_inval *qi = iommu->qi;
+	struct qi_desc *hw, wait_desc;
+	int wait_index, index;
+	unsigned long flags;
 
-	if (ACPI_SUCCESS(status) && !dmar_tbl) {
-		printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
-		status = AE_NOT_FOUND;
+	if (!qi)
+		return;
+
+	hw = qi->desc;
+
+	spin_lock_irqsave(&qi->q_lock, flags);
+	while (qi->free_cnt < 3) {
+		spin_unlock_irqrestore(&qi->q_lock, flags);
+		cpu_relax();
+		spin_lock_irqsave(&qi->q_lock, flags);
 	}
 
-	return (ACPI_SUCCESS(status) ? 1 : 0);
+	index = qi->free_head;
+	wait_index = (index + 1) % QI_LENGTH;
+
+	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
+
+	hw[index] = *desc;
+
+	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
+
+	hw[wait_index] = wait_desc;
+
+	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
+	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
+
+	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
+	qi->free_cnt -= 2;
+
+	spin_lock(&iommu->register_lock);
+	/*
+	 * update the HW tail register indicating the presence of
+	 * new descriptors.
+	 */
+	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
+	spin_unlock(&iommu->register_lock);
+
+	while (qi->desc_status[wait_index] != QI_DONE) {
+		/*
+		 * We will leave the interrupts disabled, to prevent interrupt
+		 * context to queue another cmd while a cmd is already submitted
+		 * and waiting for completion on this cpu. This is to avoid
+		 * a deadlock where the interrupt context can wait indefinitely
+		 * for free slots in the queue.
+		 */
+		spin_unlock(&qi->q_lock);
+		cpu_relax();
+		spin_lock(&qi->q_lock);
+	}
+
+	qi->desc_status[index] = QI_DONE;
+
+	reclaim_free_desc(qi);
+	spin_unlock_irqrestore(&qi->q_lock, flags);
+}
+
+/*
+ * Flush the global interrupt entry cache.
+ */
+void qi_global_iec(struct intel_iommu *iommu)
+{
+	struct qi_desc desc;
+
+	desc.low = QI_IEC_TYPE;
+	desc.high = 0;
+
+	qi_submit_sync(&desc, iommu);
+}
+
+int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+		     u64 type, int non_present_entry_flush)
+{
+
+	struct qi_desc desc;
+
+	if (non_present_entry_flush) {
+		if (!cap_caching_mode(iommu->cap))
+			return 1;
+		else
+			did = 0;
+	}
+
+	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
+		| QI_CC_GRAN(type) | QI_CC_TYPE;
+	desc.high = 0;
+
+	qi_submit_sync(&desc, iommu);
+
+	return 0;
+
+}
+
+int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+		   unsigned int size_order, u64 type,
+		   int non_present_entry_flush)
+{
+	u8 dw = 0, dr = 0;
+
+	struct qi_desc desc;
+	int ih = 0;
+
+	if (non_present_entry_flush) {
+		if (!cap_caching_mode(iommu->cap))
+			return 1;
+		else
+			did = 0;
+	}
+
+	if (cap_write_drain(iommu->cap))
+		dw = 1;
+
+	if (cap_read_drain(iommu->cap))
+		dr = 1;
+
+	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
+		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
+	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
+		| QI_IOTLB_AM(size_order);
+
+	qi_submit_sync(&desc, iommu);
+
+	return 0;
+
+}
+
+/*
+ * Enable Queued Invalidation interface. This is a must to support
+ * interrupt-remapping. Also used by DMA-remapping, which replaces
+ * register based IOTLB invalidation.
+ */
+int dmar_enable_qi(struct intel_iommu *iommu)
+{
+	u32 cmd, sts;
+	unsigned long flags;
+	struct q_inval *qi;
+
+	if (!ecap_qis(iommu->ecap))
+		return -ENOENT;
+
+	/*
+	 * queued invalidation is already setup and enabled.
+	 */
+	if (iommu->qi)
+		return 0;
+
+	iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
+	if (!iommu->qi)
+		return -ENOMEM;
+
+	qi = iommu->qi;
+
+	qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
+	if (!qi->desc) {
+		kfree(qi);
+		iommu->qi = 0;
+		return -ENOMEM;
+	}
+
+	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
+	if (!qi->desc_status) {
+		free_page((unsigned long) qi->desc);
+		kfree(qi);
+		iommu->qi = 0;
+		return -ENOMEM;
+	}
+
+	qi->free_head = qi->free_tail = 0;
+	qi->free_cnt = QI_LENGTH;
+
+	spin_lock_init(&qi->q_lock);
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+	/* write zero to the tail reg */
+	writel(0, iommu->reg + DMAR_IQT_REG);
+
+	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+
+	cmd = iommu->gcmd | DMA_GCMD_QIE;
+	iommu->gcmd |= DMA_GCMD_QIE;
+	writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+	/* Make sure hardware complete it */
+	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+	return 0;
 }
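
Note: the queued invalidation code above keeps a one-page ring of 16-byte
descriptors. qi_submit_sync() always consumes two slots, the caller's
descriptor plus an invalidation-wait descriptor whose status write-back the
CPU polls, and reclaim_free_desc() retires completed slots from free_tail. A
minimal userspace model of just the free-list arithmetic, assuming QI_LENGTH
is 256 (one 4K page of descriptors); the names mirror struct q_inval, but
this is a sketch, not kernel code:

	#include <assert.h>

	#define QI_LENGTH 256	/* assumed: 4K page / 16-byte descriptor */

	enum { QI_FREE, QI_IN_USE, QI_DONE };

	static int desc_status[QI_LENGTH];
	static int free_head, free_tail, free_cnt = QI_LENGTH;

	static int submit_pair(void)	/* one real + one wait descriptor */
	{
		int index = free_head;
		int wait_index = (index + 1) % QI_LENGTH;

		if (free_cnt < 3)	/* qi_submit_sync() spins here */
			return -1;

		desc_status[index] = desc_status[wait_index] = QI_IN_USE;
		free_head = (free_head + 2) % QI_LENGTH;
		free_cnt -= 2;
		return wait_index;
	}

	static void complete(int index, int wait_index)
	{
		desc_status[wait_index] = QI_DONE;	/* hardware status write */
		desc_status[index] = QI_DONE;		/* done by the submitter */

		/* mirrors reclaim_free_desc() */
		while (desc_status[free_tail] == QI_DONE) {
			desc_status[free_tail] = QI_FREE;
			free_tail = (free_tail + 1) % QI_LENGTH;
			free_cnt++;
		}
	}

	int main(void)
	{
		int w = submit_pair();

		assert(w == 1 && free_cnt == QI_LENGTH - 2);
		complete(0, w);
		assert(free_cnt == QI_LENGTH);
		return 0;
	}
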