author     Joerg Roedel <joerg.roedel@amd.com>      2008-06-26 15:27:55 -0400
committer  Ingo Molnar <mingo@elte.hu>              2008-06-27 04:12:15 -0400
commit     a19ae1eccfb2d97f4704b1a2b3d1d9905845dcac (patch)
tree       c1e82d5281c8a6cbf593f3d2cd5f4c7994610b0d /arch/x86/kernel/amd_iommu.c
parent     000fca2dfcfbd2859fed1208c38c899ab4873049 (diff)
x86, AMD IOMMU: add functions to send IOMMU commands
This patch adds a generic command handling function as well as all the
functions for sending the specific commands to the IOMMU hardware that this
driver requires.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: iommu@lists.linux-foundation.org
Cc: bhavna.sarathy@amd.com
Cc: Sebastian.Biemueller@amd.com
Cc: robert.richter@amd.com
Cc: joro@8bytes.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
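
Editor's note: below is a minimal, self-contained userspace sketch of the ring-buffer discipline that __iommu_queue_command() in the diff applies to the real hardware command buffer via readl()/writel() and memcpy_toio(): a command is copied into the slot at the current tail, the tail advances modulo the buffer size, and the ring counts as full when the advanced tail would collide with the head that the IOMMU consumes from. The slot count, the global head/tail variables, queue_command() and main() are invented here purely for illustration and are not part of the kernel driver.

/*
 * Standalone model of the command ring used by __iommu_queue_command().
 * Plain memory stands in for the MMIO command buffer; the buffer size and
 * the demo in main() are made up for illustration only.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct command {
	uint32_t data[4];
};

#define CMD_BUF_SIZE	(8 * sizeof(struct command))	/* 8 slots, illustrative */

static uint8_t cmd_buf[CMD_BUF_SIZE];
static uint32_t head;	/* advanced by the (simulated) hardware */
static uint32_t tail;	/* advanced by the driver */

static int queue_command(struct command *cmd)
{
	uint32_t next_tail;

	/* Copy the command into the slot at the current tail. */
	memcpy(cmd_buf + tail, cmd, sizeof(*cmd));
	next_tail = (tail + sizeof(*cmd)) % CMD_BUF_SIZE;

	/*
	 * Ring full: advancing tail would make it collide with head.  The
	 * tail pointer is not published, so the consumer never sees the
	 * command that was just copied -- the same order of operations the
	 * kernel function uses before writing MMIO_CMD_TAIL_OFFSET.
	 */
	if (next_tail == head)
		return -ENOMEM;

	tail = next_tail;
	return 0;
}

int main(void)
{
	struct command cmd = { .data = { 0x1, 0, 0, 0 } };
	int i;

	/* With head stuck at 0, the 8-slot ring accepts 7 commands. */
	for (i = 0; i < 10; i++)
		printf("command %d: %s\n", i,
		       queue_command(&cmd) ? "ring full" : "queued");

	return 0;
}

In the kernel version the same steps run under iommu->lock and use memcpy_toio() and writel(), so it is the write of the tail register that makes the queued command visible to the IOMMU.
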
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c  106
1 file changed, 106 insertions, 0 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 90392c7b253b..a24ee4a5203a 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -37,4 +37,110 @@ struct command {
 	u32 data[4];
 };
 
+static int __iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
+{
+	u32 tail, head;
+	u8 *target;
+
+	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+	target = (iommu->cmd_buf + tail);
+	memcpy_toio(target, cmd, sizeof(*cmd));
+	tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
+	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+	if (tail == head)
+		return -ENOMEM;
+	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+
+	return 0;
+}
+
+static int iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	ret = __iommu_queue_command(iommu, cmd);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	return ret;
+}
+
+static int iommu_completion_wait(struct amd_iommu *iommu)
+{
+	int ret;
+	struct command cmd;
+	volatile u64 ready = 0;
+	unsigned long ready_phys = virt_to_phys(&ready);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.data[0] = LOW_U32(ready_phys) | CMD_COMPL_WAIT_STORE_MASK;
+	cmd.data[1] = HIGH_U32(ready_phys);
+	cmd.data[2] = 1; /* value written to 'ready' */
+	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
+
+	iommu->need_sync = 0;
+
+	ret = iommu_queue_command(iommu, &cmd);
+
+	if (ret)
+		return ret;
+
+	while (!ready)
+		cpu_relax();
+
+	return 0;
+}
+
+static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
+{
+	struct command cmd;
+
+	BUG_ON(iommu == NULL);
+
+	memset(&cmd, 0, sizeof(cmd));
+	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
+	cmd.data[0] = devid;
+
+	iommu->need_sync = 1;
+
+	return iommu_queue_command(iommu, &cmd);
+}
+
+static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
+		u64 address, u16 domid, int pde, int s)
+{
+	struct command cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	address &= PAGE_MASK;
+	CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
+	cmd.data[1] |= domid;
+	cmd.data[2] = LOW_U32(address);
+	cmd.data[3] = HIGH_U32(address);
+	if (s)
+		cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+	if (pde)
+		cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+
+	iommu->need_sync = 1;
+
+	return iommu_queue_command(iommu, &cmd);
+}
+
+static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
+		u64 address, size_t size)
+{
+	int i;
+	unsigned pages = to_pages(address, size);
+
+	address &= PAGE_MASK;
+
+	for (i = 0; i < pages; ++i) {
+		iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 0);
+		address += PAGE_SIZE;
+	}
+
+	return 0;
+}
+
 