author	Len Brown <len.brown@intel.com>	2009-01-09 03:39:43 -0500
committer	Len Brown <len.brown@intel.com>	2009-01-09 03:39:43 -0500
commit	b2576e1d4408e134e2188c967b1f28af39cd79d4 (patch)
tree	004f3c82faab760f304ce031d6d2f572e7746a50 /drivers/pci
parent	3cc8a5f4ba91f67bbdb81a43a99281a26aab8d77 (diff)
parent	2150edc6c5cf00f7adb54538b9ea2a3e9cedca3f (diff)

Merge branch 'linus' into release

Diffstat (limited to 'drivers/pci')
 42 files changed, 3011 insertions(+), 1314 deletions(-)
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index e1ca42591ac4..2a4501dd2515 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -42,6 +42,15 @@ config PCI_DEBUG
 
 	  When in doubt, say N.
 
+config PCI_STUB
+	tristate "PCI Stub driver"
+	depends on PCI
+	help
+	  Say Y or M here if you want to be able to reserve a PCI device
+	  when it is going to be assigned to a guest operating system.
+
+	  When in doubt, say N.
+
 config HT_IRQ
 	bool "Interrupts on hypertransport devices"
 	default y
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index af3bfe22847b..3d07ce24f6a8 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -53,6 +53,8 @@ obj-$(CONFIG_HOTPLUG) += setup-bus.o
 
 obj-$(CONFIG_PCI_SYSCALL) += syscall.o
 
+obj-$(CONFIG_PCI_STUB) += pci-stub.o
+
 ifeq ($(CONFIG_PCI_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
 endif
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 39bb96b413ef..381444794778 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -66,6 +66,39 @@ EXPORT_SYMBOL(pci_bus_write_config_byte);
 EXPORT_SYMBOL(pci_bus_write_config_word);
 EXPORT_SYMBOL(pci_bus_write_config_dword);
 
+
+/**
+ * pci_read_vpd - Read one entry from Vital Product Data
+ * @dev: pci device struct
+ * @pos: offset in vpd space
+ * @count: number of bytes to read
+ * @buf: pointer to where to store result
+ */
+ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
+{
+	if (!dev->vpd || !dev->vpd->ops)
+		return -ENODEV;
+	return dev->vpd->ops->read(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_read_vpd);
+
+/**
+ * pci_write_vpd - Write entry to Vital Product Data
+ * @dev: pci device struct
+ * @pos: offset in vpd space
+ * @count: number of bytes to write
+ * @buf: buffer containing the data to write
+ */
+ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
+{
+	if (!dev->vpd || !dev->vpd->ops)
+		return -ENODEV;
+	return dev->vpd->ops->write(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_write_vpd);
+
 /*
  * The following routines are to prevent the user from accessing PCI config
  * space when it's unsafe to do so.  Some devices require this during BIST and
@@ -133,125 +166,145 @@ PCI_USER_WRITE_CONFIG(dword, u32)
 
 struct pci_vpd_pci22 {
 	struct pci_vpd base;
-	spinlock_t lock; /* controls access to hardware and the flags */
-	u8	cap;
+	struct mutex lock;
+	u16	flag;
 	bool	busy;
-	bool	flag; /* value of F bit to wait for */
+	u8	cap;
 };
 
-/* Wait for last operation to complete */
+/*
+ * Wait for last operation to complete.
+ * This code has to spin since there is no other notification from the PCI
+ * hardware. Since the VPD is often implemented by serial attachment to an
+ * EEPROM, it may take many milliseconds to complete.
+ */
 static int pci_vpd_pci22_wait(struct pci_dev *dev)
 {
 	struct pci_vpd_pci22 *vpd =
 		container_of(dev->vpd, struct pci_vpd_pci22, base);
-	u16 flag, status;
-	int wait;
+	unsigned long timeout = jiffies + HZ/20 + 2;
+	u16 status;
 	int ret;
 
 	if (!vpd->busy)
 		return 0;
 
-	flag = vpd->flag ? PCI_VPD_ADDR_F : 0;
-	wait = vpd->flag ? 10 : 1000; /* read: 100 us; write: 10 ms */
 	for (;;) {
-		ret = pci_user_read_config_word(dev,
-						vpd->cap + PCI_VPD_ADDR,
+		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
 						&status);
-		if (ret < 0)
+		if (ret)
 			return ret;
-		if ((status & PCI_VPD_ADDR_F) == flag) {
+
+		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
 			vpd->busy = false;
 			return 0;
 		}
-		if (wait-- == 0)
+
+		if (time_after(jiffies, timeout))
 			return -ETIMEDOUT;
-		udelay(10);
+		if (fatal_signal_pending(current))
+			return -EINTR;
+		if (!cond_resched())
+			udelay(10);
 	}
 }
 
-static int pci_vpd_pci22_read(struct pci_dev *dev, int pos, int size,
-			      char *buf)
+static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
+				  void *arg)
 {
 	struct pci_vpd_pci22 *vpd =
 		container_of(dev->vpd, struct pci_vpd_pci22, base);
-	u32 val;
 	int ret;
-	int begin, end, i;
+	loff_t end = pos + count;
+	u8 *buf = arg;
 
-	if (pos < 0 || pos > vpd->base.len || size > vpd->base.len - pos)
+	if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
 		return -EINVAL;
-	if (size == 0)
-		return 0;
 
-	spin_lock_irq(&vpd->lock);
-	ret = pci_vpd_pci22_wait(dev);
-	if (ret < 0)
-		goto out;
-	ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
-					 pos & ~3);
-	if (ret < 0)
-		goto out;
-	vpd->busy = true;
-	vpd->flag = 1;
+	if (mutex_lock_killable(&vpd->lock))
+		return -EINTR;
+
 	ret = pci_vpd_pci22_wait(dev);
 	if (ret < 0)
 		goto out;
-	ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA,
-					 &val);
-out:
-	spin_unlock_irq(&vpd->lock);
-	if (ret < 0)
-		return ret;
-
-	/* Convert to bytes */
-	begin = pos & 3;
-	end = min(4, begin + size);
-	for (i = 0; i < end; ++i) {
-		if (i >= begin)
-			*buf++ = val;
-		val >>= 8;
+
+	while (pos < end) {
+		u32 val;
+		unsigned int i, skip;
+
+		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
+						 pos & ~3);
+		if (ret < 0)
+			break;
+		vpd->busy = true;
+		vpd->flag = PCI_VPD_ADDR_F;
+		ret = pci_vpd_pci22_wait(dev);
+		if (ret < 0)
+			break;
+
+		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
+		if (ret < 0)
+			break;
+
+		skip = pos & 3;
+		for (i = 0; i < sizeof(u32); i++) {
+			if (i >= skip) {
+				*buf++ = val;
+				if (++pos == end)
+					break;
+			}
+			val >>= 8;
+		}
 	}
-	return end - begin;
+out:
+	mutex_unlock(&vpd->lock);
+	return ret ? ret : count;
 }
 
-static int pci_vpd_pci22_write(struct pci_dev *dev, int pos, int size,
-			       const char *buf)
+static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
+				   const void *arg)
 {
 	struct pci_vpd_pci22 *vpd =
 		container_of(dev->vpd, struct pci_vpd_pci22, base);
-	u32 val;
-	int ret;
+	const u8 *buf = arg;
+	loff_t end = pos + count;
+	int ret = 0;
 
-	if (pos < 0 || pos > vpd->base.len || pos & 3 ||
-	    size > vpd->base.len - pos || size < 4)
+	if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
 		return -EINVAL;
 
-	val = (u8) *buf++;
-	val |= ((u8) *buf++) << 8;
-	val |= ((u8) *buf++) << 16;
-	val |= ((u32)(u8) *buf++) << 24;
+	if (mutex_lock_killable(&vpd->lock))
+		return -EINTR;
 
-	spin_lock_irq(&vpd->lock);
 	ret = pci_vpd_pci22_wait(dev);
 	if (ret < 0)
 		goto out;
-	ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA,
-					 val);
-	if (ret < 0)
-		goto out;
-	ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
-					 pos | PCI_VPD_ADDR_F);
-	if (ret < 0)
-		goto out;
-	vpd->busy = true;
-	vpd->flag = 0;
-	ret = pci_vpd_pci22_wait(dev);
-out:
-	spin_unlock_irq(&vpd->lock);
-	if (ret < 0)
-		return ret;
 
-	return 4;
+	while (pos < end) {
+		u32 val;
+
+		val = *buf++;
+		val |= *buf++ << 8;
+		val |= *buf++ << 16;
+		val |= *buf++ << 24;
+
+		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
+		if (ret < 0)
+			break;
+		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
+						 pos | PCI_VPD_ADDR_F);
+		if (ret < 0)
+			break;
+
+		vpd->busy = true;
+		vpd->flag = 0;
+		ret = pci_vpd_pci22_wait(dev);
+
+		pos += sizeof(u32);
+	}
+out:
+	mutex_unlock(&vpd->lock);
+	return ret ? ret : count;
 }
 
 static void pci_vpd_pci22_release(struct pci_dev *dev)
@@ -259,7 +312,7 @@ static void pci_vpd_pci22_release(struct pci_dev *dev)
 	kfree(container_of(dev->vpd, struct pci_vpd_pci22, base));
 }
 
-static struct pci_vpd_ops pci_vpd_pci22_ops = {
+static const struct pci_vpd_ops pci_vpd_pci22_ops = {
 	.read = pci_vpd_pci22_read,
 	.write = pci_vpd_pci22_write,
 	.release = pci_vpd_pci22_release,
@@ -279,7 +332,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
 
 	vpd->base.len = PCI_VPD_PCI22_SIZE;
 	vpd->base.ops = &pci_vpd_pci22_ops;
-	spin_lock_init(&vpd->lock);
+	mutex_init(&vpd->lock);
 	vpd->cap = cap;
 	vpd->busy = false;
 	dev->vpd = &vpd->base;
@@ -287,6 +340,29 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
 }
 
 /**
+ * pci_vpd_truncate - Set available Vital Product Data size
+ * @dev: pci device struct
+ * @size: available memory in bytes
+ *
+ * Adjust size of available VPD area.
+ */
+int pci_vpd_truncate(struct pci_dev *dev, size_t size)
+{
+	if (!dev->vpd)
+		return -EINVAL;
+
+	/* limited by the access method */
+	if (size > dev->vpd->len)
+		return -EINVAL;
+
+	dev->vpd->len = size;
+	dev->vpd->attr->size = size;
+
+	return 0;
+}
+EXPORT_SYMBOL(pci_vpd_truncate);
+
+/**
  * pci_block_user_cfg_access - Block userspace PCI config reads/writes
  * @dev: pci device struct
  *
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 999cc4088b59..52b54f053be0 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -71,7 +71,7 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
 }
 
 /**
- * add a single device
+ * pci_bus_add_device - add a single device
  * @dev: device to add
  *
  * This adds a single pci device to the global
@@ -91,6 +91,37 @@ int pci_bus_add_device(struct pci_dev *dev)
 }
 
 /**
+ * pci_bus_add_child - add a child bus
+ * @bus: bus to add
+ *
+ * This adds sysfs entries for a single bus
+ */
+int pci_bus_add_child(struct pci_bus *bus)
+{
+	int retval;
+
+	if (bus->bridge)
+		bus->dev.parent = bus->bridge;
+
+	retval = device_register(&bus->dev);
+	if (retval)
+		return retval;
+
+	bus->is_added = 1;
+
+	retval = device_create_file(&bus->dev, &dev_attr_cpuaffinity);
+	if (retval)
+		return retval;
+
+	retval = device_create_file(&bus->dev, &dev_attr_cpulistaffinity);
+
+	/* Create legacy_io and legacy_mem files for this bus */
+	pci_create_legacy_files(bus);
+
+	return retval;
+}
+
+/**
  * pci_bus_add_devices - insert newly discovered PCI devices
  * @bus: bus to check for new devices
  *
@@ -105,7 +136,7 @@ int pci_bus_add_device(struct pci_dev *dev)
 void pci_bus_add_devices(struct pci_bus *bus)
 {
 	struct pci_dev *dev;
-	struct pci_bus *child_bus;
+	struct pci_bus *child;
 	int retval;
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -120,45 +151,29 @@ void pci_bus_add_devices(struct pci_bus *bus)
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		BUG_ON(!dev->is_added);
 
+		child = dev->subordinate;
 		/*
 		 * If there is an unattached subordinate bus, attach
 		 * it and then scan for unattached PCI devices.
 		 */
-		if (dev->subordinate) {
-			if (list_empty(&dev->subordinate->node)) {
-				down_write(&pci_bus_sem);
-				list_add_tail(&dev->subordinate->node,
-						&dev->bus->children);
-				up_write(&pci_bus_sem);
-			}
-			pci_bus_add_devices(dev->subordinate);
-
-			/* register the bus with sysfs as the parent is now
-			 * properly registered. */
-			child_bus = dev->subordinate;
-			if (child_bus->is_added)
-				continue;
-			child_bus->dev.parent = child_bus->bridge;
-			retval = device_register(&child_bus->dev);
-			if (retval)
-				dev_err(&dev->dev, "Error registering pci_bus,"
-					" continuing...\n");
-			else {
-				child_bus->is_added = 1;
-				retval = device_create_file(&child_bus->dev,
-							&dev_attr_cpuaffinity);
-			}
-			if (retval)
-				dev_err(&dev->dev, "Error creating cpuaffinity"
-					" file, continuing...\n");
-
-			retval = device_create_file(&child_bus->dev,
-						&dev_attr_cpulistaffinity);
-			if (retval)
-				dev_err(&dev->dev,
-					"Error creating cpulistaffinity"
-					" file, continuing...\n");
+		if (!child)
+			continue;
+		if (list_empty(&child->node)) {
+			down_write(&pci_bus_sem);
+			list_add_tail(&child->node, &dev->bus->children);
+			up_write(&pci_bus_sem);
 		}
+		pci_bus_add_devices(child);
+
+		/*
+		 * register the bus with sysfs as the parent is now
+		 * properly registered.
+		 */
+		if (child->is_added)
+			continue;
+		retval = pci_bus_add_child(child);
+		if (retval)
+			dev_err(&dev->dev, "Error adding bus, continuing\n");
 	}
 }
 
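A usage note on the refactoring above: pci_bus_add_child() now bundles device_register() with the sysfs attribute and legacy-file creation, so a caller only needs the guard that pci_bus_add_devices() itself uses. A sketch under that assumption (the surrounding scan loop is not shown here):

	struct pci_bus *child = dev->subordinate;

	/* register the child bus once its bridge device is in sysfs */
	if (child && !child->is_added && pci_bus_add_child(child))
		dev_err(&dev->dev, "Error adding bus, continuing\n");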
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 691b3adeb870..f5a662a50acb 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -191,26 +191,17 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
 {
 	struct acpi_dmar_hardware_unit *drhd;
-	static int include_all;
 	int ret = 0;
 
 	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
 
-	if (!dmaru->include_all)
-		ret = dmar_parse_dev_scope((void *)(drhd + 1),
+	if (dmaru->include_all)
+		return 0;
+
+	ret = dmar_parse_dev_scope((void *)(drhd + 1),
 				((void *)drhd) + drhd->header.length,
 				&dmaru->devices_cnt, &dmaru->devices,
 				drhd->segment);
-	else {
-		/* Only allow one INCLUDE_ALL */
-		if (include_all) {
-			printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
-				"device scope is allowed\n");
-			ret = -EINVAL;
-		}
-		include_all = 1;
-	}
-
 	if (ret) {
 		list_del(&dmaru->list);
 		kfree(dmaru);
@@ -384,12 +375,21 @@ int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
 struct dmar_drhd_unit *
 dmar_find_matched_drhd_unit(struct pci_dev *dev)
 {
-	struct dmar_drhd_unit *drhd = NULL;
+	struct dmar_drhd_unit *dmaru = NULL;
+	struct acpi_dmar_hardware_unit *drhd;
 
-	list_for_each_entry(drhd, &dmar_drhd_units, list) {
-		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
-						drhd->devices_cnt, dev))
-			return drhd;
+	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
+		drhd = container_of(dmaru->hdr,
+				    struct acpi_dmar_hardware_unit,
+				    header);
+
+		if (dmaru->include_all &&
+		    drhd->segment == pci_domain_nr(dev->bus))
+			return dmaru;
+
+		if (dmar_pci_device_match(dmaru->devices,
+					  dmaru->devices_cnt, dev))
+			return dmaru;
 	}
 
 	return NULL;
@@ -491,6 +491,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	int map_size;
 	u32 ver;
 	static int iommu_allocated = 0;
+	int agaw;
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -506,6 +507,15 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+	agaw = iommu_calculate_agaw(iommu);
+	if (agaw < 0) {
+		printk(KERN_ERR
+		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
+		       iommu->seq_id);
+		goto error;
+	}
+	iommu->agaw = agaw;
+
 	/* the registers might be more than one page */
 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
 		cap_max_fault_reg_offset(iommu->cap));
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 9bdbe1a6688f..e31fb91652ce 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -55,6 +55,9 @@ pciehp-objs		:=	pciehp_core.o	\
 				pciehp_ctrl.o	\
 				pciehp_pci.o	\
 				pciehp_hpc.o
+ifdef CONFIG_ACPI
+pciehp-objs		+=	pciehp_acpi.o
+endif
 
 shpchp-objs		:=	shpchp_core.o	\
 				shpchp_ctrl.o	\
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 2c981cbb0719..1c1141801060 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -500,5 +500,74 @@ int acpi_root_bridge(acpi_handle handle)
 }
 EXPORT_SYMBOL_GPL(acpi_root_bridge);
 
+
+static int is_ejectable(acpi_handle handle)
+{
+	acpi_status status;
+	acpi_handle tmp;
+	unsigned long long removable;
+	status = acpi_get_handle(handle, "_ADR", &tmp);
+	if (ACPI_FAILURE(status))
+		return 0;
+	status = acpi_get_handle(handle, "_EJ0", &tmp);
+	if (ACPI_SUCCESS(status))
+		return 1;
+	status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable);
+	if (ACPI_SUCCESS(status) && removable)
+		return 1;
+	return 0;
+}
+
+/**
+ * acpi_pci_check_ejectable - check if handle is ejectable ACPI PCI slot
+ * @pbus: the PCI bus of the PCI slot corresponding to 'handle'
+ * @handle: ACPI handle to check
+ *
+ * Return 1 if handle is ejectable PCI slot, 0 otherwise.
+ */
+int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle)
+{
+	acpi_handle bridge_handle, parent_handle;
+
+	if (!(bridge_handle = acpi_pci_get_bridge_handle(pbus)))
+		return 0;
+	if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle))))
+		return 0;
+	if (bridge_handle != parent_handle)
+		return 0;
+	return is_ejectable(handle);
+}
+EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable);
+
+static acpi_status
+check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+	int *found = (int *)context;
+	if (is_ejectable(handle)) {
+		*found = 1;
+		return AE_CTRL_TERMINATE;
+	}
+	return AE_OK;
+}
+
+/**
+ * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots
+ * @pbus: PCI bus to scan
+ *
+ * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise.
+ */
+int acpi_pci_detect_ejectable(struct pci_bus *pbus)
+{
+	acpi_handle handle;
+	int found = 0;
+
+	if (!(handle = acpi_pci_get_bridge_handle(pbus)))
+		return 0;
+	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
+			    check_hotplug, (void *)&found, NULL);
+	return found;
+}
+EXPORT_SYMBOL_GPL(acpi_pci_detect_ejectable);
+
 module_param(debug_acpi, bool, 0644);
 MODULE_PARM_DESC(debug_acpi, "Debugging mode for ACPI enabled or not");
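For illustration, a hedged sketch of how a hotplug driver might use the helper exported here; the function name and bridge-device context are assumptions, not part of this commit:

	static int bridge_has_acpi_slots(struct pci_dev *bridge)
	{
		struct pci_bus *pbus = bridge->subordinate;

		if (!pbus)
			return 0;
		/* nonzero if any child ACPI handle passes is_ejectable() */
		return acpi_pci_detect_ejectable(pbus);
	}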
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 9bcb6cbd5aa9..4fc168b70095 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -44,7 +44,7 @@
 	do {							\
 		if (acpiphp_debug)				\
 			printk(KERN_DEBUG "%s: " format,	\
-				MY_NAME , ## arg); 		\
+				MY_NAME , ## arg);		\
 	} while (0)
 #define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
 #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 3affc6472e65..f09b1010d477 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -46,6 +46,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/pci_hotplug.h>
+#include <linux/pci-acpi.h>
 #include <linux/mutex.h>
 
 #include "../pci.h"
@@ -62,61 +63,6 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus);
 static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus);
 static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
 
-
-/*
- * initialization & terminatation routines
- */
-
-/**
- * is_ejectable - determine if a slot is ejectable
- * @handle: handle to acpi namespace
- *
- * Ejectable slot should satisfy at least these conditions:
- *
- *  1. has _ADR method
- *  2. has _EJ0 method
- *
- * optionally
- *
- *  1. has _STA method
- *  2. has _PS0 method
- *  3. has _PS3 method
- *  4. ..
- */
-static int is_ejectable(acpi_handle handle)
-{
-	acpi_status status;
-	acpi_handle tmp;
-
-	status = acpi_get_handle(handle, "_ADR", &tmp);
-	if (ACPI_FAILURE(status)) {
-		return 0;
-	}
-
-	status = acpi_get_handle(handle, "_EJ0", &tmp);
-	if (ACPI_FAILURE(status)) {
-		return 0;
-	}
-
-	return 1;
-}
-
-
-/* callback routine to check for the existence of ejectable slots */
-static acpi_status
-is_ejectable_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
-	int *count = (int *)context;
-
-	if (is_ejectable(handle)) {
-		(*count)++;
-		/* only one ejectable slot is enough */
-		return AE_CTRL_TERMINATE;
-	} else {
-		return AE_OK;
-	}
-}
-
 /* callback routine to check for the existence of a pci dock device */
 static acpi_status
 is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv)
@@ -131,9 +77,6 @@ is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv)
 	}
 }
 
-
-
-
 /*
  * the _DCK method can do funny things... and sometimes not
  * hah-hah funny.
@@ -160,9 +103,9 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
 
 	if (((buses >> 8) & 0xff) != bus->secondary) {
 		buses = (buses & 0xff000000)
-	     		| ((unsigned int)(bus->primary)     <<  0)
-			| ((unsigned int)(bus->secondary)   <<  8)
-			| ((unsigned int)(bus->subordinate) << 16);
+			| ((unsigned int)(bus->primary)     <<  0)
+			| ((unsigned int)(bus->secondary)   <<  8)
+			| ((unsigned int)(bus->subordinate) << 16);
 		pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses);
 	}
 	return NOTIFY_OK;
@@ -184,17 +127,12 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
 	acpi_status status = AE_OK;
 	unsigned long long adr, sun;
 	int device, function, retval;
+	struct pci_bus *pbus = bridge->pci_bus;
 
-	status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
-
-	if (ACPI_FAILURE(status))
-		return AE_OK;
-
-	status = acpi_get_handle(handle, "_EJ0", &tmp);
-
-	if (ACPI_FAILURE(status) && !(is_dock_device(handle)))
+	if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
 		return AE_OK;
 
+	acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
 	device = (adr >> 16) & 0xffff;
 	function = adr & 0xffff;
 
@@ -205,7 +143,8 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
 	INIT_LIST_HEAD(&newfunc->sibling);
 	newfunc->handle = handle;
 	newfunc->function = function;
-	if (ACPI_SUCCESS(status))
+
+	if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
 		newfunc->flags = FUNC_HAS_EJ0;
 
 	if (ACPI_SUCCESS(acpi_get_handle(handle, "_STA", &tmp)))
@@ -256,8 +195,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
 		bridge->nr_slots++;
 
 		dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
-		    slot->sun, pci_domain_nr(bridge->pci_bus),
-		    bridge->pci_bus->number, slot->device);
+		    slot->sun, pci_domain_nr(pbus), pbus->number, device);
 		retval = acpiphp_register_hotplug_slot(slot);
 		if (retval) {
 			if (retval == -EBUSY)
@@ -274,8 +212,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
 	list_add_tail(&newfunc->sibling, &slot->funcs);
 
 	/* associate corresponding pci_dev */
-	newfunc->pci_dev = pci_get_slot(bridge->pci_bus,
-					PCI_DEVFN(device, function));
+	newfunc->pci_dev = pci_get_slot(pbus, PCI_DEVFN(device, function));
 	if (newfunc->pci_dev) {
 		slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
 	}
@@ -324,27 +261,15 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
 
 
 /* see if it's worth looking at this bridge */
-static int detect_ejectable_slots(acpi_handle *bridge_handle)
+static int detect_ejectable_slots(struct pci_bus *pbus)
 {
-	acpi_status status;
-	int count;
-
-	count = 0;
-
-	/* only check slots defined directly below bridge object */
-	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1,
-				     is_ejectable_slot, (void *)&count, NULL);
-
-	/*
-	 * we also need to add this bridge if there is a dock bridge or
-	 * other pci device on a dock station (removable)
-	 */
-	if (!count)
-		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle,
-				(u32)1, is_pci_dock_device, (void *)&count,
-				NULL);
-
-	return count;
+	int found = acpi_pci_detect_ejectable(pbus);
+	if (!found) {
+		acpi_handle bridge_handle = acpi_pci_get_bridge_handle(pbus);
+		acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1,
+				    is_pci_dock_device, (void *)&found, NULL);
+	}
+	return found;
 }
 
 
@@ -554,7 +479,7 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
 		goto out;
 
 	/* check if this bridge has ejectable slots */
-	if ((detect_ejectable_slots(handle) > 0)) {
+	if ((detect_ejectable_slots(dev->subordinate) > 0)) {
 		dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev));
 		add_p2p_bridge(handle, dev);
 	}
@@ -615,7 +540,7 @@ static int add_bridge(acpi_handle handle)
 	}
 
 	/* check if this bridge has ejectable slots */
-	if (detect_ejectable_slots(handle) > 0) {
+	if (detect_ejectable_slots(pci_bus) > 0) {
 		dbg("found PCI host-bus bridge with hot-pluggable slots\n");
 		add_host_bridge(handle, pci_bus);
 	}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 881fdd2b7313..5befa7e379b7 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -271,7 +271,7 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context)
 		dbg("%s: generationg bus event\n", __func__);
 		acpi_bus_generate_proc_event(note->device, note->event, detail);
 		acpi_bus_generate_netlink_event(note->device->pnp.device_class,
-						  note->device->dev.bus_id,
+						  dev_name(&note->device->dev),
 						  note->event, detail);
 	} else
 		note->event = event;
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 8514c3a1746a..c2e1bcbb28a7 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -45,7 +45,7 @@
 
 #include "cpqphp.h"
 #include "cpqphp_nvram.h"
-#include "../../../arch/x86/pci/pci.h"	/* horrible hack showing how processor dependent we are... */
+#include <asm/pci_x86.h>
 
 
 /* Global variables */
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index a60a25290995..cc227a8c4b11 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1954,7 +1954,7 @@ void cpqhp_pushbutton_thread(unsigned long slot)
 		return ;
 	}
 
-	if (func != NULL && ctrl != NULL) {
+	if (ctrl != NULL) {
 		if (cpqhp_process_SI(ctrl, func) != 0) {
 			amber_LED_on(ctrl, hp_slot);
 			green_LED_off(ctrl, hp_slot);
@@ -2604,7 +2604,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
 	for (cloop = 0; cloop < 4; cloop++) {
 		if (irqs.valid_INT & (0x01 << cloop)) {
 			rc = cpqhp_set_irq(func->bus, func->device,
-					   0x0A + cloop, irqs.interrupt[cloop]);
+					   cloop + 1, irqs.interrupt[cloop]);
 			if (rc)
 				goto free_and_out;
 		}
@@ -2945,7 +2945,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
 	}
 
 	if (!behind_bridge) {
-		rc = cpqhp_set_irq(func->bus, func->device, temp_byte + 0x09, IRQ);
+		rc = cpqhp_set_irq(func->bus, func->device, temp_byte, IRQ);
 		if (rc)
 			return 1;
 	} else {
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 09021930589f..6c0ed0fcb8ee 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -37,7 +37,7 @@
 #include "../pci.h"
 #include "cpqphp.h"
 #include "cpqphp_nvram.h"
-#include "../../../arch/x86/pci/pci.h"	/* horrible hack showing how processor dependent we are... */
+#include <asm/pci_x86.h>
 
 
 u8 cpqhp_nic_irq;
@@ -171,7 +171,7 @@ int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num)
 	fakebus->number = bus_num;
 	dbg("%s: dev %d, bus %d, pin %d, num %d\n",
 	    __func__, dev_num, bus_num, int_pin, irq_num);
-	rc = pcibios_set_irq_routing(fakedev, int_pin - 0x0a, irq_num);
+	rc = pcibios_set_irq_routing(fakedev, int_pin - 1, irq_num);
 	kfree(fakedev);
 	kfree(fakebus);
 	dbg("%s: rc %d\n", __func__, rc);
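The cpqphp changes above replace the driver's old 0x0A-based pin encoding with the standard PCI_INTERRUPT_PIN convention (1 = INTA .. 4 = INTD), while pcibios_set_irq_routing() keeps taking a 0-based pin. A small sketch of the convention, with the surrounding variables assumed:

	u8 pin;

	pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); /* 1..4 = INTA..INTD */
	if (pin)
		rc = pcibios_set_irq_routing(pdev, pin - 1, irq_num);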
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index 3a2637a00934..b0e7de9e536d 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -324,6 +324,7 @@ static int disable_slot(struct hotplug_slot *slot)
 
 	if (test_and_set_bit(0, &dslot->removed)) {
 		dbg("Slot already scheduled for removal\n");
+		pci_dev_put(dev);
 		return -ENODEV;
 	}
 
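The fakephp fix above plugs a reference leak: pci_get_slot() returns a pci_dev with its refcount raised, so every exit path, including the early -ENODEV return, must drop it. A minimal sketch of the required pairing (caller context assumed):

	struct pci_dev *d = pci_get_slot(bus, devfn);

	if (d) {
		/* ... use d ... */
		pci_dev_put(d); /* balance the reference taken by pci_get_slot() */
	}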
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 633e743442ac..dd18f857dfb0 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -35,7 +35,7 @@
 #include <linux/delay.h>
 #include <linux/wait.h>
 #include "../pci.h"
-#include "../../../arch/x86/pci/pci.h"	/* for struct irq_routing_table */
+#include <asm/pci_x86.h>	/* for struct irq_routing_table */
 #include "ibmphp.h"
 
 #define attn_on(sl)  ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNON)
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 7072952ea1d2..db85284ffb62 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -219,11 +219,23 @@ struct hpc_ops {
 #include <acpi/acpi_bus.h>
 #include <linux/pci-acpi.h>
 
+extern void __init pciehp_acpi_slot_detection_init(void);
+extern int pciehp_acpi_slot_detection_check(struct pci_dev *dev);
+
+static inline void pciehp_firmware_init(void)
+{
+	pciehp_acpi_slot_detection_init();
+}
+
 static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
 {
+	int retval;
 	u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
 		     OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-	return acpi_get_hp_hw_control_from_firmware(dev, flags);
+	retval = acpi_get_hp_hw_control_from_firmware(dev, flags);
+	if (retval)
+		return retval;
+	return pciehp_acpi_slot_detection_check(dev);
 }
 
 static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
@@ -234,6 +246,7 @@ static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
 	return 0;
 }
 #else
+#define pciehp_firmware_init()				do {} while (0)
 #define pciehp_get_hp_hw_control_from_firmware(dev)	0
 #define pciehp_get_hp_params_from_firmware(dev, hpp)	(-ENODEV)
 #endif				/* CONFIG_ACPI */
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
new file mode 100644
index 000000000000..438d795f9fe3
--- /dev/null
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -0,0 +1,141 @@
+/*
+ * ACPI related functions for PCI Express Hot Plug driver.
+ *
+ * Copyright (C) 2008 Kenji Kaneshige
+ * Copyright (C) 2008 Fujitsu Limited.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include "pciehp.h"
+
+#define PCIEHP_DETECT_PCIE	(0)
+#define PCIEHP_DETECT_ACPI	(1)
+#define PCIEHP_DETECT_AUTO	(2)
+#define PCIEHP_DETECT_DEFAULT	PCIEHP_DETECT_AUTO
+
+static int slot_detection_mode;
+static char *pciehp_detect_mode;
+module_param(pciehp_detect_mode, charp, 0444);
+MODULE_PARM_DESC(pciehp_detect_mode,
+	 "Slot detection mode: pcie, acpi, auto\n"
+	 "  pcie          - Use PCIe based slot detection\n"
+	 "  acpi          - Use ACPI for slot detection\n"
+	 "  auto(default) - Auto select mode. Use acpi option if duplicate\n"
+	 "                  slot ids are found. Otherwise, use pcie option\n");
+
+int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
+{
+	if (slot_detection_mode != PCIEHP_DETECT_ACPI)
+		return 0;
+	if (acpi_pci_detect_ejectable(dev->subordinate))
+		return 0;
+	return -ENODEV;
+}
+
+static int __init parse_detect_mode(void)
+{
+	if (!pciehp_detect_mode)
+		return PCIEHP_DETECT_DEFAULT;
+	if (!strcmp(pciehp_detect_mode, "pcie"))
+		return PCIEHP_DETECT_PCIE;
+	if (!strcmp(pciehp_detect_mode, "acpi"))
+		return PCIEHP_DETECT_ACPI;
+	if (!strcmp(pciehp_detect_mode, "auto"))
+		return PCIEHP_DETECT_AUTO;
+	warn("bad specifier '%s' for pciehp_detect_mode. Use default\n",
+	     pciehp_detect_mode);
+	return PCIEHP_DETECT_DEFAULT;
+}
+
+static struct pcie_port_service_id __initdata port_pci_ids[] = {
+	{
+		.vendor = PCI_ANY_ID,
+		.device = PCI_ANY_ID,
+		.port_type = PCIE_ANY_PORT,
+		.service_type = PCIE_PORT_SERVICE_HP,
+		.driver_data = 0,
+	}, { /* end: all zeroes */ }
+};
+
+static int __initdata dup_slot_id;
+static int __initdata acpi_slot_detected;
+static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
+
+/* Dummy driver for duplicate name detection */
+static int __init dummy_probe(struct pcie_device *dev,
+			      const struct pcie_port_service_id *id)
+{
+	int pos;
+	u32 slot_cap;
+	struct slot *slot, *tmp;
+	struct pci_dev *pdev = dev->port;
+	struct pci_bus *pbus = pdev->subordinate;
+	if (!(slot = kzalloc(sizeof(*slot), GFP_KERNEL)))
+		return -ENOMEM;
+	/* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
+	if (pciehp_get_hp_hw_control_from_firmware(pdev))
+		return -ENODEV;
+	if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP)))
+		return -ENODEV;
+	pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
+	slot->number = slot_cap >> 19;
+	list_for_each_entry(tmp, &dummy_slots, slot_list) {
+		if (tmp->number == slot->number)
+			dup_slot_id++;
+	}
+	list_add_tail(&slot->slot_list, &dummy_slots);
+	if (!acpi_slot_detected && acpi_pci_detect_ejectable(pbus))
+		acpi_slot_detected = 1;
+	return -ENODEV;		/* dummy driver always returns error */
+}
+
+static struct pcie_port_service_driver __initdata dummy_driver = {
+	.name		= "pciehp_dummy",
+	.id_table	= port_pci_ids,
+	.probe		= dummy_probe,
+};
+
+static int __init select_detection_mode(void)
+{
+	struct slot *slot, *tmp;
+	pcie_port_service_register(&dummy_driver);
+	pcie_port_service_unregister(&dummy_driver);
+	list_for_each_entry_safe(slot, tmp, &dummy_slots, slot_list) {
+		list_del(&slot->slot_list);
+		kfree(slot);
+	}
+	if (acpi_slot_detected && dup_slot_id)
+		return PCIEHP_DETECT_ACPI;
+	return PCIEHP_DETECT_PCIE;
+}
+
+void __init pciehp_acpi_slot_detection_init(void)
+{
+	slot_detection_mode = parse_detect_mode();
+	if (slot_detection_mode != PCIEHP_DETECT_AUTO)
+		goto out;
+	slot_detection_mode = select_detection_mode();
+out:
+	if (slot_detection_mode == PCIEHP_DETECT_ACPI)
+		info("Using ACPI for slot detection.\n");
+}
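One detail worth noting in dummy_probe() above: the Physical Slot Number occupies bits 31:19 of the Slot Capabilities register, which is what the bare shift by 19 decodes. The same decode with a named constant, where the macro name is illustrative rather than taken from this tree:

	#define SLTCAP_PSN_SHIFT 19

	u32 slot_cap, psn;

	pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
	psn = slot_cap >> SLTCAP_PSN_SHIFT; /* Physical Slot Number */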
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 39cf248d24e3..5482d4ed8256 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -522,6 +522,7 @@ static int __init pcied_init(void)
 {
 	int retval = 0;
 
+	pciehp_firmware_init();
 	retval = pcie_port_service_register(&hpdriver_portdrv);
 	dbg("pcie_port_service_register = %d\n", retval);
 	info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index fead63c6b49e..ff4034502d24 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -178,15 +178,14 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
 				"Issue of Slot Power Off command failed\n");
 			return;
 		}
+		/*
+		 * After turning power off, we must wait for at least 1 second
+		 * before taking any action that relies on power having been
+		 * removed from the slot/adapter.
+		 */
+		msleep(1000);
 	}
 
-	/*
-	 * After turning power off, we must wait for at least 1 second
-	 * before taking any action that relies on power having been
-	 * removed from the slot/adapter.
-	 */
-	msleep(1000);
-
 	if (PWR_LED(ctrl))
 		pslot->hpc_ops->green_led_off(pslot);
 
@@ -286,15 +285,14 @@ static int remove_board(struct slot *p_slot)
 				"Issue of Slot Disable command failed\n");
 			return retval;
 		}
+		/*
+		 * After turning power off, we must wait for at least 1 second
+		 * before taking any action that relies on power having been
+		 * removed from the slot/adapter.
+		 */
+		msleep(1000);
 	}
 
-	/*
-	 * After turning power off, we must wait for at least 1 second
-	 * before taking any action that relies on power having been
-	 * removed from the slot/adapter.
-	 */
-	msleep(1000);
-
 	if (PWR_LED(ctrl))
 		/* turn off Green LED */
 		p_slot->hpc_ops->green_led_off(p_slot);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index b643ca13e4f1..71a8012886b0 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -42,42 +42,6 @@
 
 static atomic_t pciehp_num_controllers = ATOMIC_INIT(0);
 
-struct ctrl_reg {
-	u8 cap_id;
-	u8 nxt_ptr;
-	u16 cap_reg;
-	u32 dev_cap;
-	u16 dev_ctrl;
-	u16 dev_status;
-	u32 lnk_cap;
-	u16 lnk_ctrl;
-	u16 lnk_status;
-	u32 slot_cap;
-	u16 slot_ctrl;
-	u16 slot_status;
-	u16 root_ctrl;
-	u16 rsvp;
-	u32 root_status;
-} __attribute__ ((packed));
-
-/* offsets to the controller registers based on the above structure layout */
-enum ctrl_offsets {
-	PCIECAPID	= offsetof(struct ctrl_reg, cap_id),
-	NXTCAPPTR	= offsetof(struct ctrl_reg, nxt_ptr),
-	CAPREG		= offsetof(struct ctrl_reg, cap_reg),
-	DEVCAP		= offsetof(struct ctrl_reg, dev_cap),
-	DEVCTRL		= offsetof(struct ctrl_reg, dev_ctrl),
-	DEVSTATUS	= offsetof(struct ctrl_reg, dev_status),
-	LNKCAP		= offsetof(struct ctrl_reg, lnk_cap),
-	LNKCTRL		= offsetof(struct ctrl_reg, lnk_ctrl),
-	LNKSTATUS	= offsetof(struct ctrl_reg, lnk_status),
-	SLOTCAP		= offsetof(struct ctrl_reg, slot_cap),
-	SLOTCTRL	= offsetof(struct ctrl_reg, slot_ctrl),
-	SLOTSTATUS	= offsetof(struct ctrl_reg, slot_status),
-	ROOTCTRL	= offsetof(struct ctrl_reg, root_ctrl),
-	ROOTSTATUS	= offsetof(struct ctrl_reg, root_status),
-};
-
 static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
 {
 	struct pci_dev *dev = ctrl->pci_dev;
@@ -102,95 +66,9 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
 	return pci_write_config_dword(dev, ctrl->cap_base + reg, value);
 }
 
-/* Field definitions in PCI Express Capabilities Register */
-#define CAP_VER			0x000F
-#define DEV_PORT_TYPE		0x00F0
-#define SLOT_IMPL		0x0100
-#define MSG_NUM			0x3E00
-
-/* Device or Port Type */
-#define NAT_ENDPT		0x00
-#define LEG_ENDPT		0x01
-#define ROOT_PORT		0x04
-#define UP_STREAM		0x05
-#define DN_STREAM		0x06
-#define PCIE_PCI_BRDG		0x07
-#define PCI_PCIE_BRDG		0x10
-
-/* Field definitions in Device Capabilities Register */
-#define DATTN_BUTTN_PRSN	0x1000
-#define DATTN_LED_PRSN		0x2000
-#define DPWR_LED_PRSN		0x4000
-
-/* Field definitions in Link Capabilities Register */
-#define MAX_LNK_SPEED		0x000F
-#define MAX_LNK_WIDTH		0x03F0
-#define LINK_ACTIVE_REPORTING	0x00100000
-
-/* Link Width Encoding */
-#define LNK_X1		0x01
-#define LNK_X2		0x02
-#define LNK_X4		0x04
-#define LNK_X8		0x08
-#define LNK_X12		0x0C
-#define LNK_X16		0x10
-#define LNK_X32		0x20
-
-/*Field definitions of Link Status Register */
-#define LNK_SPEED	0x000F
-#define NEG_LINK_WD	0x03F0
-#define LNK_TRN_ERR	0x0400
-#define LNK_TRN		0x0800
-#define SLOT_CLK_CONF	0x1000
-#define LINK_ACTIVE	0x2000
-
-/* Field definitions in Slot Capabilities Register */
-#define ATTN_BUTTN_PRSN	0x00000001
-#define PWR_CTRL_PRSN	0x00000002
-#define MRL_SENS_PRSN	0x00000004
-#define ATTN_LED_PRSN	0x00000008
-#define PWR_LED_PRSN	0x00000010
-#define HP_SUPR_RM_SUP	0x00000020
-#define HP_CAP		0x00000040
-#define SLOT_PWR_VALUE	0x000003F8
-#define SLOT_PWR_LIMIT	0x00000C00
-#define PSN		0xFFF80000	/* PSN: Physical Slot Number */
-
-/* Field definitions in Slot Control Register */
-#define ATTN_BUTTN_ENABLE		0x0001
-#define PWR_FAULT_DETECT_ENABLE		0x0002
-#define MRL_DETECT_ENABLE		0x0004
-#define PRSN_DETECT_ENABLE		0x0008
-#define CMD_CMPL_INTR_ENABLE		0x0010
-#define HP_INTR_ENABLE			0x0020
-#define ATTN_LED_CTRL			0x00C0
-#define PWR_LED_CTRL			0x0300
-#define PWR_CTRL			0x0400
-#define EMI_CTRL			0x0800
-
-/* Attention indicator and Power indicator states */
-#define LED_ON		0x01
-#define LED_BLINK	0x10
-#define LED_OFF		0x11
-
 /* Power Control Command */
 #define POWER_ON	0
-#define POWER_OFF	0x0400
-
-/* EMI Status defines */
-#define EMI_DISENGAGED	0
-#define EMI_ENGAGED	1
-
-/* Field definitions in Slot Status Register */
-#define ATTN_BUTTN_PRESSED	0x0001
-#define PWR_FAULT_DETECTED	0x0002
-#define MRL_SENS_CHANGED	0x0004
-#define PRSN_DETECT_CHANGED	0x0008
-#define CMD_COMPLETED		0x0010
-#define MRL_STATE		0x0020
-#define PRSN_STATE		0x0040
-#define EMI_STATE		0x0080
-#define EMI_STATUS_BIT		7
+#define POWER_OFF	PCI_EXP_SLTCTL_PCC
 
 static irqreturn_t pcie_isr(int irq, void *dev_id);
 static void start_int_poll_timer(struct controller *ctrl, int sec);
@@ -253,22 +131,20 @@ static inline void pciehp_free_irq(struct controller *ctrl) | |||
253 | static int pcie_poll_cmd(struct controller *ctrl) | 131 | static int pcie_poll_cmd(struct controller *ctrl) |
254 | { | 132 | { |
255 | u16 slot_status; | 133 | u16 slot_status; |
256 | int timeout = 1000; | 134 | int err, timeout = 1000; |
257 | 135 | ||
258 | if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) { | 136 | err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); |
259 | if (slot_status & CMD_COMPLETED) { | 137 | if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) { |
260 | pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED); | 138 | pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); |
261 | return 1; | 139 | return 1; |
262 | } | ||
263 | } | 140 | } |
264 | while (timeout > 0) { | 141 | while (timeout > 0) { |
265 | msleep(10); | 142 | msleep(10); |
266 | timeout -= 10; | 143 | timeout -= 10; |
267 | if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) { | 144 | err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); |
268 | if (slot_status & CMD_COMPLETED) { | 145 | if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) { |
269 | pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED); | 146 | pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); |
270 | return 1; | 147 | return 1; |
271 | } | ||
272 | } | 148 | } |
273 | } | 149 | } |
274 | return 0; /* timeout */ | 150 | return 0; /* timeout */ |
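The reworked pcie_poll_cmd() above captures the read's return value and tests !err && (slot_status & PCI_EXP_SLTSTA_CC) in a single condition; the completed event is then acknowledged by writing the bit back, since Slot Status event bits are write-one-to-clear (RW1C). A minimal standalone sketch of that poll-and-acknowledge pattern, with the config-space access stubbed out (the SLTSTA_CC value mirrors pci_regs.h; the stub accessors are hypothetical):

#include <stdio.h>
#include <stdint.h>

#define PCI_EXP_SLTSTA_CC 0x0010 /* Command Completed, write-one-to-clear */

/* Stubbed accessors standing in for pciehp_readw()/pciehp_writew(). */
static uint16_t fake_sltsta = PCI_EXP_SLTSTA_CC;
static int read_sltsta(uint16_t *v) { *v = fake_sltsta; return 0; }
static void write_sltsta(uint16_t v) { fake_sltsta &= ~v; } /* RW1C */

/* Returns 1 once the completed bit is seen and acknowledged, 0 on timeout. */
static int poll_cmd(int timeout_ms)
{
        uint16_t sta;

        while (timeout_ms > 0) {
                if (!read_sltsta(&sta) && (sta & PCI_EXP_SLTSTA_CC)) {
                        write_sltsta(PCI_EXP_SLTSTA_CC); /* ack by writing 1 */
                        return 1;
                }
                timeout_ms -= 10; /* the driver msleep()s 10 ms per pass */
        }
        return 0;
}

int main(void)
{
        printf("command %s\n", poll_cmd(1000) ? "completed" : "timed out");
        return 0;
}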
@@ -302,14 +178,14 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) | |||
302 | 178 | ||
303 | mutex_lock(&ctrl->ctrl_lock); | 179 | mutex_lock(&ctrl->ctrl_lock); |
304 | 180 | ||
305 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); | 181 | retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); |
306 | if (retval) { | 182 | if (retval) { |
307 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", | 183 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", |
308 | __func__); | 184 | __func__); |
309 | goto out; | 185 | goto out; |
310 | } | 186 | } |
311 | 187 | ||
312 | if (slot_status & CMD_COMPLETED) { | 188 | if (slot_status & PCI_EXP_SLTSTA_CC) { |
313 | if (!ctrl->no_cmd_complete) { | 189 | if (!ctrl->no_cmd_complete) { |
314 | /* | 190 | /* |
315 | * After 1 sec and CMD_COMPLETED still not set, just | 191 | * After 1 sec and CMD_COMPLETED still not set, just |
@@ -332,7 +208,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) | |||
332 | } | 208 | } |
333 | } | 209 | } |
334 | 210 | ||
335 | retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); | 211 | retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); |
336 | if (retval) { | 212 | if (retval) { |
337 | ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); | 213 | ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); |
338 | goto out; | 214 | goto out; |
@@ -342,7 +218,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) | |||
342 | slot_ctrl |= (cmd & mask); | 218 | slot_ctrl |= (cmd & mask); |
343 | ctrl->cmd_busy = 1; | 219 | ctrl->cmd_busy = 1; |
344 | smp_mb(); | 220 | smp_mb(); |
345 | retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); | 221 | retval = pciehp_writew(ctrl, PCI_EXP_SLTCTL, slot_ctrl); |
346 | if (retval) | 222 | if (retval) |
347 | ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n"); | 223 | ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n"); |
348 | 224 | ||
@@ -356,8 +232,8 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) | |||
356 | * completed interrupt is not enabled, we need to poll | 232 | * completed interrupt is not enabled, we need to poll |
357 | * command completed event. | 233 | * command completed event. |
358 | */ | 234 | */ |
359 | if (!(slot_ctrl & HP_INTR_ENABLE) || | 235 | if (!(slot_ctrl & PCI_EXP_SLTCTL_HPIE) || |
360 | !(slot_ctrl & CMD_CMPL_INTR_ENABLE)) | 236 | !(slot_ctrl & PCI_EXP_SLTCTL_CCIE)) |
361 | poll = 1; | 237 | poll = 1; |
362 | pcie_wait_cmd(ctrl, poll); | 238 | pcie_wait_cmd(ctrl, poll); |
363 | } | 239 | } |
@@ -370,9 +246,9 @@ static inline int check_link_active(struct controller *ctrl) | |||
370 | { | 246 | { |
371 | u16 link_status; | 247 | u16 link_status; |
372 | 248 | ||
373 | if (pciehp_readw(ctrl, LNKSTATUS, &link_status)) | 249 | if (pciehp_readw(ctrl, PCI_EXP_LNKSTA, &link_status)) |
374 | return 0; | 250 | return 0; |
375 | return !!(link_status & LINK_ACTIVE); | 251 | return !!(link_status & PCI_EXP_LNKSTA_DLLLA); |
376 | } | 252 | } |
377 | 253 | ||
378 | static void pcie_wait_link_active(struct controller *ctrl) | 254 | static void pcie_wait_link_active(struct controller *ctrl) |
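check_link_active() now reads the generic PCI_EXP_LNKSTA offset and normalizes the Data Link Layer Link Active bit with !!. A hedged sketch of how a wait loop can be layered on such a check (the timing constants are illustrative, not taken from this patch; the register read is stubbed):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */

/* Stub standing in for pciehp_readw(ctrl, PCI_EXP_LNKSTA, ...). */
static uint16_t fake_lnksta = PCI_EXP_LNKSTA_DLLLA;
static int read_lnksta(uint16_t *v) { *v = fake_lnksta; return 0; }

static bool link_active(void)
{
        uint16_t sta;

        if (read_lnksta(&sta))
                return false; /* treat a failed read as link-down */
        return !!(sta & PCI_EXP_LNKSTA_DLLLA);
}

/* Poll for up to ~1 s in 10 ms steps; illustrative timing only. */
static bool wait_link_active(void)
{
        int timeout;

        for (timeout = 1000; timeout > 0; timeout -= 10) {
                if (link_active())
                        return true;
                /* msleep(10) in the driver; elided here */
        }
        return false;
}

int main(void)
{
        printf("link %s\n", wait_link_active() ? "up" : "down");
        return 0;
}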
@@ -412,15 +288,15 @@ static int hpc_check_lnk_status(struct controller *ctrl) | |||
412 | } else | 288 | } else |
413 | msleep(1000); | 289 | msleep(1000); |
414 | 290 | ||
415 | retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); | 291 | retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); |
416 | if (retval) { | 292 | if (retval) { |
417 | ctrl_err(ctrl, "Cannot read LNKSTATUS register\n"); | 293 | ctrl_err(ctrl, "Cannot read LNKSTATUS register\n"); |
418 | return retval; | 294 | return retval; |
419 | } | 295 | } |
420 | 296 | ||
421 | ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); | 297 | ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); |
422 | if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) || | 298 | if ((lnk_status & PCI_EXP_LNKSTA_LT) || |
423 | !(lnk_status & NEG_LINK_WD)) { | 299 | !(lnk_status & PCI_EXP_LNKSTA_NLW)) { |
424 | ctrl_err(ctrl, "Link Training Error occurs \n"); | 300 | ctrl_err(ctrl, "Link Training Error occurs \n"); |
425 | retval = -1; | 301 | retval = -1; |
426 | return retval; | 302 | return retval; |
@@ -436,16 +312,16 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status) | |||
436 | u8 atten_led_state; | 312 | u8 atten_led_state; |
437 | int retval = 0; | 313 | int retval = 0; |
438 | 314 | ||
439 | retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); | 315 | retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); |
440 | if (retval) { | 316 | if (retval) { |
441 | ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); | 317 | ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); |
442 | return retval; | 318 | return retval; |
443 | } | 319 | } |
444 | 320 | ||
445 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", | 321 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", |
446 | __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); | 322 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl); |
447 | 323 | ||
448 | atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6; | 324 | atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6; |
449 | 325 | ||
450 | switch (atten_led_state) { | 326 | switch (atten_led_state) { |
451 | case 0: | 327 | case 0: |
@@ -475,15 +351,15 @@ static int hpc_get_power_status(struct slot *slot, u8 *status) | |||
475 | u8 pwr_state; | 351 | u8 pwr_state; |
476 | int retval = 0; | 352 | int retval = 0; |
477 | 353 | ||
478 | retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); | 354 | retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); |
479 | if (retval) { | 355 | if (retval) { |
480 | ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); | 356 | ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); |
481 | return retval; | 357 | return retval; |
482 | } | 358 | } |
483 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", | 359 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", |
484 | __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); | 360 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl); |
485 | 361 | ||
486 | pwr_state = (slot_ctrl & PWR_CTRL) >> 10; | 362 | pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10; |
487 | 363 | ||
488 | switch (pwr_state) { | 364 | switch (pwr_state) { |
489 | case 0: | 365 | case 0: |
@@ -504,17 +380,15 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status) | |||
504 | { | 380 | { |
505 | struct controller *ctrl = slot->ctrl; | 381 | struct controller *ctrl = slot->ctrl; |
506 | u16 slot_status; | 382 | u16 slot_status; |
507 | int retval = 0; | 383 | int retval; |
508 | 384 | ||
509 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); | 385 | retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); |
510 | if (retval) { | 386 | if (retval) { |
511 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", | 387 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", |
512 | __func__); | 388 | __func__); |
513 | return retval; | 389 | return retval; |
514 | } | 390 | } |
515 | 391 | *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS); | |
516 | *status = (((slot_status & MRL_STATE) >> 5) == 0) ? 0 : 1; | ||
517 | |||
518 | return 0; | 392 | return 0; |
519 | } | 393 | } |
520 | 394 | ||
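The shift-and-compare extraction of MRL_STATE gives way to the standard !!(reg & mask) idiom, which yields exactly 0 or 1 without hard-coding the bit position; the same simplification is applied to hpc_get_adapter_status(), hpc_query_power_fault() and hpc_get_emi_status() below. A compilable illustration of the equivalence (the mask value mirrors pci_regs.h):

#include <stdint.h>
#include <assert.h>

#define PCI_EXP_SLTSTA_MRLSS 0x0020 /* MRL Sensor State, bit 5 */

int main(void)
{
        uint16_t slot_status = 0x0024; /* MRL open plus an unrelated bit */

        uint8_t old_way = (((slot_status & PCI_EXP_SLTSTA_MRLSS) >> 5) == 0) ? 0 : 1;
        uint8_t new_way = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);

        assert(old_way == new_way); /* same result, no bit position needed */
        return 0;
}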
@@ -522,18 +396,15 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status) | |||
522 | { | 396 | { |
523 | struct controller *ctrl = slot->ctrl; | 397 | struct controller *ctrl = slot->ctrl; |
524 | u16 slot_status; | 398 | u16 slot_status; |
525 | u8 card_state; | 399 | int retval; |
526 | int retval = 0; | ||
527 | 400 | ||
528 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); | 401 | retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); |
529 | if (retval) { | 402 | if (retval) { |
530 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", | 403 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", |
531 | __func__); | 404 | __func__); |
532 | return retval; | 405 | return retval; |
533 | } | 406 | } |
534 | card_state = (u8)((slot_status & PRSN_STATE) >> 6); | 407 | *status = !!(slot_status & PCI_EXP_SLTSTA_PDS); |
535 | *status = (card_state == 1) ? 1 : 0; | ||
536 | |||
537 | return 0; | 408 | return 0; |
538 | } | 409 | } |
539 | 410 | ||
@@ -541,32 +412,28 @@ static int hpc_query_power_fault(struct slot *slot) | |||
541 | { | 412 | { |
542 | struct controller *ctrl = slot->ctrl; | 413 | struct controller *ctrl = slot->ctrl; |
543 | u16 slot_status; | 414 | u16 slot_status; |
544 | u8 pwr_fault; | 415 | int retval; |
545 | int retval = 0; | ||
546 | 416 | ||
547 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); | 417 | retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); |
548 | if (retval) { | 418 | if (retval) { |
549 | ctrl_err(ctrl, "Cannot check for power fault\n"); | 419 | ctrl_err(ctrl, "Cannot check for power fault\n"); |
550 | return retval; | 420 | return retval; |
551 | } | 421 | } |
552 | pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1); | 422 | return !!(slot_status & PCI_EXP_SLTSTA_PFD); |
553 | |||
554 | return pwr_fault; | ||
555 | } | 423 | } |
556 | 424 | ||
557 | static int hpc_get_emi_status(struct slot *slot, u8 *status) | 425 | static int hpc_get_emi_status(struct slot *slot, u8 *status) |
558 | { | 426 | { |
559 | struct controller *ctrl = slot->ctrl; | 427 | struct controller *ctrl = slot->ctrl; |
560 | u16 slot_status; | 428 | u16 slot_status; |
561 | int retval = 0; | 429 | int retval; |
562 | 430 | ||
563 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); | 431 | retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); |
564 | if (retval) { | 432 | if (retval) { |
565 | ctrl_err(ctrl, "Cannot check EMI status\n"); | 433 | ctrl_err(ctrl, "Cannot check EMI status\n"); |
566 | return retval; | 434 | return retval; |
567 | } | 435 | } |
568 | *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT; | 436 | *status = !!(slot_status & PCI_EXP_SLTSTA_EIS); |
569 | |||
570 | return retval; | 437 | return retval; |
571 | } | 438 | } |
572 | 439 | ||
@@ -576,8 +443,8 @@ static int hpc_toggle_emi(struct slot *slot) | |||
576 | u16 cmd_mask; | 443 | u16 cmd_mask; |
577 | int rc; | 444 | int rc; |
578 | 445 | ||
579 | slot_cmd = EMI_CTRL; | 446 | slot_cmd = PCI_EXP_SLTCTL_EIC; |
580 | cmd_mask = EMI_CTRL; | 447 | cmd_mask = PCI_EXP_SLTCTL_EIC; |
581 | rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask); | 448 | rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask); |
582 | slot->last_emi_toggle = get_seconds(); | 449 | slot->last_emi_toggle = get_seconds(); |
583 | 450 | ||
@@ -591,7 +458,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value) | |||
591 | u16 cmd_mask; | 458 | u16 cmd_mask; |
592 | int rc; | 459 | int rc; |
593 | 460 | ||
594 | cmd_mask = ATTN_LED_CTRL; | 461 | cmd_mask = PCI_EXP_SLTCTL_AIC; |
595 | switch (value) { | 462 | switch (value) { |
596 | case 0 : /* turn off */ | 463 | case 0 : /* turn off */ |
597 | slot_cmd = 0x00C0; | 464 | slot_cmd = 0x00C0; |
@@ -607,7 +474,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value) | |||
607 | } | 474 | } |
608 | rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 475 | rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
609 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", | 476 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", |
610 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 477 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); |
611 | 478 | ||
612 | return rc; | 479 | return rc; |
613 | } | 480 | } |
@@ -619,10 +486,10 @@ static void hpc_set_green_led_on(struct slot *slot) | |||
619 | u16 cmd_mask; | 486 | u16 cmd_mask; |
620 | 487 | ||
621 | slot_cmd = 0x0100; | 488 | slot_cmd = 0x0100; |
622 | cmd_mask = PWR_LED_CTRL; | 489 | cmd_mask = PCI_EXP_SLTCTL_PIC; |
623 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 490 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
624 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", | 491 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", |
625 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 492 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); |
626 | } | 493 | } |
627 | 494 | ||
628 | static void hpc_set_green_led_off(struct slot *slot) | 495 | static void hpc_set_green_led_off(struct slot *slot) |
@@ -632,10 +499,10 @@ static void hpc_set_green_led_off(struct slot *slot) | |||
632 | u16 cmd_mask; | 499 | u16 cmd_mask; |
633 | 500 | ||
634 | slot_cmd = 0x0300; | 501 | slot_cmd = 0x0300; |
635 | cmd_mask = PWR_LED_CTRL; | 502 | cmd_mask = PCI_EXP_SLTCTL_PIC; |
636 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 503 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
637 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", | 504 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", |
638 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 505 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); |
639 | } | 506 | } |
640 | 507 | ||
641 | static void hpc_set_green_led_blink(struct slot *slot) | 508 | static void hpc_set_green_led_blink(struct slot *slot) |
@@ -645,10 +512,10 @@ static void hpc_set_green_led_blink(struct slot *slot) | |||
645 | u16 cmd_mask; | 512 | u16 cmd_mask; |
646 | 513 | ||
647 | slot_cmd = 0x0200; | 514 | slot_cmd = 0x0200; |
648 | cmd_mask = PWR_LED_CTRL; | 515 | cmd_mask = PCI_EXP_SLTCTL_PIC; |
649 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 516 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
650 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", | 517 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", |
651 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 518 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); |
652 | } | 519 | } |
653 | 520 | ||
654 | static int hpc_power_on_slot(struct slot * slot) | 521 | static int hpc_power_on_slot(struct slot * slot) |
@@ -662,15 +529,15 @@ static int hpc_power_on_slot(struct slot * slot) | |||
662 | ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot); | 529 | ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot); |
663 | 530 | ||
664 | /* Clear sticky power-fault bit from previous power failures */ | 531 | /* Clear sticky power-fault bit from previous power failures */ |
665 | retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); | 532 | retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); |
666 | if (retval) { | 533 | if (retval) { |
667 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", | 534 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", |
668 | __func__); | 535 | __func__); |
669 | return retval; | 536 | return retval; |
670 | } | 537 | } |
671 | slot_status &= PWR_FAULT_DETECTED; | 538 | slot_status &= PCI_EXP_SLTSTA_PFD; |
672 | if (slot_status) { | 539 | if (slot_status) { |
673 | retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status); | 540 | retval = pciehp_writew(ctrl, PCI_EXP_SLTSTA, slot_status); |
674 | if (retval) { | 541 | if (retval) { |
675 | ctrl_err(ctrl, | 542 | ctrl_err(ctrl, |
676 | "%s: Cannot write to SLOTSTATUS register\n", | 543 | "%s: Cannot write to SLOTSTATUS register\n", |
@@ -680,13 +547,13 @@ static int hpc_power_on_slot(struct slot * slot) | |||
680 | } | 547 | } |
681 | 548 | ||
682 | slot_cmd = POWER_ON; | 549 | slot_cmd = POWER_ON; |
683 | cmd_mask = PWR_CTRL; | 550 | cmd_mask = PCI_EXP_SLTCTL_PCC; |
684 | /* Enable detection that we turned off at slot power-off time */ | 551 | /* Enable detection that we turned off at slot power-off time */ |
685 | if (!pciehp_poll_mode) { | 552 | if (!pciehp_poll_mode) { |
686 | slot_cmd |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | | 553 | slot_cmd |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | |
687 | PRSN_DETECT_ENABLE); | 554 | PCI_EXP_SLTCTL_PDCE); |
688 | cmd_mask |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | | 555 | cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | |
689 | PRSN_DETECT_ENABLE); | 556 | PCI_EXP_SLTCTL_PDCE); |
690 | } | 557 | } |
691 | 558 | ||
692 | retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 559 | retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
@@ -696,7 +563,7 @@ static int hpc_power_on_slot(struct slot * slot) | |||
696 | return -1; | 563 | return -1; |
697 | } | 564 | } |
698 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", | 565 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", |
699 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 566 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); |
700 | 567 | ||
701 | return retval; | 568 | return retval; |
702 | } | 569 | } |
@@ -753,7 +620,7 @@ static int hpc_power_off_slot(struct slot * slot) | |||
753 | changed = pcie_mask_bad_dllp(ctrl); | 620 | changed = pcie_mask_bad_dllp(ctrl); |
754 | 621 | ||
755 | slot_cmd = POWER_OFF; | 622 | slot_cmd = POWER_OFF; |
756 | cmd_mask = PWR_CTRL; | 623 | cmd_mask = PCI_EXP_SLTCTL_PCC; |
757 | /* | 624 | /* |
758 | * If we get MRL or presence detect interrupts now, the isr | 625 | * If we get MRL or presence detect interrupts now, the isr |
759 | * will notice the sticky power-fault bit too and issue power | 626 | * will notice the sticky power-fault bit too and issue power |
@@ -762,10 +629,10 @@ static int hpc_power_off_slot(struct slot * slot) | |||
762 | * till the slot is powered on again. | 629 | * till the slot is powered on again. |
763 | */ | 630 | */ |
764 | if (!pciehp_poll_mode) { | 631 | if (!pciehp_poll_mode) { |
765 | slot_cmd &= ~(PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | | 632 | slot_cmd &= ~(PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | |
766 | PRSN_DETECT_ENABLE); | 633 | PCI_EXP_SLTCTL_PDCE); |
767 | cmd_mask |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | | 634 | cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | |
768 | PRSN_DETECT_ENABLE); | 635 | PCI_EXP_SLTCTL_PDCE); |
769 | } | 636 | } |
770 | 637 | ||
771 | retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 638 | retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
@@ -775,7 +642,7 @@ static int hpc_power_off_slot(struct slot * slot) | |||
775 | goto out; | 642 | goto out; |
776 | } | 643 | } |
777 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", | 644 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", |
778 | __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 645 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); |
779 | out: | 646 | out: |
780 | if (changed) | 647 | if (changed) |
781 | pcie_unmask_bad_dllp(ctrl); | 648 | pcie_unmask_bad_dllp(ctrl); |
@@ -796,19 +663,19 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) | |||
796 | */ | 663 | */ |
797 | intr_loc = 0; | 664 | intr_loc = 0; |
798 | do { | 665 | do { |
799 | if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) { | 666 | if (pciehp_readw(ctrl, PCI_EXP_SLTSTA, &detected)) { |
800 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n", | 667 | ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n", |
801 | __func__); | 668 | __func__); |
802 | return IRQ_NONE; | 669 | return IRQ_NONE; |
803 | } | 670 | } |
804 | 671 | ||
805 | detected &= (ATTN_BUTTN_PRESSED | PWR_FAULT_DETECTED | | 672 | detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | |
806 | MRL_SENS_CHANGED | PRSN_DETECT_CHANGED | | 673 | PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | |
807 | CMD_COMPLETED); | 674 | PCI_EXP_SLTSTA_CC); |
808 | intr_loc |= detected; | 675 | intr_loc |= detected; |
809 | if (!intr_loc) | 676 | if (!intr_loc) |
810 | return IRQ_NONE; | 677 | return IRQ_NONE; |
811 | if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) { | 678 | if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, detected)) { |
812 | ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n", | 679 | ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n", |
813 | __func__); | 680 | __func__); |
814 | return IRQ_NONE; | 681 | return IRQ_NONE; |
@@ -818,31 +685,31 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) | |||
818 | ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc); | 685 | ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc); |
819 | 686 | ||
820 | /* Check Command Complete Interrupt Pending */ | 687 | /* Check Command Complete Interrupt Pending */ |
821 | if (intr_loc & CMD_COMPLETED) { | 688 | if (intr_loc & PCI_EXP_SLTSTA_CC) { |
822 | ctrl->cmd_busy = 0; | 689 | ctrl->cmd_busy = 0; |
823 | smp_mb(); | 690 | smp_mb(); |
824 | wake_up(&ctrl->queue); | 691 | wake_up(&ctrl->queue); |
825 | } | 692 | } |
826 | 693 | ||
827 | if (!(intr_loc & ~CMD_COMPLETED)) | 694 | if (!(intr_loc & ~PCI_EXP_SLTSTA_CC)) |
828 | return IRQ_HANDLED; | 695 | return IRQ_HANDLED; |
829 | 696 | ||
830 | p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); | 697 | p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); |
831 | 698 | ||
832 | /* Check MRL Sensor Changed */ | 699 | /* Check MRL Sensor Changed */ |
833 | if (intr_loc & MRL_SENS_CHANGED) | 700 | if (intr_loc & PCI_EXP_SLTSTA_MRLSC) |
834 | pciehp_handle_switch_change(p_slot); | 701 | pciehp_handle_switch_change(p_slot); |
835 | 702 | ||
836 | /* Check Attention Button Pressed */ | 703 | /* Check Attention Button Pressed */ |
837 | if (intr_loc & ATTN_BUTTN_PRESSED) | 704 | if (intr_loc & PCI_EXP_SLTSTA_ABP) |
838 | pciehp_handle_attention_button(p_slot); | 705 | pciehp_handle_attention_button(p_slot); |
839 | 706 | ||
840 | /* Check Presence Detect Changed */ | 707 | /* Check Presence Detect Changed */ |
841 | if (intr_loc & PRSN_DETECT_CHANGED) | 708 | if (intr_loc & PCI_EXP_SLTSTA_PDC) |
842 | pciehp_handle_presence_change(p_slot); | 709 | pciehp_handle_presence_change(p_slot); |
843 | 710 | ||
844 | /* Check Power Fault Detected */ | 711 | /* Check Power Fault Detected */ |
845 | if (intr_loc & PWR_FAULT_DETECTED) | 712 | if (intr_loc & PCI_EXP_SLTSTA_PFD) |
846 | pciehp_handle_power_fault(p_slot); | 713 | pciehp_handle_power_fault(p_slot); |
847 | 714 | ||
848 | return IRQ_HANDLED; | 715 | return IRQ_HANDLED; |
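The interrupt handler keeps its shape but now masks, acknowledges and dispatches on the generic PCI_EXP_SLTSTA_* event bits. A condensed standalone sketch of one pass of that read/mask/write-one-to-clear/dispatch cycle (the handlers are stand-in printfs; bit values mirror pci_regs.h):

#include <stdio.h>
#include <stdint.h>

#define PCI_EXP_SLTSTA_ABP   0x0001 /* Attention Button Pressed */
#define PCI_EXP_SLTSTA_PFD   0x0002 /* Power Fault Detected */
#define PCI_EXP_SLTSTA_MRLSC 0x0004 /* MRL Sensor Changed */
#define PCI_EXP_SLTSTA_PDC   0x0008 /* Presence Detect Changed */
#define PCI_EXP_SLTSTA_CC    0x0010 /* Command Completed */

static uint16_t fake_sltsta = PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC;

static int isr_once(void)
{
        uint16_t events = PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
                          PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
                          PCI_EXP_SLTSTA_CC;
        uint16_t intr_loc = fake_sltsta & events;

        if (!intr_loc)
                return 0;             /* IRQ_NONE: not our interrupt */
        fake_sltsta &= ~intr_loc;     /* ack: event bits are RW1C */

        if (intr_loc & PCI_EXP_SLTSTA_CC)
                printf("command completed\n");
        if (intr_loc & PCI_EXP_SLTSTA_MRLSC)
                printf("MRL sensor changed\n");
        if (intr_loc & PCI_EXP_SLTSTA_ABP)
                printf("attention button pressed\n");
        if (intr_loc & PCI_EXP_SLTSTA_PDC)
                printf("presence detect changed\n");
        if (intr_loc & PCI_EXP_SLTSTA_PFD)
                printf("power fault detected\n");
        return 1;                     /* IRQ_HANDLED */
}

int main(void) { return !isr_once(); }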
@@ -855,7 +722,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) | |||
855 | u32 lnk_cap; | 722 | u32 lnk_cap; |
856 | int retval = 0; | 723 | int retval = 0; |
857 | 724 | ||
858 | retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); | 725 | retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap); |
859 | if (retval) { | 726 | if (retval) { |
860 | ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); | 727 | ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); |
861 | return retval; | 728 | return retval; |
@@ -884,13 +751,13 @@ static int hpc_get_max_lnk_width(struct slot *slot, | |||
884 | u32 lnk_cap; | 751 | u32 lnk_cap; |
885 | int retval = 0; | 752 | int retval = 0; |
886 | 753 | ||
887 | retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); | 754 | retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap); |
888 | if (retval) { | 755 | if (retval) { |
889 | ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); | 756 | ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); |
890 | return retval; | 757 | return retval; |
891 | } | 758 | } |
892 | 759 | ||
893 | switch ((lnk_cap & 0x03F0) >> 4){ | 760 | switch ((lnk_cap & PCI_EXP_LNKSTA_NLW) >> 4){ |
894 | case 0: | 761 | case 0: |
895 | lnk_wdth = PCIE_LNK_WIDTH_RESRV; | 762 | lnk_wdth = PCIE_LNK_WIDTH_RESRV; |
896 | break; | 763 | break; |
@@ -933,14 +800,14 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) | |||
933 | int retval = 0; | 800 | int retval = 0; |
934 | u16 lnk_status; | 801 | u16 lnk_status; |
935 | 802 | ||
936 | retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); | 803 | retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); |
937 | if (retval) { | 804 | if (retval) { |
938 | ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", | 805 | ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", |
939 | __func__); | 806 | __func__); |
940 | return retval; | 807 | return retval; |
941 | } | 808 | } |
942 | 809 | ||
943 | switch (lnk_status & 0x0F) { | 810 | switch (lnk_status & PCI_EXP_LNKSTA_CLS) { |
944 | case 1: | 811 | case 1: |
945 | lnk_speed = PCIE_2PT5GB; | 812 | lnk_speed = PCIE_2PT5GB; |
946 | break; | 813 | break; |
@@ -963,14 +830,14 @@ static int hpc_get_cur_lnk_width(struct slot *slot, | |||
963 | int retval = 0; | 830 | int retval = 0; |
964 | u16 lnk_status; | 831 | u16 lnk_status; |
965 | 832 | ||
966 | retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); | 833 | retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); |
967 | if (retval) { | 834 | if (retval) { |
968 | ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", | 835 | ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", |
969 | __func__); | 836 | __func__); |
970 | return retval; | 837 | return retval; |
971 | } | 838 | } |
972 | 839 | ||
973 | switch ((lnk_status & 0x03F0) >> 4){ | 840 | switch ((lnk_status & PCI_EXP_LNKSTA_NLW) >> 4){ |
974 | case 0: | 841 | case 0: |
975 | lnk_wdth = PCIE_LNK_WIDTH_RESRV; | 842 | lnk_wdth = PCIE_LNK_WIDTH_RESRV; |
976 | break; | 843 | break; |
@@ -1036,18 +903,19 @@ int pcie_enable_notification(struct controller *ctrl) | |||
1036 | { | 903 | { |
1037 | u16 cmd, mask; | 904 | u16 cmd, mask; |
1038 | 905 | ||
1039 | cmd = PRSN_DETECT_ENABLE; | 906 | cmd = PCI_EXP_SLTCTL_PDCE; |
1040 | if (ATTN_BUTTN(ctrl)) | 907 | if (ATTN_BUTTN(ctrl)) |
1041 | cmd |= ATTN_BUTTN_ENABLE; | 908 | cmd |= PCI_EXP_SLTCTL_ABPE; |
1042 | if (POWER_CTRL(ctrl)) | 909 | if (POWER_CTRL(ctrl)) |
1043 | cmd |= PWR_FAULT_DETECT_ENABLE; | 910 | cmd |= PCI_EXP_SLTCTL_PFDE; |
1044 | if (MRL_SENS(ctrl)) | 911 | if (MRL_SENS(ctrl)) |
1045 | cmd |= MRL_DETECT_ENABLE; | 912 | cmd |= PCI_EXP_SLTCTL_MRLSCE; |
1046 | if (!pciehp_poll_mode) | 913 | if (!pciehp_poll_mode) |
1047 | cmd |= HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; | 914 | cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE; |
1048 | 915 | ||
1049 | mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | | 916 | mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | |
1050 | PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; | 917 | PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | |
918 | PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE); | ||
1051 | 919 | ||
1052 | if (pcie_write_cmd(ctrl, cmd, mask)) { | 920 | if (pcie_write_cmd(ctrl, cmd, mask)) { |
1053 | ctrl_err(ctrl, "Cannot enable software notification\n"); | 921 | ctrl_err(ctrl, "Cannot enable software notification\n"); |
@@ -1059,8 +927,9 @@ int pcie_enable_notification(struct controller *ctrl) | |||
1059 | static void pcie_disable_notification(struct controller *ctrl) | 927 | static void pcie_disable_notification(struct controller *ctrl) |
1060 | { | 928 | { |
1061 | u16 mask; | 929 | u16 mask; |
1062 | mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | | 930 | mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | |
1063 | PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; | 931 | PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | |
932 | PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE); | ||
1064 | if (pcie_write_cmd(ctrl, 0, mask)) | 933 | if (pcie_write_cmd(ctrl, 0, mask)) |
1065 | ctrl_warn(ctrl, "Cannot disable software notification\n"); | 934 | ctrl_warn(ctrl, "Cannot disable software notification\n"); |
1066 | } | 935 | } |
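Both the enable and disable paths now build the same six-bit mask from the generic constants, with the enable path additionally gating each command bit on the slot's advertised capabilities. A sketch of that capability-driven composition (the capability flags are illustrative booleans standing in for the ATTN_BUTTN()/POWER_CTRL()/MRL_SENS() macros; constants mirror pci_regs.h):

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_SLTCTL_ABPE   0x0001 /* Attention Button Pressed Enable */
#define PCI_EXP_SLTCTL_PFDE   0x0002 /* Power Fault Detected Enable */
#define PCI_EXP_SLTCTL_MRLSCE 0x0004 /* MRL Sensor Changed Enable */
#define PCI_EXP_SLTCTL_PDCE   0x0008 /* Presence Detect Changed Enable */
#define PCI_EXP_SLTCTL_CCIE   0x0010 /* Command Completed Interrupt Enable */
#define PCI_EXP_SLTCTL_HPIE   0x0020 /* Hot-Plug Interrupt Enable */

static uint16_t notification_cmd(int attn_btn, int power_ctrl,
                                 int mrl_sens, int poll_mode)
{
        uint16_t cmd = PCI_EXP_SLTCTL_PDCE; /* always watch presence */

        if (attn_btn)
                cmd |= PCI_EXP_SLTCTL_ABPE;
        if (power_ctrl)
                cmd |= PCI_EXP_SLTCTL_PFDE;
        if (mrl_sens)
                cmd |= PCI_EXP_SLTCTL_MRLSCE;
        if (!poll_mode)
                cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;
        return cmd;
}

int main(void)
{
        /* slot with a button and power control, interrupts enabled */
        printf("cmd = %#06x\n", notification_cmd(1, 1, 0, 0));
        return 0;
}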
@@ -1157,9 +1026,9 @@ static inline void dbg_ctrl(struct controller *ctrl) | |||
1157 | EMI(ctrl) ? "yes" : "no"); | 1026 | EMI(ctrl) ? "yes" : "no"); |
1158 | ctrl_info(ctrl, " Command Completed : %3s\n", | 1027 | ctrl_info(ctrl, " Command Completed : %3s\n", |
1159 | NO_CMD_CMPL(ctrl) ? "no" : "yes"); | 1028 | NO_CMD_CMPL(ctrl) ? "no" : "yes"); |
1160 | pciehp_readw(ctrl, SLOTSTATUS, ®16); | 1029 | pciehp_readw(ctrl, PCI_EXP_SLTSTA, ®16); |
1161 | ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); | 1030 | ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); |
1162 | pciehp_readw(ctrl, SLOTCTRL, ®16); | 1031 | pciehp_readw(ctrl, PCI_EXP_SLTCTL, ®16); |
1163 | ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16); | 1032 | ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16); |
1164 | } | 1033 | } |
1165 | 1034 | ||
@@ -1183,7 +1052,7 @@ struct controller *pcie_init(struct pcie_device *dev) | |||
1183 | ctrl_err(ctrl, "Cannot find PCI Express capability\n"); | 1052 | ctrl_err(ctrl, "Cannot find PCI Express capability\n"); |
1184 | goto abort_ctrl; | 1053 | goto abort_ctrl; |
1185 | } | 1054 | } |
1186 | if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) { | 1055 | if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) { |
1187 | ctrl_err(ctrl, "Cannot read SLOTCAP register\n"); | 1056 | ctrl_err(ctrl, "Cannot read SLOTCAP register\n"); |
1188 | goto abort_ctrl; | 1057 | goto abort_ctrl; |
1189 | } | 1058 | } |
@@ -1208,17 +1077,17 @@ struct controller *pcie_init(struct pcie_device *dev) | |||
1208 | ctrl->no_cmd_complete = 1; | 1077 | ctrl->no_cmd_complete = 1; |
1209 | 1078 | ||
1210 | /* Check if Data Link Layer Link Active Reporting is implemented */ | 1079 | /* Check if Data Link Layer Link Active Reporting is implemented */ |
1211 | if (pciehp_readl(ctrl, LNKCAP, &link_cap)) { | 1080 | if (pciehp_readl(ctrl, PCI_EXP_LNKCAP, &link_cap)) { |
1212 | ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); | 1081 | ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); |
1213 | goto abort_ctrl; | 1082 | goto abort_ctrl; |
1214 | } | 1083 | } |
1215 | if (link_cap & LINK_ACTIVE_REPORTING) { | 1084 | if (link_cap & PCI_EXP_LNKCAP_DLLLARC) { |
1216 | ctrl_dbg(ctrl, "Link Active Reporting supported\n"); | 1085 | ctrl_dbg(ctrl, "Link Active Reporting supported\n"); |
1217 | ctrl->link_active_reporting = 1; | 1086 | ctrl->link_active_reporting = 1; |
1218 | } | 1087 | } |
1219 | 1088 | ||
1220 | /* Clear all remaining event bits in Slot Status register */ | 1089 | /* Clear all remaining event bits in Slot Status register */ |
1221 | if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) | 1090 | if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f)) |
1222 | goto abort_ctrl; | 1091 | goto abort_ctrl; |
1223 | 1092 | ||
1224 | /* Disable software notification */ | 1093 |
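Net effect of the pciehp_hpc.c changes: the driver-private ctrl_reg struct, its offsetof() enum and every ad-hoc bit define are dropped in favour of the PCI_EXP_* offsets and field names that <linux/pci_regs.h> already provides, so the same registers are named the same way everywhere in the tree. A hedged kernel-style sketch of reading Slot Status through those shared definitions (error handling trimmed; cap_base mirrors ctrl->cap_base as found with pci_find_capability()):

#include <linux/pci.h>

/* Sketch: read Slot Status via the generic offsets, as pciehp_readw()
 * now effectively does. 'cap_base' is the PCI Express capability
 * position from pci_find_capability(dev, PCI_CAP_ID_EXP). */
static int read_slot_status(struct pci_dev *dev, int cap_base, u16 *sta)
{
        return pci_read_config_word(dev, cap_base + PCI_EXP_SLTSTA, sta);
}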
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index 9c2a22fed18b..4e3e0382c16e 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c | |||
@@ -14,6 +14,9 @@ | |||
14 | * as published by the Free Software Foundation; either version | 14 | * as published by the Free Software Foundation; either version |
15 | * 2 of the License, or (at your option) any later version. | 15 | * 2 of the License, or (at your option) any later version. |
16 | */ | 16 | */ |
17 | |||
18 | #undef DEBUG | ||
19 | |||
17 | #include <linux/init.h> | 20 | #include <linux/init.h> |
18 | #include <linux/pci.h> | 21 | #include <linux/pci.h> |
19 | #include <linux/string.h> | 22 | #include <linux/string.h> |
@@ -151,20 +154,20 @@ static void dlpar_pci_add_bus(struct device_node *dn) | |||
151 | return; | 154 | return; |
152 | } | 155 | } |
153 | 156 | ||
157 | /* Scan below the new bridge */ | ||
154 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || | 158 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || |
155 | dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) | 159 | dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) |
156 | of_scan_pci_bridge(dn, dev); | 160 | of_scan_pci_bridge(dn, dev); |
157 | 161 | ||
158 | pcibios_fixup_new_pci_devices(dev->subordinate); | ||
159 | |||
160 | /* Claim new bus resources */ | ||
161 | pcibios_claim_one_bus(dev->bus); | ||
162 | |||
163 | /* Map IO space for child bus, which may or may not succeed */ | 162 | /* Map IO space for child bus, which may or may not succeed */ |
164 | pcibios_map_io_space(dev->subordinate); | 163 | pcibios_map_io_space(dev->subordinate); |
165 | 164 | ||
166 | /* Add new devices to global lists. Register in proc, sysfs. */ | 165 | /* Finish adding it: resource allocation, adding devices, etc...
167 | pci_bus_add_devices(phb->bus); | 166 | * Note that we need to perform the finish pass on the -parent- |
167 | * bus of the EADS bridge so the bridge device itself gets | ||
168 | * properly added | ||
169 | */ | ||
170 | pcibios_finish_adding_to_bus(phb->bus); | ||
168 | } | 171 | } |
169 | 172 | ||
170 | static int dlpar_add_pci_slot(char *drc_name, struct device_node *dn) | 173 | static int dlpar_add_pci_slot(char *drc_name, struct device_node *dn) |
@@ -203,27 +206,6 @@ static int dlpar_add_pci_slot(char *drc_name, struct device_node *dn) | |||
203 | return 0; | 206 | return 0; |
204 | } | 207 | } |
205 | 208 | ||
206 | static int dlpar_remove_root_bus(struct pci_controller *phb) | ||
207 | { | ||
208 | struct pci_bus *phb_bus; | ||
209 | int rc; | ||
210 | |||
211 | phb_bus = phb->bus; | ||
212 | if (!(list_empty(&phb_bus->children) && | ||
213 | list_empty(&phb_bus->devices))) { | ||
214 | return -EBUSY; | ||
215 | } | ||
216 | |||
217 | rc = pcibios_remove_root_bus(phb); | ||
218 | if (rc) | ||
219 | return -EIO; | ||
220 | |||
221 | device_unregister(phb_bus->bridge); | ||
222 | pci_remove_bus(phb_bus); | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static int dlpar_remove_phb(char *drc_name, struct device_node *dn) | 209 | static int dlpar_remove_phb(char *drc_name, struct device_node *dn) |
228 | { | 210 | { |
229 | struct slot *slot; | 211 | struct slot *slot; |
@@ -235,18 +217,15 @@ static int dlpar_remove_phb(char *drc_name, struct device_node *dn) | |||
235 | 217 | ||
236 | /* If pci slot is hotpluggable, use hotplug to remove it */ | 218 |
237 | slot = find_php_slot(dn); | 219 | slot = find_php_slot(dn); |
238 | if (slot) { | 220 | if (slot && rpaphp_deregister_slot(slot)) { |
239 | if (rpaphp_deregister_slot(slot)) { | 221 | printk(KERN_ERR "%s: unable to remove hotplug slot %s\n", |
240 | printk(KERN_ERR | 222 | __func__, drc_name); |
241 | "%s: unable to remove hotplug slot %s\n", | 223 | return -EIO; |
242 | __func__, drc_name); | ||
243 | return -EIO; | ||
244 | } | ||
245 | } | 224 | } |
246 | 225 | ||
247 | pdn = dn->data; | 226 | pdn = dn->data; |
248 | BUG_ON(!pdn || !pdn->phb); | 227 | BUG_ON(!pdn || !pdn->phb); |
249 | rc = dlpar_remove_root_bus(pdn->phb); | 228 | rc = remove_phb_dynamic(pdn->phb); |
250 | if (rc < 0) | 229 | if (rc < 0) |
251 | return rc; | 230 | return rc; |
252 | 231 | ||
@@ -378,26 +357,38 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) | |||
378 | if (!bus) | 357 | if (!bus) |
379 | return -EINVAL; | 358 | return -EINVAL; |
380 | 359 | ||
381 | /* If pci slot is hotplugable, use hotplug to remove it */ | 360 | pr_debug("PCI: Removing PCI slot below EADS bridge %s\n", |
361 | bus->self ? pci_name(bus->self) : "<!PHB!>"); | ||
362 | |||
382 | slot = find_php_slot(dn); | 363 | slot = find_php_slot(dn); |
383 | if (slot) { | 364 | if (slot) { |
365 | pr_debug("PCI: Removing hotplug slot for %04x:%02x...\n", | ||
366 | pci_domain_nr(bus), bus->number); | ||
367 | |||
384 | if (rpaphp_deregister_slot(slot)) { | 368 | if (rpaphp_deregister_slot(slot)) { |
385 | printk(KERN_ERR | 369 | printk(KERN_ERR |
386 | "%s: unable to remove hotplug slot %s\n", | 370 | "%s: unable to remove hotplug slot %s\n", |
387 | __func__, drc_name); | 371 | __func__, drc_name); |
388 | return -EIO; | 372 | return -EIO; |
389 | } | 373 | } |
390 | } else | 374 | } |
391 | pcibios_remove_pci_devices(bus); | 375 | |
376 | /* Remove all devices below slot */ | ||
377 | pcibios_remove_pci_devices(bus); | ||
392 | 378 | ||
379 | /* Unmap PCI IO space */ | ||
393 | if (pcibios_unmap_io_space(bus)) { | 380 | if (pcibios_unmap_io_space(bus)) { |
394 | printk(KERN_ERR "%s: failed to unmap bus range\n", | 381 | printk(KERN_ERR "%s: failed to unmap bus range\n", |
395 | __func__); | 382 | __func__); |
396 | return -ERANGE; | 383 | return -ERANGE; |
397 | } | 384 | } |
398 | 385 | ||
386 | /* Remove the EADS bridge device itself */ | ||
399 | BUG_ON(!bus->self); | 387 | BUG_ON(!bus->self); |
388 | pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self)); | ||
389 | eeh_remove_bus_device(bus->self); | ||
400 | pci_remove_bus_device(bus->self); | 390 | pci_remove_bus_device(bus->self); |
391 | |||
401 | return 0; | 392 | return 0; |
402 | } | 393 | } |
403 | 394 | ||
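The reworked dlpar_remove_pci_slot() makes the teardown ordering explicit: deregister the hotplug slot, then always remove the child devices, unmap the IO window, and finally remove the EADS bridge itself with its EEH state detached first. A condensed sketch of that sequence using the function names from the patch (a sketch only; error paths and debug output trimmed):

/* Sketch: the teardown order dlpar_remove_pci_slot() now follows. */
static int remove_slot_sketch(struct pci_bus *bus, struct slot *slot)
{
        if (slot && rpaphp_deregister_slot(slot))  /* 1. drop hotplug slot */
                return -EIO;
        pcibios_remove_pci_devices(bus);           /* 2. remove devices below */
        if (pcibios_unmap_io_space(bus))           /* 3. unmap PCI IO space */
                return -ERANGE;
        eeh_remove_bus_device(bus->self);          /* 4. detach EEH state */
        pci_remove_bus_device(bus->self);          /* 5. remove the bridge */
        return 0;
}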
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 5c8baa43ac9c..235fb7a5a8a5 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/irq.h> | 28 | #include <linux/irq.h> |
29 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
30 | #include <linux/sysdev.h> | ||
31 | #include <linux/spinlock.h> | 30 | #include <linux/spinlock.h> |
32 | #include <linux/pci.h> | 31 | #include <linux/pci.h> |
33 | #include <linux/dmar.h> | 32 | #include <linux/dmar.h> |
@@ -35,6 +34,7 @@ | |||
35 | #include <linux/mempool.h> | 34 | #include <linux/mempool.h> |
36 | #include <linux/timer.h> | 35 | #include <linux/timer.h> |
37 | #include <linux/iova.h> | 36 | #include <linux/iova.h> |
37 | #include <linux/iommu.h> | ||
38 | #include <linux/intel-iommu.h> | 38 | #include <linux/intel-iommu.h> |
39 | #include <asm/cacheflush.h> | 39 | #include <asm/cacheflush.h> |
40 | #include <asm/iommu.h> | 40 | #include <asm/iommu.h> |
@@ -54,6 +54,195 @@ | |||
54 | 54 | ||
55 | #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) | 55 | #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) |
56 | 56 | ||
57 | #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) | ||
58 | #define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) | ||
59 | #define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) | ||
60 | |||
61 | /* global iommu list, set NULL for ignored DMAR units */ | ||
62 | static struct intel_iommu **g_iommus; | ||
63 | |||
64 | /* | ||
65 | * 0: Present | ||
66 | * 1-11: Reserved | ||
67 | * 12-63: Context Ptr (12 - (haw-1)) | ||
68 | * 64-127: Reserved | ||
69 | */ | ||
70 | struct root_entry { | ||
71 | u64 val; | ||
72 | u64 rsvd1; | ||
73 | }; | ||
74 | #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry)) | ||
75 | static inline bool root_present(struct root_entry *root) | ||
76 | { | ||
77 | return (root->val & 1); | ||
78 | } | ||
79 | static inline void set_root_present(struct root_entry *root) | ||
80 | { | ||
81 | root->val |= 1; | ||
82 | } | ||
83 | static inline void set_root_value(struct root_entry *root, unsigned long value) | ||
84 | { | ||
85 | root->val |= value & VTD_PAGE_MASK; | ||
86 | } | ||
87 | |||
88 | static inline struct context_entry * | ||
89 | get_context_addr_from_root(struct root_entry *root) | ||
90 | { | ||
91 | return (struct context_entry *) | ||
92 | (root_present(root)?phys_to_virt( | ||
93 | root->val & VTD_PAGE_MASK) : | ||
94 | NULL); | ||
95 | } | ||
96 | |||
97 | /* | ||
98 | * low 64 bits: | ||
99 | * 0: present | ||
100 | * 1: fault processing disable | ||
101 | * 2-3: translation type | ||
102 | * 12-63: address space root | ||
103 | * high 64 bits: | ||
104 | * 0-2: address width | ||
105 | * 3-6: avail | ||
106 | * 8-23: domain id | ||
107 | */ | ||
108 | struct context_entry { | ||
109 | u64 lo; | ||
110 | u64 hi; | ||
111 | }; | ||
112 | |||
113 | static inline bool context_present(struct context_entry *context) | ||
114 | { | ||
115 | return (context->lo & 1); | ||
116 | } | ||
117 | static inline void context_set_present(struct context_entry *context) | ||
118 | { | ||
119 | context->lo |= 1; | ||
120 | } | ||
121 | |||
122 | static inline void context_set_fault_enable(struct context_entry *context) | ||
123 | { | ||
124 | context->lo &= (((u64)-1) << 2) | 1; | ||
125 | } | ||
126 | |||
127 | #define CONTEXT_TT_MULTI_LEVEL 0 | ||
128 | |||
129 | static inline void context_set_translation_type(struct context_entry *context, | ||
130 | unsigned long value) | ||
131 | { | ||
132 | context->lo &= (((u64)-1) << 4) | 3; | ||
133 | context->lo |= (value & 3) << 2; | ||
134 | } | ||
135 | |||
136 | static inline void context_set_address_root(struct context_entry *context, | ||
137 | unsigned long value) | ||
138 | { | ||
139 | context->lo |= value & VTD_PAGE_MASK; | ||
140 | } | ||
141 | |||
142 | static inline void context_set_address_width(struct context_entry *context, | ||
143 | unsigned long value) | ||
144 | { | ||
145 | context->hi |= value & 7; | ||
146 | } | ||
147 | |||
148 | static inline void context_set_domain_id(struct context_entry *context, | ||
149 | unsigned long value) | ||
150 | { | ||
151 | context->hi |= (value & ((1 << 16) - 1)) << 8; | ||
152 | } | ||
153 | |||
154 | static inline void context_clear_entry(struct context_entry *context) | ||
155 | { | ||
156 | context->lo = 0; | ||
157 | context->hi = 0; | ||
158 | } | ||
159 | |||
160 | /* | ||
161 | * 0: readable | ||
162 | * 1: writable | ||
163 | * 2-6: reserved | ||
164 | * 7: super page | ||
165 | * 8-11: available | ||
166 | * 12-63: Host physical address | ||
167 | */ | ||
168 | struct dma_pte { | ||
169 | u64 val; | ||
170 | }; | ||
171 | |||
172 | static inline void dma_clear_pte(struct dma_pte *pte) | ||
173 | { | ||
174 | pte->val = 0; | ||
175 | } | ||
176 | |||
177 | static inline void dma_set_pte_readable(struct dma_pte *pte) | ||
178 | { | ||
179 | pte->val |= DMA_PTE_READ; | ||
180 | } | ||
181 | |||
182 | static inline void dma_set_pte_writable(struct dma_pte *pte) | ||
183 | { | ||
184 | pte->val |= DMA_PTE_WRITE; | ||
185 | } | ||
186 | |||
187 | static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot) | ||
188 | { | ||
189 | pte->val = (pte->val & ~3) | (prot & 3); | ||
190 | } | ||
191 | |||
192 | static inline u64 dma_pte_addr(struct dma_pte *pte) | ||
193 | { | ||
194 | return (pte->val & VTD_PAGE_MASK); | ||
195 | } | ||
196 | |||
197 | static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr) | ||
198 | { | ||
199 | pte->val |= (addr & VTD_PAGE_MASK); | ||
200 | } | ||
201 | |||
202 | static inline bool dma_pte_present(struct dma_pte *pte) | ||
203 | { | ||
204 | return (pte->val & 3) != 0; | ||
205 | } | ||
206 | |||
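The dma_pte helpers above all manipulate one u64: the low two bits carry the read/write permissions and double as the presence test, while bits 12-63 hold the page-frame address. A standalone demonstration of the same bit layout (the constants mirror the kernel's intel-iommu definitions):

#include <stdio.h>
#include <stdint.h>

/* Values mirrored from the kernel's intel-iommu definitions. */
#define VTD_PAGE_SHIFT 12
#define VTD_PAGE_MASK  (~((1ULL << VTD_PAGE_SHIFT) - 1))
#define DMA_PTE_READ   1ULL
#define DMA_PTE_WRITE  2ULL

int main(void)
{
        uint64_t pte = 0;

        /* dma_set_pte_addr(): install the page-frame address */
        pte |= 0x12345000ULL & VTD_PAGE_MASK;
        /* dma_set_pte_readable() / dma_set_pte_writable() */
        pte |= DMA_PTE_READ | DMA_PTE_WRITE;

        /* dma_pte_present(): any of the low two bits set */
        printf("present=%d addr=%#llx\n",
               (pte & 3) != 0, (unsigned long long)(pte & VTD_PAGE_MASK));
        return 0;
}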
207 | /* devices under the same p2p bridge are owned in one domain */ | ||
208 | #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) | ||
209 | |||
210 | /* domain represents a virtual machine, more than one device | ||
211 | * across iommus may be owned in one domain, e.g. kvm guest. | ||
212 | */ | ||
213 | #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) | ||
214 | |||
215 | struct dmar_domain { | ||
216 | int id; /* domain id */ | ||
217 | unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ | ||
218 | |||
219 | struct list_head devices; /* all devices' list */ | ||
220 | struct iova_domain iovad; /* iova's that belong to this domain */ | ||
221 | |||
222 | struct dma_pte *pgd; /* virtual address */ | ||
223 | spinlock_t mapping_lock; /* page table lock */ | ||
224 | int gaw; /* max guest address width */ | ||
225 | |||
226 | /* adjusted guest address width, 0 is level 2 30-bit */ | ||
227 | int agaw; | ||
228 | |||
229 | int flags; /* flags to find out type of domain */ | ||
230 | |||
231 | int iommu_coherency;/* indicate coherency of iommu access */ | ||
232 | int iommu_count; /* reference count of iommu */ | ||
233 | spinlock_t iommu_lock; /* protect iommu set in domain */ | ||
234 | u64 max_addr; /* maximum mapped address */ | ||
235 | }; | ||
236 | |||
237 | /* PCI domain-device relationship */ | ||
238 | struct device_domain_info { | ||
239 | struct list_head link; /* link to domain siblings */ | ||
240 | struct list_head global; /* link to global list */ | ||
241 | u8 bus; /* PCI bus number */ | ||
242 | u8 devfn; /* PCI devfn number */ | ||
243 | struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ | ||
244 | struct dmar_domain *domain; /* pointer to domain */ | ||
245 | }; | ||
57 | 246 | ||
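Taken together, the root/context helpers above are the building blocks for installing a device translation: a context entry is filled field by field and only marked present at the end, so the IOMMU never observes a half-written entry. A hedged sketch of that composition using the helpers exactly as defined above (the caller and its argument values are illustrative):

/* Sketch: composing a context entry with the helpers defined above.
 * 'pgd_phys', 'domain_id' and 'agaw' are illustrative inputs. */
static void install_context_sketch(struct context_entry *ce,
                                   u64 pgd_phys, int domain_id, int agaw)
{
        context_clear_entry(ce);                 /* start from all-zero */
        context_set_domain_id(ce, domain_id);    /* high word, bits 8-23 */
        context_set_address_width(ce, agaw);     /* high word, bits 0-2 */
        context_set_address_root(ce, pgd_phys);  /* low word, bits 12-63 */
        context_set_translation_type(ce, CONTEXT_TT_MULTI_LEVEL);
        context_set_fault_enable(ce);            /* clear fault-disable bit */
        context_set_present(ce);                 /* publish last */
}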
58 | static void flush_unmaps_timeout(unsigned long data); | 247 | static void flush_unmaps_timeout(unsigned long data); |
59 | 248 | ||
@@ -88,6 +277,8 @@ static int intel_iommu_strict; | |||
88 | static DEFINE_SPINLOCK(device_domain_lock); | 277 | static DEFINE_SPINLOCK(device_domain_lock); |
89 | static LIST_HEAD(device_domain_list); | 278 | static LIST_HEAD(device_domain_list); |
90 | 279 | ||
280 | static struct iommu_ops intel_iommu_ops; | ||
281 | |||
91 | static int __init intel_iommu_setup(char *str) | 282 | static int __init intel_iommu_setup(char *str) |
92 | { | 283 | { |
93 | if (!str) | 284 | if (!str) |
@@ -184,6 +375,87 @@ void free_iova_mem(struct iova *iova) | |||
184 | kmem_cache_free(iommu_iova_cache, iova); | 375 | kmem_cache_free(iommu_iova_cache, iova); |
185 | } | 376 | } |
186 | 377 | ||
378 | |||
379 | static inline int width_to_agaw(int width); | ||
380 | |||
381 | /* calculate agaw for each iommu. | ||
382 | * "SAGAW" may be different across iommus, use a default agaw, and | ||
383 | * fall back to a smaller supported agaw for iommus that don't support the default agaw. | ||
384 | */ | ||
385 | int iommu_calculate_agaw(struct intel_iommu *iommu) | ||
386 | { | ||
387 | unsigned long sagaw; | ||
388 | int agaw = -1; | ||
389 | |||
390 | sagaw = cap_sagaw(iommu->cap); | ||
391 | for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH); | ||
392 | agaw >= 0; agaw--) { | ||
393 | if (test_bit(agaw, &sagaw)) | ||
394 | break; | ||
395 | } | ||
396 | |||
397 | return agaw; | ||
398 | } | ||
399 | |||
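iommu_calculate_agaw() turns the SAGAW capability bitmap into a concrete page-table geometry: start from the agaw implied by the default 48-bit domain width (agaw 2, a 4-level table, since width = 30 + 9 * agaw) and walk downward until a set bit confirms hardware support. A standalone sketch of that fallback (the default width is the assumption stated above, not read from hardware):

#include <stdio.h>

/* width = 30 + 9 * agaw, so a 48-bit default width means agaw 2. */
#define DEFAULT_AGAW 2

static int calculate_agaw(unsigned long sagaw)
{
        int agaw;

        for (agaw = DEFAULT_AGAW; agaw >= 0; agaw--)
                if (sagaw & (1UL << agaw))
                        return agaw; /* largest supported geometry <= default */
        return -1;                   /* hardware supports none we can use */
}

int main(void)
{
        /* hardware advertising only 3-level (agaw 1) tables */
        printf("agaw = %d\n", calculate_agaw(1UL << 1));
        return 0;
}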
400 | /* in native case, each domain is related to only one iommu */ | ||
401 | static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) | ||
402 | { | ||
403 | int iommu_id; | ||
404 | |||
405 | BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); | ||
406 | |||
407 | iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); | ||
408 | if (iommu_id < 0 || iommu_id >= g_num_of_iommus) | ||
409 | return NULL; | ||
410 | |||
411 | return g_iommus[iommu_id]; | ||
412 | } | ||
413 | |||
414 | /* "Coherency" capability may be different across iommus */ | ||
415 | static void domain_update_iommu_coherency(struct dmar_domain *domain) | ||
416 | { | ||
417 | int i; | ||
418 | |||
419 | domain->iommu_coherency = 1; | ||
420 | |||
421 | i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); | ||
422 | for (; i < g_num_of_iommus; ) { | ||
423 | if (!ecap_coherent(g_iommus[i]->ecap)) { | ||
424 | domain->iommu_coherency = 0; | ||
425 | break; | ||
426 | } | ||
427 | i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1); | ||
428 | } | ||
429 | } | ||
430 | |||
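domain_update_iommu_coherency() is an AND-reduction: a domain is coherent only if every iommu whose bit is set in iommu_bmp reports the coherency capability. A userspace sketch of the same reduction over a bitmap (plain bit tests stand in for find_first_bit()/find_next_bit()):

#include <stdio.h>

/* Coherency is the AND of the flags of every iommu in the bitmap. */
static int bitmap_coherency(unsigned long bmp, const int *coherent, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (!(bmp & (1UL << i)))
                        continue;  /* iommu not used by this domain */
                if (!coherent[i])
                        return 0;  /* one non-coherent iommu decides */
        }
        return 1;
}

int main(void)
{
        int coherent[4] = { 1, 0, 1, 1 };

        printf("%d\n", bitmap_coherency(0x5 /* iommus 0,2 */, coherent, 4));
        printf("%d\n", bitmap_coherency(0x3 /* iommus 0,1 */, coherent, 4));
        return 0;
}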
431 | static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn) | ||
432 | { | ||
433 | struct dmar_drhd_unit *drhd = NULL; | ||
434 | int i; | ||
435 | |||
436 | for_each_drhd_unit(drhd) { | ||
437 | if (drhd->ignored) | ||
438 | continue; | ||
439 | |||
440 | for (i = 0; i < drhd->devices_cnt; i++) | ||
441 | if (drhd->devices[i]->bus->number == bus && | ||
442 | drhd->devices[i]->devfn == devfn) | ||
443 | return drhd->iommu; | ||
444 | |||
445 | if (drhd->include_all) | ||
446 | return drhd->iommu; | ||
447 | } | ||
448 | |||
449 | return NULL; | ||
450 | } | ||
451 | |||
452 | static void domain_flush_cache(struct dmar_domain *domain, | ||
453 | void *addr, int size) | ||
454 | { | ||
455 | if (!domain->iommu_coherency) | ||
456 | clflush_cache_range(addr, size); | ||
457 | } | ||
458 | |||
187 | /* Gets context entry for a given bus and devfn */ | 459 | /* Gets context entry for a given bus and devfn */ |
188 | static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, | 460 | static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, |
189 | u8 bus, u8 devfn) | 461 | u8 bus, u8 devfn) |
@@ -226,7 +498,7 @@ static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) | |||
226 | ret = 0; | 498 | ret = 0; |
227 | goto out; | 499 | goto out; |
228 | } | 500 | } |
229 | ret = context_present(context[devfn]); | 501 | ret = context_present(&context[devfn]); |
230 | out: | 502 | out: |
231 | spin_unlock_irqrestore(&iommu->lock, flags); | 503 | spin_unlock_irqrestore(&iommu->lock, flags); |
232 | return ret; | 504 | return ret; |
@@ -242,7 +514,7 @@ static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn) | |||
242 | root = &iommu->root_entry[bus]; | 514 | root = &iommu->root_entry[bus]; |
243 | context = get_context_addr_from_root(root); | 515 | context = get_context_addr_from_root(root); |
244 | if (context) { | 516 | if (context) { |
245 | context_clear_entry(context[devfn]); | 517 | context_clear_entry(&context[devfn]); |
246 | __iommu_flush_cache(iommu, &context[devfn], \ | 518 | __iommu_flush_cache(iommu, &context[devfn], \ |
247 | sizeof(*context)); | 519 | sizeof(*context)); |
248 | } | 520 | } |
@@ -339,7 +611,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) | |||
339 | if (level == 1) | 611 | if (level == 1) |
340 | break; | 612 | break; |
341 | 613 | ||
342 | if (!dma_pte_present(*pte)) { | 614 | if (!dma_pte_present(pte)) { |
343 | tmp_page = alloc_pgtable_page(); | 615 | tmp_page = alloc_pgtable_page(); |
344 | 616 | ||
345 | if (!tmp_page) { | 617 | if (!tmp_page) { |
@@ -347,18 +619,17 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) | |||
347 | flags); | 619 | flags); |
348 | return NULL; | 620 | return NULL; |
349 | } | 621 | } |
350 | __iommu_flush_cache(domain->iommu, tmp_page, | 622 | domain_flush_cache(domain, tmp_page, PAGE_SIZE); |
351 | PAGE_SIZE); | 623 | dma_set_pte_addr(pte, virt_to_phys(tmp_page)); |
352 | dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); | ||
353 | /* | 624 | /* |
354 | * high level table always sets r/w, last level page | 625 | * high level table always sets r/w, last level page |
355 | * table control read/write | 626 | * table control read/write |
356 | */ | 627 | */ |
357 | dma_set_pte_readable(*pte); | 628 | dma_set_pte_readable(pte); |
358 | dma_set_pte_writable(*pte); | 629 | dma_set_pte_writable(pte); |
359 | __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); | 630 | domain_flush_cache(domain, pte, sizeof(*pte)); |
360 | } | 631 | } |
361 | parent = phys_to_virt(dma_pte_addr(*pte)); | 632 | parent = phys_to_virt(dma_pte_addr(pte)); |
362 | level--; | 633 | level--; |
363 | } | 634 | } |
364 | 635 | ||
@@ -381,9 +652,9 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr, | |||
381 | if (level == total) | 652 | if (level == total) |
382 | return pte; | 653 | return pte; |
383 | 654 | ||
384 | if (!dma_pte_present(*pte)) | 655 | if (!dma_pte_present(pte)) |
385 | break; | 656 | break; |
386 | parent = phys_to_virt(dma_pte_addr(*pte)); | 657 | parent = phys_to_virt(dma_pte_addr(pte)); |
387 | total--; | 658 | total--; |
388 | } | 659 | } |
389 | return NULL; | 660 | return NULL; |
@@ -398,8 +669,8 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr) | |||
398 | pte = dma_addr_level_pte(domain, addr, 1); | 669 | pte = dma_addr_level_pte(domain, addr, 1); |
399 | 670 | ||
400 | if (pte) { | 671 | if (pte) { |
401 | dma_clear_pte(*pte); | 672 | dma_clear_pte(pte); |
402 | __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); | 673 | domain_flush_cache(domain, pte, sizeof(*pte)); |
403 | } | 674 | } |
404 | } | 675 | } |
405 | 676 | ||
@@ -445,10 +716,9 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, | |||
445 | pte = dma_addr_level_pte(domain, tmp, level); | 716 | pte = dma_addr_level_pte(domain, tmp, level); |
446 | if (pte) { | 717 | if (pte) { |
447 | free_pgtable_page( | 718 | free_pgtable_page( |
448 | phys_to_virt(dma_pte_addr(*pte))); | 719 | phys_to_virt(dma_pte_addr(pte))); |
449 | dma_clear_pte(*pte); | 720 | dma_clear_pte(pte); |
450 | __iommu_flush_cache(domain->iommu, | 721 | domain_flush_cache(domain, pte, sizeof(*pte)); |
451 | pte, sizeof(*pte)); | ||
452 | } | 722 | } |
453 | tmp += level_size(level); | 723 | tmp += level_size(level); |
454 | } | 724 | } |
@@ -950,17 +1220,28 @@ static int iommu_init_domains(struct intel_iommu *iommu) | |||
950 | 1220 | ||
951 | 1221 | ||
952 | static void domain_exit(struct dmar_domain *domain); | 1222 | static void domain_exit(struct dmar_domain *domain); |
1223 | static void vm_domain_exit(struct dmar_domain *domain); | ||
953 | 1224 | ||
954 | void free_dmar_iommu(struct intel_iommu *iommu) | 1225 | void free_dmar_iommu(struct intel_iommu *iommu) |
955 | { | 1226 | { |
956 | struct dmar_domain *domain; | 1227 | struct dmar_domain *domain; |
957 | int i; | 1228 | int i; |
1229 | unsigned long flags; | ||
958 | 1230 | ||
959 | i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); | 1231 | i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); |
960 | for (; i < cap_ndoms(iommu->cap); ) { | 1232 | for (; i < cap_ndoms(iommu->cap); ) { |
961 | domain = iommu->domains[i]; | 1233 | domain = iommu->domains[i]; |
962 | clear_bit(i, iommu->domain_ids); | 1234 | clear_bit(i, iommu->domain_ids); |
963 | domain_exit(domain); | 1235 | |
1236 | spin_lock_irqsave(&domain->iommu_lock, flags); | ||
1237 | if (--domain->iommu_count == 0) { | ||
1238 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) | ||
1239 | vm_domain_exit(domain); | ||
1240 | else | ||
1241 | domain_exit(domain); | ||
1242 | } | ||
1243 | spin_unlock_irqrestore(&domain->iommu_lock, flags); | ||
1244 | |||
964 | i = find_next_bit(iommu->domain_ids, | 1245 | i = find_next_bit(iommu->domain_ids, |
965 | cap_ndoms(iommu->cap), i+1); | 1246 | cap_ndoms(iommu->cap), i+1); |
966 | } | 1247 | } |
@@ -978,6 +1259,17 @@ void free_dmar_iommu(struct intel_iommu *iommu) | |||
978 | kfree(iommu->domains); | 1259 | kfree(iommu->domains); |
979 | kfree(iommu->domain_ids); | 1260 | kfree(iommu->domain_ids); |
980 | 1261 | ||
1262 | g_iommus[iommu->seq_id] = NULL; | ||
1263 | |||
1264 | /* if all iommus are freed, free g_iommus */ | ||
1265 | for (i = 0; i < g_num_of_iommus; i++) { | ||
1266 | if (g_iommus[i]) | ||
1267 | break; | ||
1268 | } | ||
1269 | |||
1270 | if (i == g_num_of_iommus) | ||
1271 | kfree(g_iommus); | ||
1272 | |||
981 | /* free context mapping */ | 1273 | /* free context mapping */ |
982 | free_context_table(iommu); | 1274 | free_context_table(iommu); |
983 | } | 1275 | } |
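free_dmar_iommu() now also clears this iommu's slot in a global array and frees that array once the last iommu is gone. The array itself is declared near the top of the file, outside this excerpt; presumably along these lines:

    /* global iommu list, indexed by each iommu's sequence id (seq_id) */
    static int g_num_of_iommus;
    static struct intel_iommu **g_iommus;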
@@ -1006,7 +1298,9 @@ static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) | |||
1006 | 1298 | ||
1007 | set_bit(num, iommu->domain_ids); | 1299 | set_bit(num, iommu->domain_ids); |
1008 | domain->id = num; | 1300 | domain->id = num; |
1009 | domain->iommu = iommu; | 1301 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); |
1302 | set_bit(iommu->seq_id, &domain->iommu_bmp); | ||
1303 | domain->flags = 0; | ||
1010 | iommu->domains[num] = domain; | 1304 | iommu->domains[num] = domain; |
1011 | spin_unlock_irqrestore(&iommu->lock, flags); | 1305 | spin_unlock_irqrestore(&iommu->lock, flags); |
1012 | 1306 | ||
@@ -1016,10 +1310,13 @@ static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) | |||
1016 | static void iommu_free_domain(struct dmar_domain *domain) | 1310 | static void iommu_free_domain(struct dmar_domain *domain) |
1017 | { | 1311 | { |
1018 | unsigned long flags; | 1312 | unsigned long flags; |
1313 | struct intel_iommu *iommu; | ||
1314 | |||
1315 | iommu = domain_get_iommu(domain); | ||
1019 | 1316 | ||
1020 | spin_lock_irqsave(&domain->iommu->lock, flags); | 1317 | spin_lock_irqsave(&iommu->lock, flags); |
1021 | clear_bit(domain->id, domain->iommu->domain_ids); | 1318 | clear_bit(domain->id, iommu->domain_ids); |
1022 | spin_unlock_irqrestore(&domain->iommu->lock, flags); | 1319 | spin_unlock_irqrestore(&iommu->lock, flags); |
1023 | } | 1320 | } |
1024 | 1321 | ||
1025 | static struct iova_domain reserved_iova_list; | 1322 | static struct iova_domain reserved_iova_list; |
@@ -1094,11 +1391,12 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
1094 | 1391 | ||
1095 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); | 1392 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); |
1096 | spin_lock_init(&domain->mapping_lock); | 1393 | spin_lock_init(&domain->mapping_lock); |
1394 | spin_lock_init(&domain->iommu_lock); | ||
1097 | 1395 | ||
1098 | domain_reserve_special_ranges(domain); | 1396 | domain_reserve_special_ranges(domain); |
1099 | 1397 | ||
1100 | /* calculate AGAW */ | 1398 | /* calculate AGAW */ |
1101 | iommu = domain->iommu; | 1399 | iommu = domain_get_iommu(domain); |
1102 | if (guest_width > cap_mgaw(iommu->cap)) | 1400 | if (guest_width > cap_mgaw(iommu->cap)) |
1103 | guest_width = cap_mgaw(iommu->cap); | 1401 | guest_width = cap_mgaw(iommu->cap); |
1104 | domain->gaw = guest_width; | 1402 | domain->gaw = guest_width; |
@@ -1115,6 +1413,13 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
1115 | domain->agaw = agaw; | 1413 | domain->agaw = agaw; |
1116 | INIT_LIST_HEAD(&domain->devices); | 1414 | INIT_LIST_HEAD(&domain->devices); |
1117 | 1415 | ||
1416 | if (ecap_coherent(iommu->ecap)) | ||
1417 | domain->iommu_coherency = 1; | ||
1418 | else | ||
1419 | domain->iommu_coherency = 0; | ||
1420 | |||
1421 | domain->iommu_count = 1; | ||
1422 | |||
1118 | /* always allocate the top pgd */ | 1423 | /* always allocate the top pgd */ |
1119 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); | 1424 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); |
1120 | if (!domain->pgd) | 1425 | if (!domain->pgd) |
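domain_init() derives the adjusted guest address width (agaw) from the requested guest width before allocating the top-level page directory. The width/agaw conversion helpers live elsewhere in the file; with VT-d's 512-entry (9-bit) page-table levels starting at a 30-bit width, they plausibly reduce to:

    #define LEVEL_STRIDE        (9)     /* 512 entries per page-table level */

    static inline int agaw_to_width(int agaw)
    {
            return 30 + agaw * LEVEL_STRIDE;    /* agaw 1 -> 39 bits, 2 -> 48 bits */
    }

    static inline int width_to_agaw(int width)
    {
            return (width - 30) / LEVEL_STRIDE;
    }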
@@ -1151,28 +1456,82 @@ static int domain_context_mapping_one(struct dmar_domain *domain, | |||
1151 | u8 bus, u8 devfn) | 1456 | u8 bus, u8 devfn) |
1152 | { | 1457 | { |
1153 | struct context_entry *context; | 1458 | struct context_entry *context; |
1154 | struct intel_iommu *iommu = domain->iommu; | ||
1155 | unsigned long flags; | 1459 | unsigned long flags; |
1460 | struct intel_iommu *iommu; | ||
1461 | struct dma_pte *pgd; | ||
1462 | unsigned long num; | ||
1463 | unsigned long ndomains; | ||
1464 | int id; | ||
1465 | int agaw; | ||
1156 | 1466 | ||
1157 | pr_debug("Set context mapping for %02x:%02x.%d\n", | 1467 | pr_debug("Set context mapping for %02x:%02x.%d\n", |
1158 | bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); | 1468 | bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); |
1159 | BUG_ON(!domain->pgd); | 1469 | BUG_ON(!domain->pgd); |
1470 | |||
1471 | iommu = device_to_iommu(bus, devfn); | ||
1472 | if (!iommu) | ||
1473 | return -ENODEV; | ||
1474 | |||
1160 | context = device_to_context_entry(iommu, bus, devfn); | 1475 | context = device_to_context_entry(iommu, bus, devfn); |
1161 | if (!context) | 1476 | if (!context) |
1162 | return -ENOMEM; | 1477 | return -ENOMEM; |
1163 | spin_lock_irqsave(&iommu->lock, flags); | 1478 | spin_lock_irqsave(&iommu->lock, flags); |
1164 | if (context_present(*context)) { | 1479 | if (context_present(context)) { |
1165 | spin_unlock_irqrestore(&iommu->lock, flags); | 1480 | spin_unlock_irqrestore(&iommu->lock, flags); |
1166 | return 0; | 1481 | return 0; |
1167 | } | 1482 | } |
1168 | 1483 | ||
1169 | context_set_domain_id(*context, domain->id); | 1484 | id = domain->id; |
1170 | context_set_address_width(*context, domain->agaw); | 1485 | pgd = domain->pgd; |
1171 | context_set_address_root(*context, virt_to_phys(domain->pgd)); | 1486 | |
1172 | context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL); | 1487 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) { |
1173 | context_set_fault_enable(*context); | 1488 | int found = 0; |
1174 | context_set_present(*context); | 1489 | |
1175 | __iommu_flush_cache(iommu, context, sizeof(*context)); | 1490 | /* find an available domain id for this device in iommu */ |
1491 | ndomains = cap_ndoms(iommu->cap); | ||
1492 | num = find_first_bit(iommu->domain_ids, ndomains); | ||
1493 | for (; num < ndomains; ) { | ||
1494 | if (iommu->domains[num] == domain) { | ||
1495 | id = num; | ||
1496 | found = 1; | ||
1497 | break; | ||
1498 | } | ||
1499 | num = find_next_bit(iommu->domain_ids, | ||
1500 | cap_ndoms(iommu->cap), num+1); | ||
1501 | } | ||
1502 | |||
1503 | if (found == 0) { | ||
1504 | num = find_first_zero_bit(iommu->domain_ids, ndomains); | ||
1505 | if (num >= ndomains) { | ||
1506 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
1507 | printk(KERN_ERR "IOMMU: no free domain ids\n"); | ||
1508 | return -EFAULT; | ||
1509 | } | ||
1510 | |||
1511 | set_bit(num, iommu->domain_ids); | ||
1512 | iommu->domains[num] = domain; | ||
1513 | id = num; | ||
1514 | } | ||
1515 | |||
1516 | /* Skip top levels of the page tables for an | ||
1517 | * iommu whose agaw is smaller than the default. | ||
1518 | */ | ||
1519 | for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { | ||
1520 | pgd = phys_to_virt(dma_pte_addr(pgd)); | ||
1521 | if (!dma_pte_present(pgd)) { | ||
1522 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
1523 | return -ENOMEM; | ||
1524 | } | ||
1525 | } | ||
1526 | } | ||
1527 | |||
1528 | context_set_domain_id(context, id); | ||
1529 | context_set_address_width(context, iommu->agaw); | ||
1530 | context_set_address_root(context, virt_to_phys(pgd)); | ||
1531 | context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL); | ||
1532 | context_set_fault_enable(context); | ||
1533 | context_set_present(context); | ||
1534 | domain_flush_cache(domain, context, sizeof(*context)); | ||
1176 | 1535 | ||
1177 | /* it's a non-present to present mapping */ | 1536 | /* it's a non-present to present mapping */ |
1178 | if (iommu->flush.flush_context(iommu, domain->id, | 1537 | if (iommu->flush.flush_context(iommu, domain->id, |
@@ -1183,6 +1542,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain, | |||
1183 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0); | 1542 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0); |
1184 | 1543 | ||
1185 | spin_unlock_irqrestore(&iommu->lock, flags); | 1544 | spin_unlock_irqrestore(&iommu->lock, flags); |
1545 | |||
1546 | spin_lock_irqsave(&domain->iommu_lock, flags); | ||
1547 | if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { | ||
1548 | domain->iommu_count++; | ||
1549 | domain_update_iommu_coherency(domain); | ||
1550 | } | ||
1551 | spin_unlock_irqrestore(&domain->iommu_lock, flags); | ||
1186 | return 0; | 1552 | return 0; |
1187 | } | 1553 | } |
1188 | 1554 | ||
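domain_context_mapping_one() now records, under domain->iommu_lock, which iommus serve the domain (iommu_bmp), how many do (iommu_count), and whether all of them snoop page-table writes. domain_update_iommu_coherency() is defined outside this excerpt; a sketch of the likely logic, reusing the ecap_coherent() test seen in domain_init():

    static void domain_update_iommu_coherency(struct dmar_domain *domain)
    {
            int i;

            domain->iommu_coherency = 1;

            /* coherent only if every iommu serving this domain is coherent */
            i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
            for (; i < g_num_of_iommus; ) {
                    if (!ecap_coherent(g_iommus[i]->ecap)) {
                            domain->iommu_coherency = 0;
                            break;
                    }
                    i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
            }
    }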
@@ -1218,13 +1584,17 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev) | |||
1218 | tmp->bus->number, tmp->devfn); | 1584 | tmp->bus->number, tmp->devfn); |
1219 | } | 1585 | } |
1220 | 1586 | ||
1221 | static int domain_context_mapped(struct dmar_domain *domain, | 1587 | static int domain_context_mapped(struct pci_dev *pdev) |
1222 | struct pci_dev *pdev) | ||
1223 | { | 1588 | { |
1224 | int ret; | 1589 | int ret; |
1225 | struct pci_dev *tmp, *parent; | 1590 | struct pci_dev *tmp, *parent; |
1591 | struct intel_iommu *iommu; | ||
1592 | |||
1593 | iommu = device_to_iommu(pdev->bus->number, pdev->devfn); | ||
1594 | if (!iommu) | ||
1595 | return -ENODEV; | ||
1226 | 1596 | ||
1227 | ret = device_context_mapped(domain->iommu, | 1597 | ret = device_context_mapped(iommu, |
1228 | pdev->bus->number, pdev->devfn); | 1598 | pdev->bus->number, pdev->devfn); |
1229 | if (!ret) | 1599 | if (!ret) |
1230 | return ret; | 1600 | return ret; |
@@ -1235,17 +1605,17 @@ static int domain_context_mapped(struct dmar_domain *domain, | |||
1235 | /* Secondary interface's bus number and devfn 0 */ | 1605 | /* Secondary interface's bus number and devfn 0 */ |
1236 | parent = pdev->bus->self; | 1606 | parent = pdev->bus->self; |
1237 | while (parent != tmp) { | 1607 | while (parent != tmp) { |
1238 | ret = device_context_mapped(domain->iommu, parent->bus->number, | 1608 | ret = device_context_mapped(iommu, parent->bus->number, |
1239 | parent->devfn); | 1609 | parent->devfn); |
1240 | if (!ret) | 1610 | if (!ret) |
1241 | return ret; | 1611 | return ret; |
1242 | parent = parent->bus->self; | 1612 | parent = parent->bus->self; |
1243 | } | 1613 | } |
1244 | if (tmp->is_pcie) | 1614 | if (tmp->is_pcie) |
1245 | return device_context_mapped(domain->iommu, | 1615 | return device_context_mapped(iommu, |
1246 | tmp->subordinate->number, 0); | 1616 | tmp->subordinate->number, 0); |
1247 | else | 1617 | else |
1248 | return device_context_mapped(domain->iommu, | 1618 | return device_context_mapped(iommu, |
1249 | tmp->bus->number, tmp->devfn); | 1619 | tmp->bus->number, tmp->devfn); |
1250 | } | 1620 | } |
1251 | 1621 | ||
@@ -1273,22 +1643,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, | |||
1273 | /* We don't need a lock here; nobody else | 1643 | /* We don't need a lock here; nobody else
1274 | * touches the iova range | 1644 | * touches the iova range |
1275 | */ | 1645 | */ |
1276 | BUG_ON(dma_pte_addr(*pte)); | 1646 | BUG_ON(dma_pte_addr(pte)); |
1277 | dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT); | 1647 | dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT); |
1278 | dma_set_pte_prot(*pte, prot); | 1648 | dma_set_pte_prot(pte, prot); |
1279 | __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); | 1649 | domain_flush_cache(domain, pte, sizeof(*pte)); |
1280 | start_pfn++; | 1650 | start_pfn++; |
1281 | index++; | 1651 | index++; |
1282 | } | 1652 | } |
1283 | return 0; | 1653 | return 0; |
1284 | } | 1654 | } |
1285 | 1655 | ||
1286 | static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) | 1656 | static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) |
1287 | { | 1657 | { |
1288 | clear_context_table(domain->iommu, bus, devfn); | 1658 | if (!iommu) |
1289 | domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0, | 1659 | return; |
1660 | |||
1661 | clear_context_table(iommu, bus, devfn); | ||
1662 | iommu->flush.flush_context(iommu, 0, 0, 0, | ||
1290 | DMA_CCMD_GLOBAL_INVL, 0); | 1663 | DMA_CCMD_GLOBAL_INVL, 0); |
1291 | domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0, | 1664 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, |
1292 | DMA_TLB_GLOBAL_FLUSH, 0); | 1665 | DMA_TLB_GLOBAL_FLUSH, 0); |
1293 | } | 1666 | } |
1294 | 1667 | ||
@@ -1296,6 +1669,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain) | |||
1296 | { | 1669 | { |
1297 | struct device_domain_info *info; | 1670 | struct device_domain_info *info; |
1298 | unsigned long flags; | 1671 | unsigned long flags; |
1672 | struct intel_iommu *iommu; | ||
1299 | 1673 | ||
1300 | spin_lock_irqsave(&device_domain_lock, flags); | 1674 | spin_lock_irqsave(&device_domain_lock, flags); |
1301 | while (!list_empty(&domain->devices)) { | 1675 | while (!list_empty(&domain->devices)) { |
@@ -1307,7 +1681,8 @@ static void domain_remove_dev_info(struct dmar_domain *domain) | |||
1307 | info->dev->dev.archdata.iommu = NULL; | 1681 | info->dev->dev.archdata.iommu = NULL; |
1308 | spin_unlock_irqrestore(&device_domain_lock, flags); | 1682 | spin_unlock_irqrestore(&device_domain_lock, flags); |
1309 | 1683 | ||
1310 | detach_domain_for_dev(info->domain, info->bus, info->devfn); | 1684 | iommu = device_to_iommu(info->bus, info->devfn); |
1685 | iommu_detach_dev(iommu, info->bus, info->devfn); | ||
1311 | free_devinfo_mem(info); | 1686 | free_devinfo_mem(info); |
1312 | 1687 | ||
1313 | spin_lock_irqsave(&device_domain_lock, flags); | 1688 | spin_lock_irqsave(&device_domain_lock, flags); |
@@ -1400,7 +1775,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | |||
1400 | info->dev = NULL; | 1775 | info->dev = NULL; |
1401 | info->domain = domain; | 1776 | info->domain = domain; |
1402 | /* This domain is shared by devices under p2p bridge */ | 1777 | /* This domain is shared by devices under p2p bridge */ |
1403 | domain->flags |= DOMAIN_FLAG_MULTIPLE_DEVICES; | 1778 | domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES; |
1404 | 1779 | ||
1405 | /* pcie-to-pci bridge already has a domain, uses it */ | 1780 | /* pcie-to-pci bridge already has a domain, uses it */ |
1406 | found = NULL; | 1781 | found = NULL; |
@@ -1563,6 +1938,11 @@ static void __init iommu_prepare_gfx_mapping(void) | |||
1563 | printk(KERN_ERR "IOMMU: mapping reserved region failed\n"); | 1938 | printk(KERN_ERR "IOMMU: mapping reserved region failed\n"); |
1564 | } | 1939 | } |
1565 | } | 1940 | } |
1941 | #else /* !CONFIG_DMAR_GFX_WA */ | ||
1942 | static inline void iommu_prepare_gfx_mapping(void) | ||
1943 | { | ||
1944 | return; | ||
1945 | } | ||
1566 | #endif | 1946 | #endif |
1567 | 1947 | ||
1568 | #ifdef CONFIG_DMAR_FLOPPY_WA | 1948 | #ifdef CONFIG_DMAR_FLOPPY_WA |
@@ -1590,7 +1970,7 @@ static inline void iommu_prepare_isa(void) | |||
1590 | } | 1970 | } |
1591 | #endif /* !CONFIG_DMAR_FLPY_WA */ | 1971 | #endif /* !CONFIG_DMAR_FLPY_WA */ |
1592 | 1972 | ||
1593 | int __init init_dmars(void) | 1973 | static int __init init_dmars(void) |
1594 | { | 1974 | { |
1595 | struct dmar_drhd_unit *drhd; | 1975 | struct dmar_drhd_unit *drhd; |
1596 | struct dmar_rmrr_unit *rmrr; | 1976 | struct dmar_rmrr_unit *rmrr; |
@@ -1613,9 +1993,18 @@ int __init init_dmars(void) | |||
1613 | */ | 1993 | */ |
1614 | } | 1994 | } |
1615 | 1995 | ||
1996 | g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *), | ||
1997 | GFP_KERNEL); | ||
1998 | if (!g_iommus) { | ||
1999 | printk(KERN_ERR "Allocating global iommu array failed\n"); | ||
2000 | ret = -ENOMEM; | ||
2001 | goto error; | ||
2002 | } | ||
2003 | |||
1616 | deferred_flush = kzalloc(g_num_of_iommus * | 2004 | deferred_flush = kzalloc(g_num_of_iommus * |
1617 | sizeof(struct deferred_flush_tables), GFP_KERNEL); | 2005 | sizeof(struct deferred_flush_tables), GFP_KERNEL); |
1618 | if (!deferred_flush) { | 2006 | if (!deferred_flush) { |
2007 | kfree(g_iommus); | ||
1619 | ret = -ENOMEM; | 2008 | ret = -ENOMEM; |
1620 | goto error; | 2009 | goto error; |
1621 | } | 2010 | } |
@@ -1625,6 +2014,7 @@ int __init init_dmars(void) | |||
1625 | continue; | 2014 | continue; |
1626 | 2015 | ||
1627 | iommu = drhd->iommu; | 2016 | iommu = drhd->iommu; |
2017 | g_iommus[iommu->seq_id] = iommu; | ||
1628 | 2018 | ||
1629 | ret = iommu_init_domains(iommu); | 2019 | ret = iommu_init_domains(iommu); |
1630 | if (ret) | 2020 | if (ret) |
@@ -1737,6 +2127,7 @@ error: | |||
1737 | iommu = drhd->iommu; | 2127 | iommu = drhd->iommu; |
1738 | free_iommu(iommu); | 2128 | free_iommu(iommu); |
1739 | } | 2129 | } |
2130 | kfree(g_iommus); | ||
1740 | return ret; | 2131 | return ret; |
1741 | } | 2132 | } |
1742 | 2133 | ||
@@ -1805,7 +2196,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev) | |||
1805 | } | 2196 | } |
1806 | 2197 | ||
1807 | /* make sure context mapping is ok */ | 2198 | /* make sure context mapping is ok */ |
1808 | if (unlikely(!domain_context_mapped(domain, pdev))) { | 2199 | if (unlikely(!domain_context_mapped(pdev))) { |
1809 | ret = domain_context_mapping(domain, pdev); | 2200 | ret = domain_context_mapping(domain, pdev); |
1810 | if (ret) { | 2201 | if (ret) { |
1811 | printk(KERN_ERR | 2202 | printk(KERN_ERR |
@@ -1827,6 +2218,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
1827 | struct iova *iova; | 2218 | struct iova *iova; |
1828 | int prot = 0; | 2219 | int prot = 0; |
1829 | int ret; | 2220 | int ret; |
2221 | struct intel_iommu *iommu; | ||
1830 | 2222 | ||
1831 | BUG_ON(dir == DMA_NONE); | 2223 | BUG_ON(dir == DMA_NONE); |
1832 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2224 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) |
@@ -1836,6 +2228,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
1836 | if (!domain) | 2228 | if (!domain) |
1837 | return 0; | 2229 | return 0; |
1838 | 2230 | ||
2231 | iommu = domain_get_iommu(domain); | ||
1839 | size = aligned_size((u64)paddr, size); | 2232 | size = aligned_size((u64)paddr, size); |
1840 | 2233 | ||
1841 | iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); | 2234 | iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); |
@@ -1849,7 +2242,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
1849 | * mappings.. | 2242 | * mappings.. |
1850 | */ | 2243 | */ |
1851 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ | 2244 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ |
1852 | !cap_zlr(domain->iommu->cap)) | 2245 | !cap_zlr(iommu->cap)) |
1853 | prot |= DMA_PTE_READ; | 2246 | prot |= DMA_PTE_READ; |
1854 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) | 2247 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) |
1855 | prot |= DMA_PTE_WRITE; | 2248 | prot |= DMA_PTE_WRITE; |
@@ -1865,10 +2258,10 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
1865 | goto error; | 2258 | goto error; |
1866 | 2259 | ||
1867 | /* it's a non-present to present mapping */ | 2260 | /* it's a non-present to present mapping */ |
1868 | ret = iommu_flush_iotlb_psi(domain->iommu, domain->id, | 2261 | ret = iommu_flush_iotlb_psi(iommu, domain->id, |
1869 | start_paddr, size >> VTD_PAGE_SHIFT, 1); | 2262 | start_paddr, size >> VTD_PAGE_SHIFT, 1); |
1870 | if (ret) | 2263 | if (ret) |
1871 | iommu_flush_write_buffer(domain->iommu); | 2264 | iommu_flush_write_buffer(iommu); |
1872 | 2265 | ||
1873 | return start_paddr + ((u64)paddr & (~PAGE_MASK)); | 2266 | return start_paddr + ((u64)paddr & (~PAGE_MASK)); |
1874 | 2267 | ||
@@ -1895,10 +2288,11 @@ static void flush_unmaps(void) | |||
1895 | 2288 | ||
1896 | /* just flush them all */ | 2289 | /* just flush them all */ |
1897 | for (i = 0; i < g_num_of_iommus; i++) { | 2290 | for (i = 0; i < g_num_of_iommus; i++) { |
1898 | if (deferred_flush[i].next) { | 2291 | struct intel_iommu *iommu = g_iommus[i]; |
1899 | struct intel_iommu *iommu = | 2292 | if (!iommu) |
1900 | deferred_flush[i].domain[0]->iommu; | 2293 | continue; |
1901 | 2294 | ||
2295 | if (deferred_flush[i].next) { | ||
1902 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, | 2296 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, |
1903 | DMA_TLB_GLOBAL_FLUSH, 0); | 2297 | DMA_TLB_GLOBAL_FLUSH, 0); |
1904 | for (j = 0; j < deferred_flush[i].next; j++) { | 2298 | for (j = 0; j < deferred_flush[i].next; j++) { |
@@ -1925,12 +2319,14 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) | |||
1925 | { | 2319 | { |
1926 | unsigned long flags; | 2320 | unsigned long flags; |
1927 | int next, iommu_id; | 2321 | int next, iommu_id; |
2322 | struct intel_iommu *iommu; | ||
1928 | 2323 | ||
1929 | spin_lock_irqsave(&async_umap_flush_lock, flags); | 2324 | spin_lock_irqsave(&async_umap_flush_lock, flags); |
1930 | if (list_size == HIGH_WATER_MARK) | 2325 | if (list_size == HIGH_WATER_MARK) |
1931 | flush_unmaps(); | 2326 | flush_unmaps(); |
1932 | 2327 | ||
1933 | iommu_id = dom->iommu->seq_id; | 2328 | iommu = domain_get_iommu(dom); |
2329 | iommu_id = iommu->seq_id; | ||
1934 | 2330 | ||
1935 | next = deferred_flush[iommu_id].next; | 2331 | next = deferred_flush[iommu_id].next; |
1936 | deferred_flush[iommu_id].domain[next] = dom; | 2332 | deferred_flush[iommu_id].domain[next] = dom; |
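add_unmap() now looks the iommu up through domain_get_iommu() instead of the removed dom->iommu back-pointer. The deferred_flush table it indexes by seq_id is declared earlier in the file; roughly:

    #define HIGH_WATER_MARK 250
    struct deferred_flush_tables {
            int next;
            struct iova *iova[HIGH_WATER_MARK];
            struct dmar_domain *domain[HIGH_WATER_MARK];
    };

    /* one entry per iommu, allocated in init_dmars() */
    static struct deferred_flush_tables *deferred_flush;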
@@ -1952,12 +2348,15 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, | |||
1952 | struct dmar_domain *domain; | 2348 | struct dmar_domain *domain; |
1953 | unsigned long start_addr; | 2349 | unsigned long start_addr; |
1954 | struct iova *iova; | 2350 | struct iova *iova; |
2351 | struct intel_iommu *iommu; | ||
1955 | 2352 | ||
1956 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2353 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) |
1957 | return; | 2354 | return; |
1958 | domain = find_domain(pdev); | 2355 | domain = find_domain(pdev); |
1959 | BUG_ON(!domain); | 2356 | BUG_ON(!domain); |
1960 | 2357 | ||
2358 | iommu = domain_get_iommu(domain); | ||
2359 | |||
1961 | iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); | 2360 | iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); |
1962 | if (!iova) | 2361 | if (!iova) |
1963 | return; | 2362 | return; |
@@ -1973,9 +2372,9 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, | |||
1973 | /* free page tables */ | 2372 | /* free page tables */ |
1974 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); | 2373 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); |
1975 | if (intel_iommu_strict) { | 2374 | if (intel_iommu_strict) { |
1976 | if (iommu_flush_iotlb_psi(domain->iommu, | 2375 | if (iommu_flush_iotlb_psi(iommu, |
1977 | domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0)) | 2376 | domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0)) |
1978 | iommu_flush_write_buffer(domain->iommu); | 2377 | iommu_flush_write_buffer(iommu); |
1979 | /* free iova */ | 2378 | /* free iova */ |
1980 | __free_iova(&domain->iovad, iova); | 2379 | __free_iova(&domain->iovad, iova); |
1981 | } else { | 2380 | } else { |
@@ -2036,11 +2435,15 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2036 | size_t size = 0; | 2435 | size_t size = 0; |
2037 | void *addr; | 2436 | void *addr; |
2038 | struct scatterlist *sg; | 2437 | struct scatterlist *sg; |
2438 | struct intel_iommu *iommu; | ||
2039 | 2439 | ||
2040 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2440 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) |
2041 | return; | 2441 | return; |
2042 | 2442 | ||
2043 | domain = find_domain(pdev); | 2443 | domain = find_domain(pdev); |
2444 | BUG_ON(!domain); | ||
2445 | |||
2446 | iommu = domain_get_iommu(domain); | ||
2044 | 2447 | ||
2045 | iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); | 2448 | iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); |
2046 | if (!iova) | 2449 | if (!iova) |
@@ -2057,9 +2460,9 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
2057 | /* free page tables */ | 2460 | /* free page tables */ |
2058 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); | 2461 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); |
2059 | 2462 | ||
2060 | if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr, | 2463 | if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr, |
2061 | size >> VTD_PAGE_SHIFT, 0)) | 2464 | size >> VTD_PAGE_SHIFT, 0)) |
2062 | iommu_flush_write_buffer(domain->iommu); | 2465 | iommu_flush_write_buffer(iommu); |
2063 | 2466 | ||
2064 | /* free iova */ | 2467 | /* free iova */ |
2065 | __free_iova(&domain->iovad, iova); | 2468 | __free_iova(&domain->iovad, iova); |
@@ -2093,6 +2496,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, | |||
2093 | int ret; | 2496 | int ret; |
2094 | struct scatterlist *sg; | 2497 | struct scatterlist *sg; |
2095 | unsigned long start_addr; | 2498 | unsigned long start_addr; |
2499 | struct intel_iommu *iommu; | ||
2096 | 2500 | ||
2097 | BUG_ON(dir == DMA_NONE); | 2501 | BUG_ON(dir == DMA_NONE); |
2098 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) | 2502 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) |
@@ -2102,6 +2506,8 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, | |||
2102 | if (!domain) | 2506 | if (!domain) |
2103 | return 0; | 2507 | return 0; |
2104 | 2508 | ||
2509 | iommu = domain_get_iommu(domain); | ||
2510 | |||
2105 | for_each_sg(sglist, sg, nelems, i) { | 2511 | for_each_sg(sglist, sg, nelems, i) { |
2106 | addr = SG_ENT_VIRT_ADDRESS(sg); | 2512 | addr = SG_ENT_VIRT_ADDRESS(sg); |
2107 | addr = (void *)virt_to_phys(addr); | 2513 | addr = (void *)virt_to_phys(addr); |
@@ -2119,7 +2525,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, | |||
2119 | * mappings.. | 2525 | * mappings.. |
2120 | */ | 2526 | */ |
2121 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ | 2527 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ |
2122 | !cap_zlr(domain->iommu->cap)) | 2528 | !cap_zlr(iommu->cap)) |
2123 | prot |= DMA_PTE_READ; | 2529 | prot |= DMA_PTE_READ; |
2124 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) | 2530 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) |
2125 | prot |= DMA_PTE_WRITE; | 2531 | prot |= DMA_PTE_WRITE; |
@@ -2151,9 +2557,9 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, | |||
2151 | } | 2557 | } |
2152 | 2558 | ||
2153 | /* it's a non-present to present mapping */ | 2559 | /* it's a non-present to present mapping */ |
2154 | if (iommu_flush_iotlb_psi(domain->iommu, domain->id, | 2560 | if (iommu_flush_iotlb_psi(iommu, domain->id, |
2155 | start_addr, offset >> VTD_PAGE_SHIFT, 1)) | 2561 | start_addr, offset >> VTD_PAGE_SHIFT, 1)) |
2156 | iommu_flush_write_buffer(domain->iommu); | 2562 | iommu_flush_write_buffer(iommu); |
2157 | return nelems; | 2563 | return nelems; |
2158 | } | 2564 | } |
2159 | 2565 | ||
@@ -2325,10 +2731,220 @@ int __init intel_iommu_init(void) | |||
2325 | init_timer(&unmap_timer); | 2731 | init_timer(&unmap_timer); |
2326 | force_iommu = 1; | 2732 | force_iommu = 1; |
2327 | dma_ops = &intel_dma_ops; | 2733 | dma_ops = &intel_dma_ops; |
2734 | |||
2735 | register_iommu(&intel_iommu_ops); | ||
2736 | |||
2737 | return 0; | ||
2738 | } | ||
2739 | |||
2740 | static int vm_domain_add_dev_info(struct dmar_domain *domain, | ||
2741 | struct pci_dev *pdev) | ||
2742 | { | ||
2743 | struct device_domain_info *info; | ||
2744 | unsigned long flags; | ||
2745 | |||
2746 | info = alloc_devinfo_mem(); | ||
2747 | if (!info) | ||
2748 | return -ENOMEM; | ||
2749 | |||
2750 | info->bus = pdev->bus->number; | ||
2751 | info->devfn = pdev->devfn; | ||
2752 | info->dev = pdev; | ||
2753 | info->domain = domain; | ||
2754 | |||
2755 | spin_lock_irqsave(&device_domain_lock, flags); | ||
2756 | list_add(&info->link, &domain->devices); | ||
2757 | list_add(&info->global, &device_domain_list); | ||
2758 | pdev->dev.archdata.iommu = info; | ||
2759 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
2760 | |||
2761 | return 0; | ||
2762 | } | ||
2763 | |||
2764 | static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, | ||
2765 | struct pci_dev *pdev) | ||
2766 | { | ||
2767 | struct device_domain_info *info; | ||
2768 | struct intel_iommu *iommu; | ||
2769 | unsigned long flags; | ||
2770 | int found = 0; | ||
2771 | struct list_head *entry, *tmp; | ||
2772 | |||
2773 | iommu = device_to_iommu(pdev->bus->number, pdev->devfn); | ||
2774 | if (!iommu) | ||
2775 | return; | ||
2776 | |||
2777 | spin_lock_irqsave(&device_domain_lock, flags); | ||
2778 | list_for_each_safe(entry, tmp, &domain->devices) { | ||
2779 | info = list_entry(entry, struct device_domain_info, link); | ||
2780 | if (info->bus == pdev->bus->number && | ||
2781 | info->devfn == pdev->devfn) { | ||
2782 | list_del(&info->link); | ||
2783 | list_del(&info->global); | ||
2784 | if (info->dev) | ||
2785 | info->dev->dev.archdata.iommu = NULL; | ||
2786 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
2787 | |||
2788 | iommu_detach_dev(iommu, info->bus, info->devfn); | ||
2789 | free_devinfo_mem(info); | ||
2790 | |||
2791 | spin_lock_irqsave(&device_domain_lock, flags); | ||
2792 | |||
2793 | if (found) | ||
2794 | break; | ||
2795 | else | ||
2796 | continue; | ||
2797 | } | ||
2798 | |||
2799 | /* if there are no other devices under the same iommu | ||
2800 | * owned by this domain, clear this iommu in iommu_bmp, | ||
2801 | * update the iommu count and coherency | ||
2802 | */ | ||
2803 | if (device_to_iommu(info->bus, info->devfn) == iommu) | ||
2804 | found = 1; | ||
2805 | } | ||
2806 | |||
2807 | if (found == 0) { | ||
2808 | unsigned long tmp_flags; | ||
2809 | spin_lock_irqsave(&domain->iommu_lock, tmp_flags); | ||
2810 | clear_bit(iommu->seq_id, &domain->iommu_bmp); | ||
2811 | domain->iommu_count--; | ||
2812 | domain_update_iommu_coherency(domain); | ||
2813 | spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); | ||
2814 | } | ||
2815 | |||
2816 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
2817 | } | ||
2818 | |||
2819 | static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) | ||
2820 | { | ||
2821 | struct device_domain_info *info; | ||
2822 | struct intel_iommu *iommu; | ||
2823 | unsigned long flags1, flags2; | ||
2824 | |||
2825 | spin_lock_irqsave(&device_domain_lock, flags1); | ||
2826 | while (!list_empty(&domain->devices)) { | ||
2827 | info = list_entry(domain->devices.next, | ||
2828 | struct device_domain_info, link); | ||
2829 | list_del(&info->link); | ||
2830 | list_del(&info->global); | ||
2831 | if (info->dev) | ||
2832 | info->dev->dev.archdata.iommu = NULL; | ||
2833 | |||
2834 | spin_unlock_irqrestore(&device_domain_lock, flags1); | ||
2835 | |||
2836 | iommu = device_to_iommu(info->bus, info->devfn); | ||
2837 | iommu_detach_dev(iommu, info->bus, info->devfn); | ||
2838 | |||
2839 | /* clear this iommu in iommu_bmp, update iommu count | ||
2840 | * and coherency | ||
2841 | */ | ||
2842 | spin_lock_irqsave(&domain->iommu_lock, flags2); | ||
2843 | if (test_and_clear_bit(iommu->seq_id, | ||
2844 | &domain->iommu_bmp)) { | ||
2845 | domain->iommu_count--; | ||
2846 | domain_update_iommu_coherency(domain); | ||
2847 | } | ||
2848 | spin_unlock_irqrestore(&domain->iommu_lock, flags2); | ||
2849 | |||
2850 | free_devinfo_mem(info); | ||
2851 | spin_lock_irqsave(&device_domain_lock, flags1); | ||
2852 | } | ||
2853 | spin_unlock_irqrestore(&device_domain_lock, flags1); | ||
2854 | } | ||
2855 | |||
2856 | /* domain id for virtual machines; it won't be set in context entries */ | ||
2857 | static unsigned long vm_domid; | ||
2858 | |||
2859 | static int vm_domain_min_agaw(struct dmar_domain *domain) | ||
2860 | { | ||
2861 | int i; | ||
2862 | int min_agaw = domain->agaw; | ||
2863 | |||
2864 | i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); | ||
2865 | for (; i < g_num_of_iommus; ) { | ||
2866 | if (min_agaw > g_iommus[i]->agaw) | ||
2867 | min_agaw = g_iommus[i]->agaw; | ||
2868 | |||
2869 | i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1); | ||
2870 | } | ||
2871 | |||
2872 | return min_agaw; | ||
2873 | } | ||
2874 | |||
2875 | static struct dmar_domain *iommu_alloc_vm_domain(void) | ||
2876 | { | ||
2877 | struct dmar_domain *domain; | ||
2878 | |||
2879 | domain = alloc_domain_mem(); | ||
2880 | if (!domain) | ||
2881 | return NULL; | ||
2882 | |||
2883 | domain->id = vm_domid++; | ||
2884 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); | ||
2885 | domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; | ||
2886 | |||
2887 | return domain; | ||
2888 | } | ||
2889 | |||
2890 | static int vm_domain_init(struct dmar_domain *domain, int guest_width) | ||
2891 | { | ||
2892 | int adjust_width; | ||
2893 | |||
2894 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); | ||
2895 | spin_lock_init(&domain->mapping_lock); | ||
2896 | spin_lock_init(&domain->iommu_lock); | ||
2897 | |||
2898 | domain_reserve_special_ranges(domain); | ||
2899 | |||
2900 | /* calculate AGAW */ | ||
2901 | domain->gaw = guest_width; | ||
2902 | adjust_width = guestwidth_to_adjustwidth(guest_width); | ||
2903 | domain->agaw = width_to_agaw(adjust_width); | ||
2904 | |||
2905 | INIT_LIST_HEAD(&domain->devices); | ||
2906 | |||
2907 | domain->iommu_count = 0; | ||
2908 | domain->iommu_coherency = 0; | ||
2909 | domain->max_addr = 0; | ||
2910 | |||
2911 | /* always allocate the top pgd */ | ||
2912 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); | ||
2913 | if (!domain->pgd) | ||
2914 | return -ENOMEM; | ||
2915 | domain_flush_cache(domain, domain->pgd, PAGE_SIZE); | ||
2328 | return 0; | 2916 | return 0; |
2329 | } | 2917 | } |
2330 | 2918 | ||
2331 | void intel_iommu_domain_exit(struct dmar_domain *domain) | 2919 | static void iommu_free_vm_domain(struct dmar_domain *domain) |
2920 | { | ||
2921 | unsigned long flags; | ||
2922 | struct dmar_drhd_unit *drhd; | ||
2923 | struct intel_iommu *iommu; | ||
2924 | unsigned long i; | ||
2925 | unsigned long ndomains; | ||
2926 | |||
2927 | for_each_drhd_unit(drhd) { | ||
2928 | if (drhd->ignored) | ||
2929 | continue; | ||
2930 | iommu = drhd->iommu; | ||
2931 | |||
2932 | ndomains = cap_ndoms(iommu->cap); | ||
2933 | i = find_first_bit(iommu->domain_ids, ndomains); | ||
2934 | for (; i < ndomains; ) { | ||
2935 | if (iommu->domains[i] == domain) { | ||
2936 | spin_lock_irqsave(&iommu->lock, flags); | ||
2937 | clear_bit(i, iommu->domain_ids); | ||
2938 | iommu->domains[i] = NULL; | ||
2939 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
2940 | break; | ||
2941 | } | ||
2942 | i = find_next_bit(iommu->domain_ids, ndomains, i+1); | ||
2943 | } | ||
2944 | } | ||
2945 | } | ||
2946 | |||
2947 | static void vm_domain_exit(struct dmar_domain *domain) | ||
2332 | { | 2948 | { |
2333 | u64 end; | 2949 | u64 end; |
2334 | 2950 | ||
@@ -2336,6 +2952,9 @@ void intel_iommu_domain_exit(struct dmar_domain *domain) | |||
2336 | if (!domain) | 2952 | if (!domain) |
2337 | return; | 2953 | return; |
2338 | 2954 | ||
2955 | vm_domain_remove_all_dev_info(domain); | ||
2956 | /* destroy iovas */ | ||
2957 | put_iova_domain(&domain->iovad); | ||
2339 | end = DOMAIN_MAX_ADDR(domain->gaw); | 2958 | end = DOMAIN_MAX_ADDR(domain->gaw); |
2340 | end = end & (~VTD_PAGE_MASK); | 2959 | end = end & (~VTD_PAGE_MASK); |
2341 | 2960 | ||
@@ -2345,94 +2964,167 @@ void intel_iommu_domain_exit(struct dmar_domain *domain) | |||
2345 | /* free page tables */ | 2964 | /* free page tables */ |
2346 | dma_pte_free_pagetable(domain, 0, end); | 2965 | dma_pte_free_pagetable(domain, 0, end); |
2347 | 2966 | ||
2348 | iommu_free_domain(domain); | 2967 | iommu_free_vm_domain(domain); |
2349 | free_domain_mem(domain); | 2968 | free_domain_mem(domain); |
2350 | } | 2969 | } |
2351 | EXPORT_SYMBOL_GPL(intel_iommu_domain_exit); | ||
2352 | 2970 | ||
2353 | struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev) | 2971 | static int intel_iommu_domain_init(struct iommu_domain *domain) |
2354 | { | 2972 | { |
2355 | struct dmar_drhd_unit *drhd; | 2973 | struct dmar_domain *dmar_domain; |
2356 | struct dmar_domain *domain; | ||
2357 | struct intel_iommu *iommu; | ||
2358 | |||
2359 | drhd = dmar_find_matched_drhd_unit(pdev); | ||
2360 | if (!drhd) { | ||
2361 | printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n"); | ||
2362 | return NULL; | ||
2363 | } | ||
2364 | 2974 | ||
2365 | iommu = drhd->iommu; | 2975 | dmar_domain = iommu_alloc_vm_domain(); |
2366 | if (!iommu) { | 2976 | if (!dmar_domain) { |
2367 | printk(KERN_ERR | ||
2368 | "intel_iommu_domain_alloc: iommu == NULL\n"); | ||
2369 | return NULL; | ||
2370 | } | ||
2371 | domain = iommu_alloc_domain(iommu); | ||
2372 | if (!domain) { | ||
2373 | printk(KERN_ERR | 2977 | printk(KERN_ERR |
2374 | "intel_iommu_domain_alloc: domain == NULL\n"); | 2978 | "intel_iommu_domain_init: dmar_domain == NULL\n"); |
2375 | return NULL; | 2979 | return -ENOMEM; |
2376 | } | 2980 | } |
2377 | if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | 2981 | if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
2378 | printk(KERN_ERR | 2982 | printk(KERN_ERR |
2379 | "intel_iommu_domain_alloc: domain_init() failed\n"); | 2983 | "intel_iommu_domain_init() failed\n"); |
2380 | intel_iommu_domain_exit(domain); | 2984 | vm_domain_exit(dmar_domain); |
2381 | return NULL; | 2985 | return -ENOMEM; |
2382 | } | 2986 | } |
2383 | return domain; | 2987 | domain->priv = dmar_domain; |
2988 | |||
2989 | return 0; | ||
2384 | } | 2990 | } |
2385 | EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc); | ||
2386 | 2991 | ||
2387 | int intel_iommu_context_mapping( | 2992 | static void intel_iommu_domain_destroy(struct iommu_domain *domain) |
2388 | struct dmar_domain *domain, struct pci_dev *pdev) | ||
2389 | { | 2993 | { |
2390 | int rc; | 2994 | struct dmar_domain *dmar_domain = domain->priv; |
2391 | rc = domain_context_mapping(domain, pdev); | 2995 | |
2392 | return rc; | 2996 | domain->priv = NULL; |
2997 | vm_domain_exit(dmar_domain); | ||
2393 | } | 2998 | } |
2394 | EXPORT_SYMBOL_GPL(intel_iommu_context_mapping); | ||
2395 | 2999 | ||
2396 | int intel_iommu_page_mapping( | 3000 | static int intel_iommu_attach_device(struct iommu_domain *domain, |
2397 | struct dmar_domain *domain, dma_addr_t iova, | 3001 | struct device *dev) |
2398 | u64 hpa, size_t size, int prot) | ||
2399 | { | 3002 | { |
2400 | int rc; | 3003 | struct dmar_domain *dmar_domain = domain->priv; |
2401 | rc = domain_page_mapping(domain, iova, hpa, size, prot); | 3004 | struct pci_dev *pdev = to_pci_dev(dev); |
2402 | return rc; | 3005 | struct intel_iommu *iommu; |
3006 | int addr_width; | ||
3007 | u64 end; | ||
3008 | int ret; | ||
3009 | |||
3010 | /* normally pdev is not mapped */ | ||
3011 | if (unlikely(domain_context_mapped(pdev))) { | ||
3012 | struct dmar_domain *old_domain; | ||
3013 | |||
3014 | old_domain = find_domain(pdev); | ||
3015 | if (old_domain) { | ||
3016 | if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) | ||
3017 | vm_domain_remove_one_dev_info(old_domain, pdev); | ||
3018 | else | ||
3019 | domain_remove_dev_info(old_domain); | ||
3020 | } | ||
3021 | } | ||
3022 | |||
3023 | iommu = device_to_iommu(pdev->bus->number, pdev->devfn); | ||
3024 | if (!iommu) | ||
3025 | return -ENODEV; | ||
3026 | |||
3027 | /* check if this iommu agaw is sufficient for max mapped address */ | ||
3028 | addr_width = agaw_to_width(iommu->agaw); | ||
3029 | end = DOMAIN_MAX_ADDR(addr_width); | ||
3030 | end = end & VTD_PAGE_MASK; | ||
3031 | if (end < dmar_domain->max_addr) { | ||
3032 | printk(KERN_ERR "%s: iommu agaw (%d) is not " | ||
3033 | "sufficient for the mapped address (%llx)\n", | ||
3034 | __func__, iommu->agaw, dmar_domain->max_addr); | ||
3035 | return -EFAULT; | ||
3036 | } | ||
3037 | |||
3038 | ret = domain_context_mapping(dmar_domain, pdev); | ||
3039 | if (ret) | ||
3040 | return ret; | ||
3041 | |||
3042 | ret = vm_domain_add_dev_info(dmar_domain, pdev); | ||
3043 | return ret; | ||
2403 | } | 3044 | } |
2404 | EXPORT_SYMBOL_GPL(intel_iommu_page_mapping); | ||
2405 | 3045 | ||
2406 | void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn) | 3046 | static void intel_iommu_detach_device(struct iommu_domain *domain, |
3047 | struct device *dev) | ||
2407 | { | 3048 | { |
2408 | detach_domain_for_dev(domain, bus, devfn); | 3049 | struct dmar_domain *dmar_domain = domain->priv; |
3050 | struct pci_dev *pdev = to_pci_dev(dev); | ||
3051 | |||
3052 | vm_domain_remove_one_dev_info(dmar_domain, pdev); | ||
2409 | } | 3053 | } |
2410 | EXPORT_SYMBOL_GPL(intel_iommu_detach_dev); | ||
2411 | 3054 | ||
2412 | struct dmar_domain * | 3055 | static int intel_iommu_map_range(struct iommu_domain *domain, |
2413 | intel_iommu_find_domain(struct pci_dev *pdev) | 3056 | unsigned long iova, phys_addr_t hpa, |
3057 | size_t size, int iommu_prot) | ||
2414 | { | 3058 | { |
2415 | return find_domain(pdev); | 3059 | struct dmar_domain *dmar_domain = domain->priv; |
3060 | u64 max_addr; | ||
3061 | int addr_width; | ||
3062 | int prot = 0; | ||
3063 | int ret; | ||
3064 | |||
3065 | if (iommu_prot & IOMMU_READ) | ||
3066 | prot |= DMA_PTE_READ; | ||
3067 | if (iommu_prot & IOMMU_WRITE) | ||
3068 | prot |= DMA_PTE_WRITE; | ||
3069 | |||
3070 | max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size); | ||
3071 | if (dmar_domain->max_addr < max_addr) { | ||
3072 | int min_agaw; | ||
3073 | u64 end; | ||
3074 | |||
3075 | /* check if minimum agaw is sufficient for mapped address */ | ||
3076 | min_agaw = vm_domain_min_agaw(dmar_domain); | ||
3077 | addr_width = agaw_to_width(min_agaw); | ||
3078 | end = DOMAIN_MAX_ADDR(addr_width); | ||
3079 | end = end & VTD_PAGE_MASK; | ||
3080 | if (end < max_addr) { | ||
3081 | printk(KERN_ERR "%s: iommu agaw (%d) is not " | ||
3082 | "sufficient for the mapped address (%llx)\n", | ||
3083 | __func__, min_agaw, max_addr); | ||
3084 | return -EFAULT; | ||
3085 | } | ||
3086 | dmar_domain->max_addr = max_addr; | ||
3087 | } | ||
3088 | |||
3089 | ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot); | ||
3090 | return ret; | ||
2416 | } | 3091 | } |
2417 | EXPORT_SYMBOL_GPL(intel_iommu_find_domain); | ||
2418 | 3092 | ||
2419 | int intel_iommu_found(void) | 3093 | static void intel_iommu_unmap_range(struct iommu_domain *domain, |
3094 | unsigned long iova, size_t size) | ||
2420 | { | 3095 | { |
2421 | return g_num_of_iommus; | 3096 | struct dmar_domain *dmar_domain = domain->priv; |
3097 | dma_addr_t base; | ||
3098 | |||
3099 | /* The address might not be aligned */ | ||
3100 | base = iova & VTD_PAGE_MASK; | ||
3101 | size = VTD_PAGE_ALIGN(size); | ||
3102 | dma_pte_clear_range(dmar_domain, base, base + size); | ||
3103 | |||
3104 | if (dmar_domain->max_addr == base + size) | ||
3105 | dmar_domain->max_addr = base; | ||
2422 | } | 3106 | } |
2423 | EXPORT_SYMBOL_GPL(intel_iommu_found); | ||
2424 | 3107 | ||
2425 | u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova) | 3108 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, |
3109 | unsigned long iova) | ||
2426 | { | 3110 | { |
3111 | struct dmar_domain *dmar_domain = domain->priv; | ||
2427 | struct dma_pte *pte; | 3112 | struct dma_pte *pte; |
2428 | u64 pfn; | 3113 | u64 phys = 0; |
2429 | |||
2430 | pfn = 0; | ||
2431 | pte = addr_to_dma_pte(domain, iova); | ||
2432 | 3114 | ||
3115 | pte = addr_to_dma_pte(dmar_domain, iova); | ||
2433 | if (pte) | 3116 | if (pte) |
2434 | pfn = dma_pte_addr(*pte); | 3117 | phys = dma_pte_addr(pte); |
2435 | 3118 | ||
2436 | return pfn >> VTD_PAGE_SHIFT; | 3119 | return phys; |
2437 | } | 3120 | } |
2438 | EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn); | 3121 | |
3122 | static struct iommu_ops intel_iommu_ops = { | ||
3123 | .domain_init = intel_iommu_domain_init, | ||
3124 | .domain_destroy = intel_iommu_domain_destroy, | ||
3125 | .attach_dev = intel_iommu_attach_device, | ||
3126 | .detach_dev = intel_iommu_detach_device, | ||
3127 | .map = intel_iommu_map_range, | ||
3128 | .unmap = intel_iommu_unmap_range, | ||
3129 | .iova_to_phys = intel_iommu_iova_to_phys, | ||
3130 | }; | ||
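The exported intel_iommu_* entry points are gone; VT-d now plugs into the generic IOMMU API via register_iommu(&intel_iommu_ops), and consumers such as KVM device assignment call the generic wrappers instead. A hypothetical caller, sketched against the generic API these ops back (assign_one_page and its arguments are made up for illustration):

    #include <linux/iommu.h>
    #include <linux/pci.h>

    /* map one page of guest memory (gpa -> hpa) for pdev, read/write */
    static struct iommu_domain *assign_one_page(struct pci_dev *pdev,
                                                unsigned long gpa,
                                                phys_addr_t hpa)
    {
            struct iommu_domain *dom;

            dom = iommu_domain_alloc();             /* -> intel_iommu_domain_init() */
            if (!dom)
                    return NULL;

            if (iommu_attach_device(dom, &pdev->dev))   /* -> ...attach_device() */
                    goto out_free;

            if (iommu_map_range(dom, gpa, hpa, PAGE_SIZE,
                                IOMMU_READ | IOMMU_WRITE))
                    goto out_detach;

            return dom;     /* caller later unmaps, detaches and frees */

    out_detach:
            iommu_detach_device(dom, &pdev->dev);
    out_free:
            iommu_domain_free(dom);
            return NULL;
    }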
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 2de5a3238c94..f78371b22529 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/pci.h> | 5 | #include <linux/pci.h> |
6 | #include <linux/irq.h> | 6 | #include <linux/irq.h> |
7 | #include <asm/io_apic.h> | 7 | #include <asm/io_apic.h> |
8 | #include <asm/smp.h> | ||
8 | #include <linux/intel-iommu.h> | 9 | #include <linux/intel-iommu.h> |
9 | #include "intr_remapping.h" | 10 | #include "intr_remapping.h" |
10 | 11 | ||
@@ -19,17 +20,75 @@ struct irq_2_iommu { | |||
19 | u8 irte_mask; | 20 | u8 irte_mask; |
20 | }; | 21 | }; |
21 | 22 | ||
22 | static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; | 23 | #ifdef CONFIG_SPARSE_IRQ |
24 | static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu) | ||
25 | { | ||
26 | struct irq_2_iommu *iommu; | ||
27 | int node; | ||
28 | |||
29 | node = cpu_to_node(cpu); | ||
30 | |||
31 | iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); | ||
32 | printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node); | ||
33 | |||
34 | return iommu; | ||
35 | } | ||
23 | 36 | ||
24 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | 37 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) |
25 | { | 38 | { |
26 | return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL; | 39 | struct irq_desc *desc; |
40 | |||
41 | desc = irq_to_desc(irq); | ||
42 | |||
43 | if (WARN_ON_ONCE(!desc)) | ||
44 | return NULL; | ||
45 | |||
46 | return desc->irq_2_iommu; | ||
47 | } | ||
48 | |||
49 | static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu) | ||
50 | { | ||
51 | struct irq_desc *desc; | ||
52 | struct irq_2_iommu *irq_iommu; | ||
53 | |||
54 | /* | ||
55 | * alloc irq desc if not allocated already. | ||
56 | */ | ||
57 | desc = irq_to_desc_alloc_cpu(irq, cpu); | ||
58 | if (!desc) { | ||
59 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | ||
60 | return NULL; | ||
61 | } | ||
62 | |||
63 | irq_iommu = desc->irq_2_iommu; | ||
64 | |||
65 | if (!irq_iommu) | ||
66 | desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu); | ||
67 | |||
68 | return desc->irq_2_iommu; | ||
27 | } | 69 | } |
28 | 70 | ||
29 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) | 71 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) |
30 | { | 72 | { |
73 | return irq_2_iommu_alloc_cpu(irq, boot_cpu_id); | ||
74 | } | ||
75 | |||
76 | #else /* !CONFIG_SPARSE_IRQ */ | ||
77 | |||
78 | static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; | ||
79 | |||
80 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | ||
81 | { | ||
82 | if (irq < nr_irqs) | ||
83 | return &irq_2_iommuX[irq]; | ||
84 | |||
85 | return NULL; | ||
86 | } | ||
87 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) | ||
88 | { | ||
31 | return irq_2_iommu(irq); | 89 | return irq_2_iommu(irq); |
32 | } | 90 | } |
91 | #endif | ||
33 | 92 | ||
34 | static DEFINE_SPINLOCK(irq_2_ir_lock); | 93 | static DEFINE_SPINLOCK(irq_2_ir_lock); |
35 | 94 | ||
@@ -86,9 +145,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
86 | if (!count) | 145 | if (!count) |
87 | return -1; | 146 | return -1; |
88 | 147 | ||
148 | #ifndef CONFIG_SPARSE_IRQ | ||
89 | /* protect irq_2_iommu_alloc later */ | 149 | /* protect irq_2_iommu_alloc later */ |
90 | if (irq >= nr_irqs) | 150 | if (irq >= nr_irqs) |
91 | return -1; | 151 | return -1; |
152 | #endif | ||
92 | 153 | ||
93 | /* | 154 | /* |
94 | * start the IRTE search from index 0. | 155 | * start the IRTE search from index 0. |
@@ -130,6 +191,12 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
130 | table->base[i].present = 1; | 191 | table->base[i].present = 1; |
131 | 192 | ||
132 | irq_iommu = irq_2_iommu_alloc(irq); | 193 | irq_iommu = irq_2_iommu_alloc(irq); |
194 | if (!irq_iommu) { | ||
195 | spin_unlock(&irq_2_ir_lock); | ||
196 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); | ||
197 | return -1; | ||
198 | } | ||
199 | |||
133 | irq_iommu->iommu = iommu; | 200 | irq_iommu->iommu = iommu; |
134 | irq_iommu->irte_index = index; | 201 | irq_iommu->irte_index = index; |
135 | irq_iommu->sub_handle = 0; | 202 | irq_iommu->sub_handle = 0; |
@@ -177,6 +244,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | |||
177 | 244 | ||
178 | irq_iommu = irq_2_iommu_alloc(irq); | 245 | irq_iommu = irq_2_iommu_alloc(irq); |
179 | 246 | ||
247 | if (!irq_iommu) { | ||
248 | spin_unlock(&irq_2_ir_lock); | ||
249 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); | ||
250 | return -1; | ||
251 | } | ||
252 | |||
180 | irq_iommu->iommu = iommu; | 253 | irq_iommu->iommu = iommu; |
181 | irq_iommu->irte_index = index; | 254 | irq_iommu->irte_index = index; |
182 | irq_iommu->sub_handle = subhandle; | 255 | irq_iommu->sub_handle = subhandle; |
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c index 6441dfa969a3..de01174aff06 100644 --- a/drivers/pci/irq.c +++ b/drivers/pci/irq.c | |||
@@ -15,7 +15,7 @@ static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason) | |||
15 | 15 | ||
16 | dev_printk(KERN_ERR, &pdev->dev, | 16 | dev_printk(KERN_ERR, &pdev->dev, |
17 | "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n", | 17 | "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n", |
18 | parent->dev.bus_id, parent->vendor, parent->device); | 18 | dev_name(&parent->dev), parent->vendor, parent->device); |
19 | dev_printk(KERN_ERR, &pdev->dev, "%s\n", reason); | 19 | dev_printk(KERN_ERR, &pdev->dev, "%s\n", reason); |
20 | dev_printk(KERN_ERR, &pdev->dev, "Please report to linux-kernel@vger.kernel.org\n"); | 20 | dev_printk(KERN_ERR, &pdev->dev, "Please report to linux-kernel@vger.kernel.org\n"); |
21 | WARN_ON(1); | 21 | WARN_ON(1); |
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 74801f7df9c9..b4a90badd0a6 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -103,11 +103,11 @@ static void msix_set_enable(struct pci_dev *dev, int enable) | |||
103 | } | 103 | } |
104 | } | 104 | } |
105 | 105 | ||
106 | static void msix_flush_writes(unsigned int irq) | 106 | static void msix_flush_writes(struct irq_desc *desc) |
107 | { | 107 | { |
108 | struct msi_desc *entry; | 108 | struct msi_desc *entry; |
109 | 109 | ||
110 | entry = get_irq_msi(irq); | 110 | entry = get_irq_desc_msi(desc); |
111 | BUG_ON(!entry || !entry->dev); | 111 | BUG_ON(!entry || !entry->dev); |
112 | switch (entry->msi_attrib.type) { | 112 | switch (entry->msi_attrib.type) { |
113 | case PCI_CAP_ID_MSI: | 113 | case PCI_CAP_ID_MSI: |
@@ -135,11 +135,11 @@ static void msix_flush_writes(unsigned int irq) | |||
135 | * Returns 1 if it succeeded in masking the interrupt and 0 if the device | 135 | * Returns 1 if it succeeded in masking the interrupt and 0 if the device |
136 | * doesn't support MSI masking. | 136 | * doesn't support MSI masking. |
137 | */ | 137 | */ |
138 | static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) | 138 | static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag) |
139 | { | 139 | { |
140 | struct msi_desc *entry; | 140 | struct msi_desc *entry; |
141 | 141 | ||
142 | entry = get_irq_msi(irq); | 142 | entry = get_irq_desc_msi(desc); |
143 | BUG_ON(!entry || !entry->dev); | 143 | BUG_ON(!entry || !entry->dev); |
144 | switch (entry->msi_attrib.type) { | 144 | switch (entry->msi_attrib.type) { |
145 | case PCI_CAP_ID_MSI: | 145 | case PCI_CAP_ID_MSI: |
@@ -172,9 +172,9 @@ static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) | |||
172 | return 1; | 172 | return 1; |
173 | } | 173 | } |
174 | 174 | ||
175 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) | 175 | void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) |
176 | { | 176 | { |
177 | struct msi_desc *entry = get_irq_msi(irq); | 177 | struct msi_desc *entry = get_irq_desc_msi(desc); |
178 | switch(entry->msi_attrib.type) { | 178 | switch(entry->msi_attrib.type) { |
179 | case PCI_CAP_ID_MSI: | 179 | case PCI_CAP_ID_MSI: |
180 | { | 180 | { |
@@ -211,9 +211,16 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg) | |||
211 | } | 211 | } |
212 | } | 212 | } |
213 | 213 | ||
214 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) | 214 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) |
215 | { | ||
216 | struct irq_desc *desc = irq_to_desc(irq); | ||
217 | |||
218 | read_msi_msg_desc(desc, msg); | ||
219 | } | ||
220 | |||
221 | void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | ||
215 | { | 222 | { |
216 | struct msi_desc *entry = get_irq_msi(irq); | 223 | struct msi_desc *entry = get_irq_desc_msi(desc); |
217 | switch (entry->msi_attrib.type) { | 224 | switch (entry->msi_attrib.type) { |
218 | case PCI_CAP_ID_MSI: | 225 | case PCI_CAP_ID_MSI: |
219 | { | 226 | { |
@@ -252,21 +259,31 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg) | |||
252 | entry->msg = *msg; | 259 | entry->msg = *msg; |
253 | } | 260 | } |
254 | 261 | ||
262 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) | ||
263 | { | ||
264 | struct irq_desc *desc = irq_to_desc(irq); | ||
265 | |||
266 | write_msi_msg_desc(desc, msg); | ||
267 | } | ||
268 | |||
255 | void mask_msi_irq(unsigned int irq) | 269 | void mask_msi_irq(unsigned int irq) |
256 | { | 270 | { |
257 | msi_set_mask_bits(irq, 1, 1); | 271 | struct irq_desc *desc = irq_to_desc(irq); |
258 | msix_flush_writes(irq); | 272 | |
273 | msi_set_mask_bits(desc, 1, 1); | ||
274 | msix_flush_writes(desc); | ||
259 | } | 275 | } |
260 | 276 | ||
261 | void unmask_msi_irq(unsigned int irq) | 277 | void unmask_msi_irq(unsigned int irq) |
262 | { | 278 | { |
263 | msi_set_mask_bits(irq, 1, 0); | 279 | struct irq_desc *desc = irq_to_desc(irq); |
264 | msix_flush_writes(irq); | 280 | |
281 | msi_set_mask_bits(desc, 1, 0); | ||
282 | msix_flush_writes(desc); | ||
265 | } | 283 | } |
266 | 284 | ||
267 | static int msi_free_irqs(struct pci_dev* dev); | 285 | static int msi_free_irqs(struct pci_dev* dev); |
268 | 286 | ||
269 | |||
270 | static struct msi_desc* alloc_msi_entry(void) | 287 | static struct msi_desc* alloc_msi_entry(void) |
271 | { | 288 | { |
272 | struct msi_desc *entry; | 289 | struct msi_desc *entry; |
@@ -303,9 +320,11 @@ static void __pci_restore_msi_state(struct pci_dev *dev) | |||
303 | pci_intx_for_msi(dev, 0); | 320 | pci_intx_for_msi(dev, 0); |
304 | msi_set_enable(dev, 0); | 321 | msi_set_enable(dev, 0); |
305 | write_msi_msg(dev->irq, &entry->msg); | 322 | write_msi_msg(dev->irq, &entry->msg); |
306 | if (entry->msi_attrib.maskbit) | 323 | if (entry->msi_attrib.maskbit) { |
307 | msi_set_mask_bits(dev->irq, entry->msi_attrib.maskbits_mask, | 324 | struct irq_desc *desc = irq_to_desc(dev->irq); |
325 | msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask, | ||
308 | entry->msi_attrib.masked); | 326 | entry->msi_attrib.masked); |
327 | } | ||
309 | 328 | ||
310 | pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); | 329 | pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); |
311 | control &= ~PCI_MSI_FLAGS_QSIZE; | 330 | control &= ~PCI_MSI_FLAGS_QSIZE; |
@@ -327,8 +346,9 @@ static void __pci_restore_msix_state(struct pci_dev *dev) | |||
327 | msix_set_enable(dev, 0); | 346 | msix_set_enable(dev, 0); |
328 | 347 | ||
329 | list_for_each_entry(entry, &dev->msi_list, list) { | 348 | list_for_each_entry(entry, &dev->msi_list, list) { |
349 | struct irq_desc *desc = irq_to_desc(entry->irq); | ||
330 | write_msi_msg(entry->irq, &entry->msg); | 350 | write_msi_msg(entry->irq, &entry->msg); |
331 | msi_set_mask_bits(entry->irq, 1, entry->msi_attrib.masked); | 351 | msi_set_mask_bits(desc, 1, entry->msi_attrib.masked); |
332 | } | 352 | } |
333 | 353 | ||
334 | BUG_ON(list_empty(&dev->msi_list)); | 354 | BUG_ON(list_empty(&dev->msi_list)); |
@@ -596,7 +616,8 @@ void pci_msi_shutdown(struct pci_dev* dev) | |||
596 | /* Return the device to the reset state with msi irqs unmasked */ | 616 | /* Return the device to the reset state with msi irqs unmasked */
597 | if (entry->msi_attrib.maskbit) { | 617 | if (entry->msi_attrib.maskbit) { |
598 | u32 mask = entry->msi_attrib.maskbits_mask; | 618 | u32 mask = entry->msi_attrib.maskbits_mask; |
599 | msi_set_mask_bits(dev->irq, mask, ~mask); | 619 | struct irq_desc *desc = irq_to_desc(dev->irq); |
620 | msi_set_mask_bits(desc, mask, ~mask); | ||
600 | } | 621 | } |
601 | if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) | 622 | if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) |
602 | return; | 623 | return; |
@@ -755,28 +776,19 @@ void pci_no_msi(void) | |||
755 | pci_msi_enable = 0; | 776 | pci_msi_enable = 0; |
756 | } | 777 | } |
757 | 778 | ||
758 | void pci_msi_init_pci_dev(struct pci_dev *dev) | 779 | /** |
759 | { | 780 | * pci_msi_enabled - is MSI enabled? |
760 | INIT_LIST_HEAD(&dev->msi_list); | 781 | * |
761 | } | 782 | * Returns true if MSI has not been disabled by the command-line option |
762 | 783 | * pci=nomsi. | |
763 | #ifdef CONFIG_ACPI | 784 | **/ |
764 | #include <linux/acpi.h> | 785 | int pci_msi_enabled(void) |
765 | #include <linux/pci-acpi.h> | ||
766 | static void __devinit msi_acpi_init(void) | ||
767 | { | 786 | { |
768 | if (acpi_pci_disabled) | 787 | return pci_msi_enable; |
769 | return; | ||
770 | pci_osc_support_set(OSC_MSI_SUPPORT); | ||
771 | pcie_osc_support_set(OSC_MSI_SUPPORT); | ||
772 | } | 788 | } |
773 | #else | 789 | EXPORT_SYMBOL(pci_msi_enabled); |
774 | static inline void msi_acpi_init(void) { } | ||
775 | #endif /* CONFIG_ACPI */ | ||
776 | 790 | ||
777 | void __devinit msi_init(void) | 791 | void pci_msi_init_pci_dev(struct pci_dev *dev) |
778 | { | 792 | { |
779 | if (!pci_msi_enable) | 793 | INIT_LIST_HEAD(&dev->msi_list); |
780 | return; | ||
781 | msi_acpi_init(); | ||
782 | } | 794 | } |
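The newly exported pci_msi_enabled() gives drivers a way to honor the global pci=nomsi switch before attempting MSI setup. A hedged usage sketch; foo_isr and the driver name are hypothetical:

    /* Hypothetical probe fragment: try MSI when globally enabled,
     * otherwise share the legacy INTx line. */
    static int foo_request_irq(struct pci_dev *pdev)
    {
            if (pci_msi_enabled() && !pci_enable_msi(pdev))
                    return request_irq(pdev->irq, foo_isr, 0, "foo", pdev);

            return request_irq(pdev->irq, foo_isr, IRQF_SHARED, "foo", pdev);
    }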
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 9d976d51d406..deea8a187eb8 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -22,13 +22,14 @@ struct acpi_osc_data { | |||
22 | acpi_handle handle; | 22 | acpi_handle handle; |
23 | u32 support_set; | 23 | u32 support_set; |
24 | u32 control_set; | 24 | u32 control_set; |
25 | u32 control_query; | ||
26 | int is_queried; | ||
25 | struct list_head sibiling; | 27 | struct list_head sibiling; |
26 | }; | 28 | }; |
27 | static LIST_HEAD(acpi_osc_data_list); | 29 | static LIST_HEAD(acpi_osc_data_list); |
28 | 30 | ||
29 | struct acpi_osc_args { | 31 | struct acpi_osc_args { |
30 | u32 capbuf[3]; | 32 | u32 capbuf[3]; |
31 | u32 ctrl_result; | ||
32 | }; | 33 | }; |
33 | 34 | ||
34 | static DEFINE_MUTEX(pci_acpi_lock); | 35 | static DEFINE_MUTEX(pci_acpi_lock); |
@@ -54,7 +55,7 @@ static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, | |||
54 | 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; | 55 | 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; |
55 | 56 | ||
56 | static acpi_status acpi_run_osc(acpi_handle handle, | 57 | static acpi_status acpi_run_osc(acpi_handle handle, |
57 | struct acpi_osc_args *osc_args) | 58 | struct acpi_osc_args *osc_args, u32 *retval) |
58 | { | 59 | { |
59 | acpi_status status; | 60 | acpi_status status; |
60 | struct acpi_object_list input; | 61 | struct acpi_object_list input; |
@@ -110,8 +111,7 @@ static acpi_status acpi_run_osc(acpi_handle handle, | |||
110 | goto out_kfree; | 111 | goto out_kfree; |
111 | } | 112 | } |
112 | out_success: | 113 | out_success: |
113 | osc_args->ctrl_result = | 114 | *retval = *((u32 *)(out_obj->buffer.pointer + 8)); |
114 | *((u32 *)(out_obj->buffer.pointer + 8)); | ||
115 | status = AE_OK; | 115 | status = AE_OK; |
116 | 116 | ||
117 | out_kfree: | 117 | out_kfree: |
@@ -119,11 +119,10 @@ out_kfree: | |||
119 | return status; | 119 | return status; |
120 | } | 120 | } |
121 | 121 | ||
122 | static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data, | 122 | static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data) |
123 | u32 *result) | ||
124 | { | 123 | { |
125 | acpi_status status; | 124 | acpi_status status; |
126 | u32 support_set; | 125 | u32 support_set, result; |
127 | struct acpi_osc_args osc_args; | 126 | struct acpi_osc_args osc_args; |
128 | 127 | ||
129 | /* do _OSC query for all possible controls */ | 128 | /* do _OSC query for all possible controls */ |
@@ -132,56 +131,45 @@ static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data, | |||
132 | osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set; | 131 | osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set; |
133 | osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; | 132 | osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; |
134 | 133 | ||
135 | status = acpi_run_osc(osc_data->handle, &osc_args); | 134 | status = acpi_run_osc(osc_data->handle, &osc_args, &result); |
136 | if (ACPI_SUCCESS(status)) { | 135 | if (ACPI_SUCCESS(status)) { |
137 | osc_data->support_set = support_set; | 136 | osc_data->support_set = support_set; |
138 | *result = osc_args.ctrl_result; | 137 | osc_data->control_query = result; |
138 | osc_data->is_queried = 1; | ||
139 | } | 139 | } |
140 | 140 | ||
141 | return status; | 141 | return status; |
142 | } | 142 | } |
143 | 143 | ||
144 | static acpi_status acpi_query_osc(acpi_handle handle, | 144 | /* |
145 | u32 level, void *context, void **retval) | 145 | * pci_acpi_osc_support: Invoke _OSC indicating support for the given feature |
146 | * @flags: Bitmask of flags to support | ||
147 | * | ||
148 | * See the ACPI spec for the definition of the flags | ||
149 | */ | ||
150 | int pci_acpi_osc_support(acpi_handle handle, u32 flags) | ||
146 | { | 151 | { |
147 | acpi_status status; | 152 | acpi_status status; |
148 | struct acpi_osc_data *osc_data; | ||
149 | u32 flags = (unsigned long)context, dummy; | ||
150 | acpi_handle tmp; | 153 | acpi_handle tmp; |
154 | struct acpi_osc_data *osc_data; | ||
155 | int rc = 0; | ||
151 | 156 | ||
152 | status = acpi_get_handle(handle, "_OSC", &tmp); | 157 | status = acpi_get_handle(handle, "_OSC", &tmp); |
153 | if (ACPI_FAILURE(status)) | 158 | if (ACPI_FAILURE(status)) |
154 | return AE_OK; | 159 | return -ENOTTY; |
155 | 160 | ||
156 | mutex_lock(&pci_acpi_lock); | 161 | mutex_lock(&pci_acpi_lock); |
157 | osc_data = acpi_get_osc_data(handle); | 162 | osc_data = acpi_get_osc_data(handle); |
158 | if (!osc_data) { | 163 | if (!osc_data) { |
159 | printk(KERN_ERR "acpi osc data array is full\n"); | 164 | printk(KERN_ERR "acpi osc data array is full\n"); |
165 | rc = -ENOMEM; | ||
160 | goto out; | 166 | goto out; |
161 | } | 167 | } |
162 | 168 | ||
163 | __acpi_query_osc(flags, osc_data, &dummy); | 169 | __acpi_query_osc(flags, osc_data); |
164 | out: | 170 | out: |
165 | mutex_unlock(&pci_acpi_lock); | 171 | mutex_unlock(&pci_acpi_lock); |
166 | return AE_OK; | 172 | return rc; |
167 | } | ||
168 | |||
169 | /** | ||
170 | * __pci_osc_support_set - register OS support to Firmware | ||
171 | * @flags: OS support bits | ||
172 | * @hid: hardware ID | ||
173 | * | ||
174 | * Update OS support fields and do an _OSC query to obtain an update | ||
175 | * from Firmware on supported control bits. | ||
176 | **/ | ||
177 | acpi_status __pci_osc_support_set(u32 flags, const char *hid) | ||
178 | { | ||
179 | if (!(flags & OSC_SUPPORT_MASKS)) | ||
180 | return AE_TYPE; | ||
181 | |||
182 | acpi_get_devices(hid, acpi_query_osc, | ||
183 | (void *)(unsigned long)flags, NULL); | ||
184 | return AE_OK; | ||
185 | } | 173 | } |
186 | 174 | ||
187 | /** | 175 | /** |
@@ -194,7 +182,7 @@ acpi_status __pci_osc_support_set(u32 flags, const char *hid) | |||
194 | acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) | 182 | acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) |
195 | { | 183 | { |
196 | acpi_status status; | 184 | acpi_status status; |
197 | u32 ctrlset, control_set, result; | 185 | u32 control_req, control_set, result; |
198 | acpi_handle tmp; | 186 | acpi_handle tmp; |
199 | struct acpi_osc_data *osc_data; | 187 | struct acpi_osc_data *osc_data; |
200 | struct acpi_osc_args osc_args; | 188 | struct acpi_osc_args osc_args; |
@@ -211,28 +199,34 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) | |||
211 | goto out; | 199 | goto out; |
212 | } | 200 | } |
213 | 201 | ||
214 | ctrlset = (flags & OSC_CONTROL_MASKS); | 202 | control_req = (flags & OSC_CONTROL_MASKS); |
215 | if (!ctrlset) { | 203 | if (!control_req) { |
216 | status = AE_TYPE; | 204 | status = AE_TYPE; |
217 | goto out; | 205 | goto out; |
218 | } | 206 | } |
219 | 207 | ||
220 | status = __acpi_query_osc(osc_data->support_set, osc_data, &result); | 208 | /* No need to evaluate _OSC if the control was already granted. */ |
221 | if (ACPI_FAILURE(status)) | 209 | if ((osc_data->control_set & control_req) == control_req) |
222 | goto out; | 210 | goto out; |
223 | 211 | ||
224 | if ((result & ctrlset) != ctrlset) { | 212 | if (!osc_data->is_queried) { |
213 | status = __acpi_query_osc(osc_data->support_set, osc_data); | ||
214 | if (ACPI_FAILURE(status)) | ||
215 | goto out; | ||
216 | } | ||
217 | |||
218 | if ((osc_data->control_query & control_req) != control_req) { | ||
225 | status = AE_SUPPORT; | 219 | status = AE_SUPPORT; |
226 | goto out; | 220 | goto out; |
227 | } | 221 | } |
228 | 222 | ||
229 | control_set = osc_data->control_set | ctrlset; | 223 | control_set = osc_data->control_set | control_req; |
230 | osc_args.capbuf[OSC_QUERY_TYPE] = 0; | 224 | osc_args.capbuf[OSC_QUERY_TYPE] = 0; |
231 | osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set; | 225 | osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set; |
232 | osc_args.capbuf[OSC_CONTROL_TYPE] = control_set; | 226 | osc_args.capbuf[OSC_CONTROL_TYPE] = control_set; |
233 | status = acpi_run_osc(handle, &osc_args); | 227 | status = acpi_run_osc(handle, &osc_args, &result); |
234 | if (ACPI_SUCCESS(status)) | 228 | if (ACPI_SUCCESS(status)) |
235 | osc_data->control_set = control_set; | 229 | osc_data->control_set = result; |
236 | out: | 230 | out: |
237 | mutex_unlock(&pci_acpi_lock); | 231 | mutex_unlock(&pci_acpi_lock); |
238 | return status; | 232 | return status; |
@@ -373,7 +367,7 @@ static int acpi_pci_find_root_bridge(struct device *dev, acpi_handle *handle) | |||
373 | * The string should be the same as root bridge's name | 367 | * The string should be the same as root bridge's name |
374 | * Please look at 'pci_scan_bus_parented' | 368 | * Please look at 'pci_scan_bus_parented' |
375 | */ | 369 | */ |
376 | num = sscanf(dev->bus_id, "pci%04x:%02x", &seg, &bus); | 370 | num = sscanf(dev_name(dev), "pci%04x:%02x", &seg, &bus); |
377 | if (num != 2) | 371 | if (num != 2) |
378 | return -ENODEV; | 372 | return -ENODEV; |
379 | *handle = acpi_get_pci_rootbridge_handle(seg, bus); | 373 | *handle = acpi_get_pci_rootbridge_handle(seg, bus); |
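With the query result cached in acpi_osc_data, support bits are now registered per root-bridge handle through pci_acpi_osc_support() rather than by walking devices by HID. A sketch of a call site, assuming handle was obtained from the bridge's ACPI companion:

    /* Sketch: advertise MSI support to firmware for one root bridge. */
    if (pci_acpi_osc_support(handle, OSC_MSI_SUPPORT))
            printk(KERN_INFO "PCI: _OSC MSI support not registered\n");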
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index b4cdd690ae71..c697f2680856 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/cpu.h> | ||
19 | #include "pci.h" | 20 | #include "pci.h" |
20 | 21 | ||
21 | /* | 22 | /* |
@@ -48,7 +49,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) | |||
48 | subdevice=PCI_ANY_ID, class=0, class_mask=0; | 49 | subdevice=PCI_ANY_ID, class=0, class_mask=0; |
49 | unsigned long driver_data=0; | 50 | unsigned long driver_data=0; |
50 | int fields=0; | 51 | int fields=0; |
51 | int retval; | 52 | int retval=0; |
52 | 53 | ||
53 | fields = sscanf(buf, "%x %x %x %x %x %x %lx", | 54 | fields = sscanf(buf, "%x %x %x %x %x %x %lx", |
54 | &vendor, &device, &subvendor, &subdevice, | 55 | &vendor, &device, &subvendor, &subdevice, |
@@ -58,16 +59,18 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) | |||
58 | 59 | ||
59 | /* Only accept driver_data values that match an existing id_table | 60 | /* Only accept driver_data values that match an existing id_table |
60 | entry */ | 61 | entry */ |
61 | retval = -EINVAL; | 62 | if (ids) { |
62 | while (ids->vendor || ids->subvendor || ids->class_mask) { | 63 | retval = -EINVAL; |
63 | if (driver_data == ids->driver_data) { | 64 | while (ids->vendor || ids->subvendor || ids->class_mask) { |
64 | retval = 0; | 65 | if (driver_data == ids->driver_data) { |
65 | break; | 66 | retval = 0; |
67 | break; | ||
68 | } | ||
69 | ids++; | ||
66 | } | 70 | } |
67 | ids++; | 71 | if (retval) /* No match */ |
72 | return retval; | ||
68 | } | 73 | } |
69 | if (retval) /* No match */ | ||
70 | return retval; | ||
71 | 74 | ||
72 | dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); | 75 | dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); |
73 | if (!dynid) | 76 | if (!dynid) |
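The guarded loop above only validates a user-supplied driver_data when the driver actually has an id_table; a NULL table (as pci-stub below uses) now accepts any value. For reference, a hypothetical table the check would walk:

    /* Hypothetical id_table: with this in place, new_id rejects any
     * driver_data other than 1. */
    static const struct pci_device_id foo_ids[] = {
            { PCI_DEVICE(0x8086, 0x10f5), .driver_data = 1 },
            { 0, }
    };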
@@ -183,32 +186,43 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv, | |||
183 | return pci_match_id(drv->id_table, dev); | 186 | return pci_match_id(drv->id_table, dev); |
184 | } | 187 | } |
185 | 188 | ||
189 | struct drv_dev_and_id { | ||
190 | struct pci_driver *drv; | ||
191 | struct pci_dev *dev; | ||
192 | const struct pci_device_id *id; | ||
193 | }; | ||
194 | |||
195 | static long local_pci_probe(void *_ddi) | ||
196 | { | ||
197 | struct drv_dev_and_id *ddi = _ddi; | ||
198 | |||
199 | return ddi->drv->probe(ddi->dev, ddi->id); | ||
200 | } | ||
201 | |||
186 | static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, | 202 | static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, |
187 | const struct pci_device_id *id) | 203 | const struct pci_device_id *id) |
188 | { | 204 | { |
189 | int error; | 205 | int error, node; |
190 | #ifdef CONFIG_NUMA | 206 | struct drv_dev_and_id ddi = { drv, dev, id }; |
191 | /* Execute driver initialization on node where the | ||
192 | device's bus is attached to. This way the driver likely | ||
193 | allocates its local memory on the right node without | ||
194 | any need to change it. */ | ||
195 | struct mempolicy *oldpol; | ||
196 | cpumask_t oldmask = current->cpus_allowed; | ||
197 | int node = dev_to_node(&dev->dev); | ||
198 | 207 | ||
208 | /* Execute driver initialization on the node to which the | ||
209 | device's bus is attached. This way the driver likely | ||
210 | allocates its local memory on the right node without any | ||
211 | need to change it. */ | ||
212 | node = dev_to_node(&dev->dev); | ||
199 | if (node >= 0) { | 213 | if (node >= 0) { |
214 | int cpu; | ||
200 | node_to_cpumask_ptr(nodecpumask, node); | 215 | node_to_cpumask_ptr(nodecpumask, node); |
201 | set_cpus_allowed_ptr(current, nodecpumask); | 216 | |
202 | } | 217 | get_online_cpus(); |
203 | /* And set default memory allocation policy */ | 218 | cpu = cpumask_any_and(nodecpumask, cpu_online_mask); |
204 | oldpol = current->mempolicy; | 219 | if (cpu < nr_cpu_ids) |
205 | current->mempolicy = NULL; /* fall back to system default policy */ | 220 | error = work_on_cpu(cpu, local_pci_probe, &ddi); |
206 | #endif | 221 | else |
207 | error = drv->probe(dev, id); | 222 | error = local_pci_probe(&ddi); |
208 | #ifdef CONFIG_NUMA | 223 | put_online_cpus(); |
209 | set_cpus_allowed_ptr(current, &oldmask); | 224 | } else |
210 | current->mempolicy = oldpol; | 225 | error = local_pci_probe(&ddi); |
211 | #endif | ||
212 | return error; | 226 | return error; |
213 | } | 227 | } |
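work_on_cpu() replaces the old approach of temporarily rewriting the probing task's cpumask and mempolicy: the probe now runs in a kernel thread already bound to a CPU of the device's node, so node-local allocation falls out naturally. The callback contract is just long fn(void *), hence the local_pci_probe() trampoline; a sketch with hypothetical names:

    /* Sketch: any node-affine init can reuse the same trampoline shape. */
    static long foo_init_on_node(void *data)
    {
            struct foo_priv *priv = data;   /* hypothetical payload */

            return foo_hw_init(priv);       /* allocations land node-local */
    }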
214 | 228 | ||
@@ -302,11 +316,10 @@ static void pci_device_shutdown(struct device *dev) | |||
302 | 316 | ||
303 | /* | 317 | /* |
304 | * Default "suspend" method for devices that have no driver provided suspend, | 318 | * Default "suspend" method for devices that have no driver provided suspend, |
305 | * or not even a driver at all. | 319 | * or not even a driver at all (second part). |
306 | */ | 320 | */ |
307 | static void pci_default_pm_suspend(struct pci_dev *pci_dev) | 321 | static void pci_pm_set_unknown_state(struct pci_dev *pci_dev) |
308 | { | 322 | { |
309 | pci_save_state(pci_dev); | ||
310 | /* | 323 | /* |
311 | * mark its power state as "unknown", since we don't know if | 324 | * mark its power state as "unknown", since we don't know if |
312 | * e.g. the BIOS will change its device state when we suspend. | 325 | * e.g. the BIOS will change its device state when we suspend. |
@@ -317,14 +330,12 @@ static void pci_default_pm_suspend(struct pci_dev *pci_dev) | |||
317 | 330 | ||
318 | /* | 331 | /* |
319 | * Default "resume" method for devices that have no driver provided resume, | 332 | * Default "resume" method for devices that have no driver provided resume, |
320 | * or not even a driver at all. | 333 | * or not even a driver at all (second part). |
321 | */ | 334 | */ |
322 | static int pci_default_pm_resume(struct pci_dev *pci_dev) | 335 | static int pci_pm_reenable_device(struct pci_dev *pci_dev) |
323 | { | 336 | { |
324 | int retval = 0; | 337 | int retval; |
325 | 338 | ||
326 | /* restore the PCI config space */ | ||
327 | pci_restore_state(pci_dev); | ||
328 | /* if the device was enabled before suspend, reenable */ | 339 | /* if the device was enabled before suspend, reenable */ |
329 | retval = pci_reenable_device(pci_dev); | 340 | retval = pci_reenable_device(pci_dev); |
330 | /* | 341 | /* |
@@ -347,8 +358,16 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state) | |||
347 | i = drv->suspend(pci_dev, state); | 358 | i = drv->suspend(pci_dev, state); |
348 | suspend_report_result(drv->suspend, i); | 359 | suspend_report_result(drv->suspend, i); |
349 | } else { | 360 | } else { |
350 | pci_default_pm_suspend(pci_dev); | 361 | pci_save_state(pci_dev); |
362 | /* | ||
363 | * This is for compatibility with existing code with legacy PM | ||
364 | * support. | ||
365 | */ | ||
366 | pci_pm_set_unknown_state(pci_dev); | ||
351 | } | 367 | } |
368 | |||
369 | pci_fixup_device(pci_fixup_suspend, pci_dev); | ||
370 | |||
352 | return i; | 371 | return i; |
353 | } | 372 | } |
354 | 373 | ||
@@ -365,30 +384,130 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state) | |||
365 | return i; | 384 | return i; |
366 | } | 385 | } |
367 | 386 | ||
387 | static int pci_legacy_resume_early(struct device *dev) | ||
388 | { | ||
389 | int error = 0; | ||
390 | struct pci_dev * pci_dev = to_pci_dev(dev); | ||
391 | struct pci_driver * drv = pci_dev->driver; | ||
392 | |||
393 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | ||
394 | |||
395 | if (drv && drv->resume_early) | ||
396 | error = drv->resume_early(pci_dev); | ||
397 | return error; | ||
398 | } | ||
399 | |||
368 | static int pci_legacy_resume(struct device *dev) | 400 | static int pci_legacy_resume(struct device *dev) |
369 | { | 401 | { |
370 | int error; | 402 | int error; |
371 | struct pci_dev * pci_dev = to_pci_dev(dev); | 403 | struct pci_dev * pci_dev = to_pci_dev(dev); |
372 | struct pci_driver * drv = pci_dev->driver; | 404 | struct pci_driver * drv = pci_dev->driver; |
373 | 405 | ||
374 | if (drv && drv->resume) | 406 | pci_fixup_device(pci_fixup_resume, pci_dev); |
407 | |||
408 | if (drv && drv->resume) { | ||
375 | error = drv->resume(pci_dev); | 409 | error = drv->resume(pci_dev); |
376 | else | 410 | } else { |
377 | error = pci_default_pm_resume(pci_dev); | 411 | /* restore the PCI config space */ |
412 | pci_restore_state(pci_dev); | ||
413 | error = pci_pm_reenable_device(pci_dev); | ||
414 | } | ||
378 | return error; | 415 | return error; |
379 | } | 416 | } |
380 | 417 | ||
381 | static int pci_legacy_resume_early(struct device *dev) | 418 | /* Auxiliary functions used by the new power management framework */ |
419 | |||
420 | static int pci_restore_standard_config(struct pci_dev *pci_dev) | ||
382 | { | 421 | { |
422 | struct pci_dev *parent = pci_dev->bus->self; | ||
383 | int error = 0; | 423 | int error = 0; |
384 | struct pci_dev * pci_dev = to_pci_dev(dev); | ||
385 | struct pci_driver * drv = pci_dev->driver; | ||
386 | 424 | ||
387 | if (drv && drv->resume_early) | 425 | /* Check if the device's bus is operational */ |
388 | error = drv->resume_early(pci_dev); | 426 | if (!parent || parent->current_state == PCI_D0) { |
427 | pci_restore_state(pci_dev); | ||
428 | pci_update_current_state(pci_dev, PCI_D0); | ||
429 | } else { | ||
430 | dev_warn(&pci_dev->dev, "unable to restore config, " | ||
431 | "bridge %s in low power state D%d\n", pci_name(parent), | ||
432 | parent->current_state); | ||
433 | pci_dev->current_state = PCI_UNKNOWN; | ||
434 | error = -EAGAIN; | ||
435 | } | ||
436 | |||
389 | return error; | 437 | return error; |
390 | } | 438 | } |
391 | 439 | ||
440 | static bool pci_is_bridge(struct pci_dev *pci_dev) | ||
441 | { | ||
442 | return !!(pci_dev->subordinate); | ||
443 | } | ||
444 | |||
445 | static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) | ||
446 | { | ||
447 | if (pci_restore_standard_config(pci_dev)) | ||
448 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | ||
449 | } | ||
450 | |||
451 | static int pci_pm_default_resume(struct pci_dev *pci_dev) | ||
452 | { | ||
453 | /* | ||
454 | * pci_restore_standard_config() should have been called once already, | ||
455 | * but it would have failed if the device's parent bridge had not been | ||
456 | * in power state D0 at that time. Check it and try again if necessary. | ||
457 | */ | ||
458 | if (pci_dev->current_state == PCI_UNKNOWN) { | ||
459 | int error = pci_restore_standard_config(pci_dev); | ||
460 | if (error) | ||
461 | return error; | ||
462 | } | ||
463 | |||
464 | pci_fixup_device(pci_fixup_resume, pci_dev); | ||
465 | |||
466 | if (!pci_is_bridge(pci_dev)) | ||
467 | pci_enable_wake(pci_dev, PCI_D0, false); | ||
468 | |||
469 | return pci_pm_reenable_device(pci_dev); | ||
470 | } | ||
471 | |||
472 | static void pci_pm_default_suspend_generic(struct pci_dev *pci_dev) | ||
473 | { | ||
474 | /* If device is enabled at this point, disable it */ | ||
475 | pci_disable_enabled_device(pci_dev); | ||
476 | /* | ||
477 | * Save state with interrupts enabled, because in principle the bus the | ||
478 | * device is on may be put into a low power state after this code runs. | ||
479 | */ | ||
480 | pci_save_state(pci_dev); | ||
481 | } | ||
482 | |||
483 | static void pci_pm_default_suspend(struct pci_dev *pci_dev) | ||
484 | { | ||
485 | pci_pm_default_suspend_generic(pci_dev); | ||
486 | |||
487 | if (!pci_is_bridge(pci_dev)) | ||
488 | pci_prepare_to_sleep(pci_dev); | ||
489 | |||
490 | pci_fixup_device(pci_fixup_suspend, pci_dev); | ||
491 | } | ||
492 | |||
493 | static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev) | ||
494 | { | ||
495 | struct pci_driver *drv = pci_dev->driver; | ||
496 | bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume | ||
497 | || drv->resume_early); | ||
498 | |||
499 | /* | ||
500 | * Legacy PM support is used by default, so warn if the new framework is | ||
501 | * supported as well. Drivers are supposed to support either the | ||
502 | * former, or the latter, but not both at the same time. | ||
503 | */ | ||
504 | WARN_ON(ret && drv->driver.pm); | ||
505 | |||
506 | return ret; | ||
507 | } | ||
508 | |||
509 | /* New power management framework */ | ||
510 | |||
392 | static int pci_pm_prepare(struct device *dev) | 511 | static int pci_pm_prepare(struct device *dev) |
393 | { | 512 | { |
394 | struct device_driver *drv = dev->driver; | 513 | struct device_driver *drv = dev->driver; |
@@ -416,17 +535,16 @@ static int pci_pm_suspend(struct device *dev) | |||
416 | struct device_driver *drv = dev->driver; | 535 | struct device_driver *drv = dev->driver; |
417 | int error = 0; | 536 | int error = 0; |
418 | 537 | ||
419 | if (drv && drv->pm) { | 538 | if (pci_has_legacy_pm_support(pci_dev)) |
420 | if (drv->pm->suspend) { | 539 | return pci_legacy_suspend(dev, PMSG_SUSPEND); |
421 | error = drv->pm->suspend(dev); | 540 | |
422 | suspend_report_result(drv->pm->suspend, error); | 541 | if (drv && drv->pm && drv->pm->suspend) { |
423 | } else { | 542 | error = drv->pm->suspend(dev); |
424 | pci_default_pm_suspend(pci_dev); | 543 | suspend_report_result(drv->pm->suspend, error); |
425 | } | ||
426 | } else { | ||
427 | error = pci_legacy_suspend(dev, PMSG_SUSPEND); | ||
428 | } | 544 | } |
429 | pci_fixup_device(pci_fixup_suspend, pci_dev); | 545 | |
546 | if (!error) | ||
547 | pci_pm_default_suspend(pci_dev); | ||
430 | 548 | ||
431 | return error; | 549 | return error; |
432 | } | 550 | } |
@@ -434,53 +552,53 @@ static int pci_pm_suspend(struct device *dev) | |||
434 | static int pci_pm_suspend_noirq(struct device *dev) | 552 | static int pci_pm_suspend_noirq(struct device *dev) |
435 | { | 553 | { |
436 | struct pci_dev *pci_dev = to_pci_dev(dev); | 554 | struct pci_dev *pci_dev = to_pci_dev(dev); |
437 | struct pci_driver *drv = pci_dev->driver; | 555 | struct device_driver *drv = dev->driver; |
438 | int error = 0; | 556 | int error = 0; |
439 | 557 | ||
440 | if (drv && drv->pm) { | 558 | if (pci_has_legacy_pm_support(pci_dev)) |
441 | if (drv->pm->suspend_noirq) { | 559 | return pci_legacy_suspend_late(dev, PMSG_SUSPEND); |
442 | error = drv->pm->suspend_noirq(dev); | 560 | |
443 | suspend_report_result(drv->pm->suspend_noirq, error); | 561 | if (drv && drv->pm && drv->pm->suspend_noirq) { |
444 | } | 562 | error = drv->pm->suspend_noirq(dev); |
445 | } else { | 563 | suspend_report_result(drv->pm->suspend_noirq, error); |
446 | error = pci_legacy_suspend_late(dev, PMSG_SUSPEND); | ||
447 | } | 564 | } |
448 | 565 | ||
566 | if (!error) | ||
567 | pci_pm_set_unknown_state(pci_dev); | ||
568 | |||
449 | return error; | 569 | return error; |
450 | } | 570 | } |
451 | 571 | ||
452 | static int pci_pm_resume(struct device *dev) | 572 | static int pci_pm_resume_noirq(struct device *dev) |
453 | { | 573 | { |
454 | struct pci_dev *pci_dev = to_pci_dev(dev); | 574 | struct pci_dev *pci_dev = to_pci_dev(dev); |
455 | struct device_driver *drv = dev->driver; | 575 | struct device_driver *drv = dev->driver; |
456 | int error; | 576 | int error = 0; |
457 | 577 | ||
458 | pci_fixup_device(pci_fixup_resume, pci_dev); | 578 | if (pci_has_legacy_pm_support(pci_dev)) |
579 | return pci_legacy_resume_early(dev); | ||
459 | 580 | ||
460 | if (drv && drv->pm) { | 581 | pci_pm_default_resume_noirq(pci_dev); |
461 | error = drv->pm->resume ? drv->pm->resume(dev) : | 582 | |
462 | pci_default_pm_resume(pci_dev); | 583 | if (drv && drv->pm && drv->pm->resume_noirq) |
463 | } else { | 584 | error = drv->pm->resume_noirq(dev); |
464 | error = pci_legacy_resume(dev); | ||
465 | } | ||
466 | 585 | ||
467 | return error; | 586 | return error; |
468 | } | 587 | } |
469 | 588 | ||
470 | static int pci_pm_resume_noirq(struct device *dev) | 589 | static int pci_pm_resume(struct device *dev) |
471 | { | 590 | { |
472 | struct pci_dev *pci_dev = to_pci_dev(dev); | 591 | struct pci_dev *pci_dev = to_pci_dev(dev); |
473 | struct pci_driver *drv = pci_dev->driver; | 592 | struct device_driver *drv = dev->driver; |
474 | int error = 0; | 593 | int error = 0; |
475 | 594 | ||
476 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | 595 | if (pci_has_legacy_pm_support(pci_dev)) |
596 | return pci_legacy_resume(dev); | ||
477 | 597 | ||
478 | if (drv && drv->pm) { | 598 | error = pci_pm_default_resume(pci_dev); |
479 | if (drv->pm->resume_noirq) | 599 | |
480 | error = drv->pm->resume_noirq(dev); | 600 | if (!error && drv && drv->pm && drv->pm->resume) |
481 | } else { | 601 | error = drv->pm->resume(dev); |
482 | error = pci_legacy_resume_early(dev); | ||
483 | } | ||
484 | 602 | ||
485 | return error; | 603 | return error; |
486 | } | 604 | } |
@@ -502,141 +620,140 @@ static int pci_pm_freeze(struct device *dev) | |||
502 | struct device_driver *drv = dev->driver; | 620 | struct device_driver *drv = dev->driver; |
503 | int error = 0; | 621 | int error = 0; |
504 | 622 | ||
505 | if (drv && drv->pm) { | 623 | if (pci_has_legacy_pm_support(pci_dev)) |
506 | if (drv->pm->freeze) { | 624 | return pci_legacy_suspend(dev, PMSG_FREEZE); |
507 | error = drv->pm->freeze(dev); | 625 | |
508 | suspend_report_result(drv->pm->freeze, error); | 626 | if (drv && drv->pm && drv->pm->freeze) { |
509 | } else { | 627 | error = drv->pm->freeze(dev); |
510 | pci_default_pm_suspend(pci_dev); | 628 | suspend_report_result(drv->pm->freeze, error); |
511 | } | ||
512 | } else { | ||
513 | error = pci_legacy_suspend(dev, PMSG_FREEZE); | ||
514 | pci_fixup_device(pci_fixup_suspend, pci_dev); | ||
515 | } | 629 | } |
516 | 630 | ||
631 | if (!error) | ||
632 | pci_pm_default_suspend_generic(pci_dev); | ||
633 | |||
517 | return error; | 634 | return error; |
518 | } | 635 | } |
519 | 636 | ||
520 | static int pci_pm_freeze_noirq(struct device *dev) | 637 | static int pci_pm_freeze_noirq(struct device *dev) |
521 | { | 638 | { |
522 | struct pci_dev *pci_dev = to_pci_dev(dev); | 639 | struct pci_dev *pci_dev = to_pci_dev(dev); |
523 | struct pci_driver *drv = pci_dev->driver; | 640 | struct device_driver *drv = dev->driver; |
524 | int error = 0; | 641 | int error = 0; |
525 | 642 | ||
526 | if (drv && drv->pm) { | 643 | if (pci_has_legacy_pm_support(pci_dev)) |
527 | if (drv->pm->freeze_noirq) { | 644 | return pci_legacy_suspend_late(dev, PMSG_FREEZE); |
528 | error = drv->pm->freeze_noirq(dev); | 645 | |
529 | suspend_report_result(drv->pm->freeze_noirq, error); | 646 | if (drv && drv->pm && drv->pm->freeze_noirq) { |
530 | } | 647 | error = drv->pm->freeze_noirq(dev); |
531 | } else { | 648 | suspend_report_result(drv->pm->freeze_noirq, error); |
532 | error = pci_legacy_suspend_late(dev, PMSG_FREEZE); | ||
533 | } | 649 | } |
534 | 650 | ||
651 | if (!error) | ||
652 | pci_pm_set_unknown_state(pci_dev); | ||
653 | |||
535 | return error; | 654 | return error; |
536 | } | 655 | } |
537 | 656 | ||
538 | static int pci_pm_thaw(struct device *dev) | 657 | static int pci_pm_thaw_noirq(struct device *dev) |
539 | { | 658 | { |
659 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
540 | struct device_driver *drv = dev->driver; | 660 | struct device_driver *drv = dev->driver; |
541 | int error = 0; | 661 | int error = 0; |
542 | 662 | ||
543 | if (drv && drv->pm) { | 663 | if (pci_has_legacy_pm_support(pci_dev)) |
544 | if (drv->pm->thaw) | 664 | return pci_legacy_resume_early(dev); |
545 | error = drv->pm->thaw(dev); | 665 | |
546 | } else { | 666 | pci_update_current_state(pci_dev, PCI_D0); |
547 | pci_fixup_device(pci_fixup_resume, to_pci_dev(dev)); | 667 | |
548 | error = pci_legacy_resume(dev); | 668 | if (drv && drv->pm && drv->pm->thaw_noirq) |
549 | } | 669 | error = drv->pm->thaw_noirq(dev); |
550 | 670 | ||
551 | return error; | 671 | return error; |
552 | } | 672 | } |
553 | 673 | ||
554 | static int pci_pm_thaw_noirq(struct device *dev) | 674 | static int pci_pm_thaw(struct device *dev) |
555 | { | 675 | { |
556 | struct pci_dev *pci_dev = to_pci_dev(dev); | 676 | struct pci_dev *pci_dev = to_pci_dev(dev); |
557 | struct pci_driver *drv = pci_dev->driver; | 677 | struct device_driver *drv = dev->driver; |
558 | int error = 0; | 678 | int error = 0; |
559 | 679 | ||
560 | if (drv && drv->pm) { | 680 | if (pci_has_legacy_pm_support(pci_dev)) |
561 | if (drv->pm->thaw_noirq) | 681 | return pci_legacy_resume(dev); |
562 | error = drv->pm->thaw_noirq(dev); | 682 | |
563 | } else { | 683 | pci_pm_reenable_device(pci_dev); |
564 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | 684 | |
565 | error = pci_legacy_resume_early(dev); | 685 | if (drv && drv->pm && drv->pm->thaw) |
566 | } | 686 | error = drv->pm->thaw(dev); |
567 | 687 | ||
568 | return error; | 688 | return error; |
569 | } | 689 | } |
570 | 690 | ||
571 | static int pci_pm_poweroff(struct device *dev) | 691 | static int pci_pm_poweroff(struct device *dev) |
572 | { | 692 | { |
693 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
573 | struct device_driver *drv = dev->driver; | 694 | struct device_driver *drv = dev->driver; |
574 | int error = 0; | 695 | int error = 0; |
575 | 696 | ||
576 | pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev)); | 697 | if (pci_has_legacy_pm_support(pci_dev)) |
698 | return pci_legacy_suspend(dev, PMSG_HIBERNATE); | ||
577 | 699 | ||
578 | if (drv && drv->pm) { | 700 | if (drv && drv->pm && drv->pm->poweroff) { |
579 | if (drv->pm->poweroff) { | 701 | error = drv->pm->poweroff(dev); |
580 | error = drv->pm->poweroff(dev); | 702 | suspend_report_result(drv->pm->poweroff, error); |
581 | suspend_report_result(drv->pm->poweroff, error); | ||
582 | } | ||
583 | } else { | ||
584 | error = pci_legacy_suspend(dev, PMSG_HIBERNATE); | ||
585 | } | 703 | } |
586 | 704 | ||
705 | if (!error) | ||
706 | pci_pm_default_suspend(pci_dev); | ||
707 | |||
587 | return error; | 708 | return error; |
588 | } | 709 | } |
589 | 710 | ||
590 | static int pci_pm_poweroff_noirq(struct device *dev) | 711 | static int pci_pm_poweroff_noirq(struct device *dev) |
591 | { | 712 | { |
592 | struct pci_dev *pci_dev = to_pci_dev(dev); | 713 | struct device_driver *drv = dev->driver; |
593 | struct pci_driver *drv = pci_dev->driver; | ||
594 | int error = 0; | 714 | int error = 0; |
595 | 715 | ||
596 | if (drv && drv->pm) { | 716 | if (pci_has_legacy_pm_support(to_pci_dev(dev))) |
597 | if (drv->pm->poweroff_noirq) { | 717 | return pci_legacy_suspend_late(dev, PMSG_HIBERNATE); |
598 | error = drv->pm->poweroff_noirq(dev); | 718 | |
599 | suspend_report_result(drv->pm->poweroff_noirq, error); | 719 | if (drv && drv->pm && drv->pm->poweroff_noirq) { |
600 | } | 720 | error = drv->pm->poweroff_noirq(dev); |
601 | } else { | 721 | suspend_report_result(drv->pm->poweroff_noirq, error); |
602 | error = pci_legacy_suspend_late(dev, PMSG_HIBERNATE); | ||
603 | } | 722 | } |
604 | 723 | ||
605 | return error; | 724 | return error; |
606 | } | 725 | } |
607 | 726 | ||
608 | static int pci_pm_restore(struct device *dev) | 727 | static int pci_pm_restore_noirq(struct device *dev) |
609 | { | 728 | { |
610 | struct pci_dev *pci_dev = to_pci_dev(dev); | 729 | struct pci_dev *pci_dev = to_pci_dev(dev); |
611 | struct device_driver *drv = dev->driver; | 730 | struct device_driver *drv = dev->driver; |
612 | int error; | 731 | int error = 0; |
613 | 732 | ||
614 | if (drv && drv->pm) { | 733 | if (pci_has_legacy_pm_support(pci_dev)) |
615 | error = drv->pm->restore ? drv->pm->restore(dev) : | 734 | return pci_legacy_resume_early(dev); |
616 | pci_default_pm_resume(pci_dev); | 735 | |
617 | } else { | 736 | pci_pm_default_resume_noirq(pci_dev); |
618 | error = pci_legacy_resume(dev); | 737 | |
619 | } | 738 | if (drv && drv->pm && drv->pm->restore_noirq) |
620 | pci_fixup_device(pci_fixup_resume, pci_dev); | 739 | error = drv->pm->restore_noirq(dev); |
621 | 740 | ||
622 | return error; | 741 | return error; |
623 | } | 742 | } |
624 | 743 | ||
625 | static int pci_pm_restore_noirq(struct device *dev) | 744 | static int pci_pm_restore(struct device *dev) |
626 | { | 745 | { |
627 | struct pci_dev *pci_dev = to_pci_dev(dev); | 746 | struct pci_dev *pci_dev = to_pci_dev(dev); |
628 | struct pci_driver *drv = pci_dev->driver; | 747 | struct device_driver *drv = dev->driver; |
629 | int error = 0; | 748 | int error = 0; |
630 | 749 | ||
631 | pci_fixup_device(pci_fixup_resume, pci_dev); | 750 | if (pci_has_legacy_pm_support(pci_dev)) |
751 | return pci_legacy_resume(dev); | ||
632 | 752 | ||
633 | if (drv && drv->pm) { | 753 | error = pci_pm_default_resume(pci_dev); |
634 | if (drv->pm->restore_noirq) | 754 | |
635 | error = drv->pm->restore_noirq(dev); | 755 | if (!error && drv && drv->pm && drv->pm->restore) |
636 | } else { | 756 | error = drv->pm->restore(dev); |
637 | error = pci_legacy_resume_early(dev); | ||
638 | } | ||
639 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | ||
640 | 757 | ||
641 | return error; | 758 | return error; |
642 | } | 759 | } |
@@ -654,17 +771,15 @@ static int pci_pm_restore_noirq(struct device *dev) | |||
654 | 771 | ||
655 | #endif /* !CONFIG_HIBERNATION */ | 772 | #endif /* !CONFIG_HIBERNATION */ |
656 | 773 | ||
657 | struct pm_ext_ops pci_pm_ops = { | 774 | struct dev_pm_ops pci_dev_pm_ops = { |
658 | .base = { | 775 | .prepare = pci_pm_prepare, |
659 | .prepare = pci_pm_prepare, | 776 | .complete = pci_pm_complete, |
660 | .complete = pci_pm_complete, | 777 | .suspend = pci_pm_suspend, |
661 | .suspend = pci_pm_suspend, | 778 | .resume = pci_pm_resume, |
662 | .resume = pci_pm_resume, | 779 | .freeze = pci_pm_freeze, |
663 | .freeze = pci_pm_freeze, | 780 | .thaw = pci_pm_thaw, |
664 | .thaw = pci_pm_thaw, | 781 | .poweroff = pci_pm_poweroff, |
665 | .poweroff = pci_pm_poweroff, | 782 | .restore = pci_pm_restore, |
666 | .restore = pci_pm_restore, | ||
667 | }, | ||
668 | .suspend_noirq = pci_pm_suspend_noirq, | 783 | .suspend_noirq = pci_pm_suspend_noirq, |
669 | .resume_noirq = pci_pm_resume_noirq, | 784 | .resume_noirq = pci_pm_resume_noirq, |
670 | .freeze_noirq = pci_pm_freeze_noirq, | 785 | .freeze_noirq = pci_pm_freeze_noirq, |
@@ -673,7 +788,7 @@ struct pm_ext_ops pci_pm_ops = { | |||
673 | .restore_noirq = pci_pm_restore_noirq, | 788 | .restore_noirq = pci_pm_restore_noirq, |
674 | }; | 789 | }; |
675 | 790 | ||
676 | #define PCI_PM_OPS_PTR &pci_pm_ops | 791 | #define PCI_PM_OPS_PTR (&pci_dev_pm_ops) |
677 | 792 | ||
678 | #else /* !CONFIG_PM_SLEEP */ | 793 | #else /* !CONFIG_PM_SLEEP */ |
679 | 794 | ||
@@ -703,9 +818,6 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner, | |||
703 | drv->driver.owner = owner; | 818 | drv->driver.owner = owner; |
704 | drv->driver.mod_name = mod_name; | 819 | drv->driver.mod_name = mod_name; |
705 | 820 | ||
706 | if (drv->pm) | ||
707 | drv->driver.pm = &drv->pm->base; | ||
708 | |||
709 | spin_lock_init(&drv->dynids.lock); | 821 | spin_lock_init(&drv->dynids.lock); |
710 | INIT_LIST_HEAD(&drv->dynids.list); | 822 | INIT_LIST_HEAD(&drv->dynids.list); |
711 | 823 | ||
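With the pm_ext_ops indirection gone, a driver opting into the new framework fills a struct dev_pm_ops and attaches it to the embedded device_driver, leaving the legacy .suspend/.resume hooks unset so pci_has_legacy_pm_support() stays false. A hedged sketch; all foo_* names are hypothetical, and the callbacks take a struct device * and return int:

    static struct dev_pm_ops foo_pm_ops = {
            .suspend        = foo_suspend,
            .resume         = foo_resume,
    };

    static struct pci_driver foo_driver = {
            .name           = "foo",
            .id_table       = foo_ids,
            .probe          = foo_probe,
            .driver         = { .pm = &foo_pm_ops },
    };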
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c new file mode 100644 index 000000000000..74fbec0bf6cb --- /dev/null +++ b/drivers/pci/pci-stub.c | |||
@@ -0,0 +1,47 @@ | |||
1 | /* pci-stub - simple stub driver to reserve a pci device | ||
2 | * | ||
3 | * Copyright (C) 2008 Red Hat, Inc. | ||
4 | * Author: | ||
5 | * Chris Wright | ||
6 | * | ||
7 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
8 | * | ||
9 | * Usage is simple: allocate a new id to the stub driver and bind the | ||
10 | * device to it. For example: | ||
11 | * | ||
12 | * # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id | ||
13 | * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind | ||
14 | * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind | ||
15 | * # ls -l /sys/bus/pci/devices/0000:00:19.0/driver | ||
16 | * .../0000:00:19.0/driver -> ../../../bus/pci/drivers/pci-stub | ||
17 | */ | ||
18 | |||
19 | #include <linux/module.h> | ||
20 | #include <linux/pci.h> | ||
21 | |||
22 | static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id) | ||
23 | { | ||
24 | return 0; | ||
25 | } | ||
26 | |||
27 | static struct pci_driver stub_driver = { | ||
28 | .name = "pci-stub", | ||
29 | .id_table = NULL, /* only dynamic id's */ | ||
30 | .probe = pci_stub_probe, | ||
31 | }; | ||
32 | |||
33 | static int __init pci_stub_init(void) | ||
34 | { | ||
35 | return pci_register_driver(&stub_driver); | ||
36 | } | ||
37 | |||
38 | static void __exit pci_stub_exit(void) | ||
39 | { | ||
40 | pci_unregister_driver(&stub_driver); | ||
41 | } | ||
42 | |||
43 | module_init(pci_stub_init); | ||
44 | module_exit(pci_stub_exit); | ||
45 | |||
46 | MODULE_LICENSE("GPL"); | ||
47 | MODULE_AUTHOR("Chris Wright <chrisw@sous-sol.org>"); | ||
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 5d72866897a8..c23619fb6c4b 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -58,13 +58,14 @@ static ssize_t broken_parity_status_store(struct device *dev, | |||
58 | const char *buf, size_t count) | 58 | const char *buf, size_t count) |
59 | { | 59 | { |
60 | struct pci_dev *pdev = to_pci_dev(dev); | 60 | struct pci_dev *pdev = to_pci_dev(dev); |
61 | ssize_t consumed = -EINVAL; | 61 | unsigned long val; |
62 | 62 | ||
63 | if ((count > 0) && (*buf == '0' || *buf == '1')) { | 63 | if (strict_strtoul(buf, 0, &val) < 0) |
64 | pdev->broken_parity_status = *buf == '1' ? 1 : 0; | 64 | return -EINVAL; |
65 | consumed = count; | 65 | |
66 | } | 66 | pdev->broken_parity_status = !!val; |
67 | return consumed; | 67 | |
68 | return count; | ||
68 | } | 69 | } |
69 | 70 | ||
70 | static ssize_t local_cpus_show(struct device *dev, | 71 | static ssize_t local_cpus_show(struct device *dev, |
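The same conversion repeats throughout this file: single-character parsing gives way to strict_strtoul(), so multi-digit and 0x-prefixed input parse correctly and garbage is rejected with -EINVAL. The common store-side shape, sketched with a hypothetical attribute:

    static ssize_t foo_flag_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
    {
            unsigned long val;

            if (strict_strtoul(buf, 0, &val) < 0)
                    return -EINVAL;

            /* ... consume !!val ... */
            return count;
    }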
@@ -74,7 +75,7 @@ static ssize_t local_cpus_show(struct device *dev, | |||
74 | int len; | 75 | int len; |
75 | 76 | ||
76 | mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); | 77 | mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); |
77 | len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask); | 78 | len = cpumask_scnprintf(buf, PAGE_SIZE-2, &mask); |
78 | buf[len++] = '\n'; | 79 | buf[len++] = '\n'; |
79 | buf[len] = '\0'; | 80 | buf[len] = '\0'; |
80 | return len; | 81 | return len; |
@@ -88,7 +89,7 @@ static ssize_t local_cpulist_show(struct device *dev, | |||
88 | int len; | 89 | int len; |
89 | 90 | ||
90 | mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); | 91 | mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); |
91 | len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask); | 92 | len = cpulist_scnprintf(buf, PAGE_SIZE-2, &mask); |
92 | buf[len++] = '\n'; | 93 | buf[len++] = '\n'; |
93 | buf[len] = '\0'; | 94 | buf[len] = '\0'; |
94 | return len; | 95 | return len; |
@@ -101,11 +102,13 @@ resource_show(struct device * dev, struct device_attribute *attr, char * buf) | |||
101 | struct pci_dev * pci_dev = to_pci_dev(dev); | 102 | struct pci_dev * pci_dev = to_pci_dev(dev); |
102 | char * str = buf; | 103 | char * str = buf; |
103 | int i; | 104 | int i; |
104 | int max = 7; | 105 | int max; |
105 | resource_size_t start, end; | 106 | resource_size_t start, end; |
106 | 107 | ||
107 | if (pci_dev->subordinate) | 108 | if (pci_dev->subordinate) |
108 | max = DEVICE_COUNT_RESOURCE; | 109 | max = DEVICE_COUNT_RESOURCE; |
110 | else | ||
111 | max = PCI_BRIDGE_RESOURCES; | ||
109 | 112 | ||
110 | for (i = 0; i < max; i++) { | 113 | for (i = 0; i < max; i++) { |
111 | struct resource *res = &pci_dev->resource[i]; | 114 | struct resource *res = &pci_dev->resource[i]; |
@@ -133,19 +136,23 @@ static ssize_t is_enabled_store(struct device *dev, | |||
133 | struct device_attribute *attr, const char *buf, | 136 | struct device_attribute *attr, const char *buf, |
134 | size_t count) | 137 | size_t count) |
135 | { | 138 | { |
136 | ssize_t result = -EINVAL; | ||
137 | struct pci_dev *pdev = to_pci_dev(dev); | 139 | struct pci_dev *pdev = to_pci_dev(dev); |
140 | unsigned long val; | ||
141 | ssize_t result = strict_strtoul(buf, 0, &val); | ||
142 | |||
143 | if (result < 0) | ||
144 | return result; | ||
138 | 145 | ||
139 | /* this can crash the machine when done on the "wrong" device */ | 146 | /* this can crash the machine when done on the "wrong" device */ |
140 | if (!capable(CAP_SYS_ADMIN)) | 147 | if (!capable(CAP_SYS_ADMIN)) |
141 | return count; | 148 | return -EPERM; |
142 | 149 | ||
143 | if (*buf == '0') { | 150 | if (!val) { |
144 | if (atomic_read(&pdev->enable_cnt) != 0) | 151 | if (atomic_read(&pdev->enable_cnt) != 0) |
145 | pci_disable_device(pdev); | 152 | pci_disable_device(pdev); |
146 | else | 153 | else |
147 | result = -EIO; | 154 | result = -EIO; |
148 | } else if (*buf == '1') | 155 | } else |
149 | result = pci_enable_device(pdev); | 156 | result = pci_enable_device(pdev); |
150 | 157 | ||
151 | return result < 0 ? result : count; | 158 | return result < 0 ? result : count; |
@@ -185,25 +192,28 @@ msi_bus_store(struct device *dev, struct device_attribute *attr, | |||
185 | const char *buf, size_t count) | 192 | const char *buf, size_t count) |
186 | { | 193 | { |
187 | struct pci_dev *pdev = to_pci_dev(dev); | 194 | struct pci_dev *pdev = to_pci_dev(dev); |
195 | unsigned long val; | ||
196 | |||
197 | if (strict_strtoul(buf, 0, &val) < 0) | ||
198 | return -EINVAL; | ||
188 | 199 | ||
189 | /* bad things may happen if the no_msi flag is changed | 200 | /* bad things may happen if the no_msi flag is changed |
190 | * while some drivers are loaded */ | 201 | * while some drivers are loaded */ |
191 | if (!capable(CAP_SYS_ADMIN)) | 202 | if (!capable(CAP_SYS_ADMIN)) |
192 | return count; | 203 | return -EPERM; |
193 | 204 | ||
205 | /* Maybe pci devices without subordinate busses shouldn't even have this | ||
206 | * attribute in the first place? */ | ||
194 | if (!pdev->subordinate) | 207 | if (!pdev->subordinate) |
195 | return count; | 208 | return count; |
196 | 209 | ||
197 | if (*buf == '0') { | 210 | /* Is the flag going to change, or keep the value it already had? */ |
198 | pdev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; | 211 | if (!(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI) ^ |
199 | dev_warn(&pdev->dev, "forced subordinate bus to not support MSI," | 212 | !!val) { |
200 | " bad things could happen.\n"); | 213 | pdev->subordinate->bus_flags ^= PCI_BUS_FLAGS_NO_MSI; |
201 | } | ||
202 | 214 | ||
203 | if (*buf == '1') { | 215 | dev_warn(&pdev->dev, "forced subordinate bus to%s support MSI," |
204 | pdev->subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI; | 216 | " bad things could happen\n", val ? "" : " not"); |
205 | dev_warn(&pdev->dev, "forced subordinate bus to support MSI," | ||
206 | " bad things could happen.\n"); | ||
207 | } | 217 | } |
208 | 218 | ||
209 | return count; | 219 | return count; |
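The XOR test above fires only when the requested state differs from the current one, so the flag is toggled and the warning printed on real transitions only:

    /* !(flags & PCI_BUS_FLAGS_NO_MSI) is 1 when MSI is currently allowed;
     * !!val is 1 when the writer asks for it to be allowed:
     *   allowed(1) ^ want(1) = 0 -> no change
     *   allowed(1) ^ want(0) = 1 -> set NO_MSI, warn "to not support"
     *   allowed(0) ^ want(1) = 1 -> clear NO_MSI, warn "to support"
     *   allowed(0) ^ want(0) = 0 -> no change
     */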
@@ -361,55 +371,33 @@ pci_write_config(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
361 | } | 371 | } |
362 | 372 | ||
363 | static ssize_t | 373 | static ssize_t |
364 | pci_read_vpd(struct kobject *kobj, struct bin_attribute *bin_attr, | 374 | read_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr, |
365 | char *buf, loff_t off, size_t count) | 375 | char *buf, loff_t off, size_t count) |
366 | { | 376 | { |
367 | struct pci_dev *dev = | 377 | struct pci_dev *dev = |
368 | to_pci_dev(container_of(kobj, struct device, kobj)); | 378 | to_pci_dev(container_of(kobj, struct device, kobj)); |
369 | int end; | ||
370 | int ret; | ||
371 | 379 | ||
372 | if (off > bin_attr->size) | 380 | if (off > bin_attr->size) |
373 | count = 0; | 381 | count = 0; |
374 | else if (count > bin_attr->size - off) | 382 | else if (count > bin_attr->size - off) |
375 | count = bin_attr->size - off; | 383 | count = bin_attr->size - off; |
376 | end = off + count; | ||
377 | |||
378 | while (off < end) { | ||
379 | ret = dev->vpd->ops->read(dev, off, end - off, buf); | ||
380 | if (ret < 0) | ||
381 | return ret; | ||
382 | buf += ret; | ||
383 | off += ret; | ||
384 | } | ||
385 | 384 | ||
386 | return count; | 385 | return pci_read_vpd(dev, off, count, buf); |
387 | } | 386 | } |
388 | 387 | ||
389 | static ssize_t | 388 | static ssize_t |
390 | pci_write_vpd(struct kobject *kobj, struct bin_attribute *bin_attr, | 389 | write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr, |
391 | char *buf, loff_t off, size_t count) | 390 | char *buf, loff_t off, size_t count) |
392 | { | 391 | { |
393 | struct pci_dev *dev = | 392 | struct pci_dev *dev = |
394 | to_pci_dev(container_of(kobj, struct device, kobj)); | 393 | to_pci_dev(container_of(kobj, struct device, kobj)); |
395 | int end; | ||
396 | int ret; | ||
397 | 394 | ||
398 | if (off > bin_attr->size) | 395 | if (off > bin_attr->size) |
399 | count = 0; | 396 | count = 0; |
400 | else if (count > bin_attr->size - off) | 397 | else if (count > bin_attr->size - off) |
401 | count = bin_attr->size - off; | 398 | count = bin_attr->size - off; |
402 | end = off + count; | ||
403 | |||
404 | while (off < end) { | ||
405 | ret = dev->vpd->ops->write(dev, off, end - off, buf); | ||
406 | if (ret < 0) | ||
407 | return ret; | ||
408 | buf += ret; | ||
409 | off += ret; | ||
410 | } | ||
411 | 399 | ||
412 | return count; | 400 | return pci_write_vpd(dev, off, count, buf); |
413 | } | 401 | } |
414 | 402 | ||
415 | #ifdef HAVE_PCI_LEGACY | 403 | #ifdef HAVE_PCI_LEGACY |
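The sysfs handlers now delegate to the exported pci_read_vpd()/pci_write_vpd() accessors instead of driving dev->vpd->ops and looping themselves. A hypothetical in-kernel caller of the same helper:

    /* Sketch: fetch the start of a device's VPD through the accessor. */
    u8 data[16];
    ssize_t n = pci_read_vpd(pdev, 0, sizeof(data), data);

    if (n < 0)
            dev_warn(&pdev->dev, "VPD read failed (%zd)\n", n);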
@@ -569,7 +557,7 @@ void pci_remove_legacy_files(struct pci_bus *b) | |||
569 | 557 | ||
570 | #ifdef HAVE_PCI_MMAP | 558 | #ifdef HAVE_PCI_MMAP |
571 | 559 | ||
572 | static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) | 560 | int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) |
573 | { | 561 | { |
574 | unsigned long nr, start, size; | 562 | unsigned long nr, start, size; |
575 | 563 | ||
@@ -620,6 +608,9 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, | |||
620 | vma->vm_pgoff += start >> PAGE_SHIFT; | 608 | vma->vm_pgoff += start >> PAGE_SHIFT; |
621 | mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; | 609 | mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; |
622 | 610 | ||
611 | if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(start)) | ||
612 | return -EINVAL; | ||
613 | |||
623 | return pci_mmap_page_range(pdev, vma, mmap_type, write_combine); | 614 | return pci_mmap_page_range(pdev, vma, mmap_type, write_combine); |
624 | } | 615 | } |
625 | 616 | ||
@@ -832,8 +823,8 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev) | |||
832 | attr->size = dev->vpd->len; | 823 | attr->size = dev->vpd->len; |
833 | attr->attr.name = "vpd"; | 824 | attr->attr.name = "vpd"; |
834 | attr->attr.mode = S_IRUSR | S_IWUSR; | 825 | attr->attr.mode = S_IRUSR | S_IWUSR; |
835 | attr->read = pci_read_vpd; | 826 | attr->read = read_vpd_attr; |
836 | attr->write = pci_write_vpd; | 827 | attr->write = write_vpd_attr; |
837 | retval = sysfs_create_bin_file(&dev->dev.kobj, attr); | 828 | retval = sysfs_create_bin_file(&dev->dev.kobj, attr); |
838 | if (retval) { | 829 | if (retval) { |
839 | kfree(dev->vpd->attr); | 830 | kfree(dev->vpd->attr); |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 061d1ee0046a..c12f6c790698 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -56,6 +56,22 @@ unsigned char pci_bus_max_busnr(struct pci_bus* bus) | |||
56 | } | 56 | } |
57 | EXPORT_SYMBOL_GPL(pci_bus_max_busnr); | 57 | EXPORT_SYMBOL_GPL(pci_bus_max_busnr); |
58 | 58 | ||
59 | #ifdef CONFIG_HAS_IOMEM | ||
60 | void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) | ||
61 | { | ||
62 | /* | ||
63 | * Make sure the BAR is actually a memory resource, not an IO resource | ||
64 | */ | ||
65 | if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { | ||
66 | WARN_ON(1); | ||
67 | return NULL; | ||
68 | } | ||
69 | return ioremap_nocache(pci_resource_start(pdev, bar), | ||
70 | pci_resource_len(pdev, bar)); | ||
71 | } | ||
72 | EXPORT_SYMBOL_GPL(pci_ioremap_bar); | ||
73 | #endif | ||
74 | |||
59 | #if 0 | 75 | #if 0 |
60 | /** | 76 | /** |
61 | * pci_max_busnr - returns maximum PCI bus number | 77 | * pci_max_busnr - returns maximum PCI bus number |
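pci_ioremap_bar() folds the usual flags check plus start/length boilerplate into one call and refuses to map I/O-port BARs. A typical probe fragment, sketched:

    /* Sketch: map BAR 0, which must be a memory BAR; NULL on failure. */
    void __iomem *regs = pci_ioremap_bar(pdev, 0);

    if (!regs)
            return -ENOMEM;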
@@ -360,25 +376,10 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) | |||
360 | static void | 376 | static void |
361 | pci_restore_bars(struct pci_dev *dev) | 377 | pci_restore_bars(struct pci_dev *dev) |
362 | { | 378 | { |
363 | int i, numres; | 379 | int i; |
364 | |||
365 | switch (dev->hdr_type) { | ||
366 | case PCI_HEADER_TYPE_NORMAL: | ||
367 | numres = 6; | ||
368 | break; | ||
369 | case PCI_HEADER_TYPE_BRIDGE: | ||
370 | numres = 2; | ||
371 | break; | ||
372 | case PCI_HEADER_TYPE_CARDBUS: | ||
373 | numres = 1; | ||
374 | break; | ||
375 | default: | ||
376 | /* Should never get here, but just in case... */ | ||
377 | return; | ||
378 | } | ||
379 | 380 | ||
380 | for (i = 0; i < numres; i ++) | 381 | for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) |
381 | pci_update_resource(dev, &dev->resource[i], i); | 382 | pci_update_resource(dev, i); |
382 | } | 383 | } |
383 | 384 | ||
384 | static struct pci_platform_pm_ops *pci_platform_pm; | 385 | static struct pci_platform_pm_ops *pci_platform_pm; |
@@ -524,14 +525,17 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
524 | * pci_update_current_state - Read PCI power state of given device from its | 525 | * pci_update_current_state - Read PCI power state of given device from its |
525 | * PCI PM registers and cache it | 526 | * PCI PM registers and cache it |
526 | * @dev: PCI device to handle. | 527 | * @dev: PCI device to handle. |
528 | * @state: State to cache in case the device doesn't have the PM capability | ||
527 | */ | 529 | */ |
528 | static void pci_update_current_state(struct pci_dev *dev) | 530 | void pci_update_current_state(struct pci_dev *dev, pci_power_t state) |
529 | { | 531 | { |
530 | if (dev->pm_cap) { | 532 | if (dev->pm_cap) { |
531 | u16 pmcsr; | 533 | u16 pmcsr; |
532 | 534 | ||
533 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); | 535 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); |
534 | dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); | 536 | dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); |
537 | } else { | ||
538 | dev->current_state = state; | ||
535 | } | 539 | } |
536 | } | 540 | } |
537 | 541 | ||
@@ -574,7 +578,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
574 | */ | 578 | */ |
575 | int ret = platform_pci_set_power_state(dev, PCI_D0); | 579 | int ret = platform_pci_set_power_state(dev, PCI_D0); |
576 | if (!ret) | 580 | if (!ret) |
577 | pci_update_current_state(dev); | 581 | pci_update_current_state(dev, PCI_D0); |
578 | } | 582 | } |
579 | /* This device is quirked not to be put into D3, so | 583 | /* This device is quirked not to be put into D3, so |
580 | don't put it in D3 */ | 584 | don't put it in D3 */ |
@@ -587,7 +591,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
587 | /* Allow the platform to finalize the transition */ | 591 | /* Allow the platform to finalize the transition */ |
588 | int ret = platform_pci_set_power_state(dev, state); | 592 | int ret = platform_pci_set_power_state(dev, state); |
589 | if (!ret) { | 593 | if (!ret) { |
590 | pci_update_current_state(dev); | 594 | pci_update_current_state(dev, state); |
591 | error = 0; | 595 | error = 0; |
592 | } | 596 | } |
593 | } | 597 | } |
@@ -640,19 +644,14 @@ static int pci_save_pcie_state(struct pci_dev *dev) | |||
640 | int pos, i = 0; | 644 | int pos, i = 0; |
641 | struct pci_cap_saved_state *save_state; | 645 | struct pci_cap_saved_state *save_state; |
642 | u16 *cap; | 646 | u16 *cap; |
643 | int found = 0; | ||
644 | 647 | ||
645 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | 648 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); |
646 | if (pos <= 0) | 649 | if (pos <= 0) |
647 | return 0; | 650 | return 0; |
648 | 651 | ||
649 | save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); | 652 | save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); |
650 | if (!save_state) | ||
651 | save_state = kzalloc(sizeof(*save_state) + sizeof(u16) * 4, GFP_KERNEL); | ||
652 | else | ||
653 | found = 1; | ||
654 | if (!save_state) { | 653 | if (!save_state) { |
655 | dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n"); | 654 | dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__); |
656 | return -ENOMEM; | 655 | return -ENOMEM; |
657 | } | 656 | } |
658 | cap = (u16 *)&save_state->data[0]; | 657 | cap = (u16 *)&save_state->data[0]; |
@@ -661,9 +660,7 @@ static int pci_save_pcie_state(struct pci_dev *dev) | |||
661 | pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]); | 660 | pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]); |
662 | pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]); | 661 | pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]); |
663 | pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]); | 662 | pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]); |
664 | save_state->cap_nr = PCI_CAP_ID_EXP; | 663 | |
665 | if (!found) | ||
666 | pci_add_saved_cap(dev, save_state); | ||
667 | return 0; | 664 | return 0; |
668 | } | 665 | } |
669 | 666 | ||
@@ -688,30 +685,21 @@ static void pci_restore_pcie_state(struct pci_dev *dev) | |||
688 | 685 | ||
689 | static int pci_save_pcix_state(struct pci_dev *dev) | 686 | static int pci_save_pcix_state(struct pci_dev *dev) |
690 | { | 687 | { |
691 | int pos, i = 0; | 688 | int pos; |
692 | struct pci_cap_saved_state *save_state; | 689 | struct pci_cap_saved_state *save_state; |
693 | u16 *cap; | ||
694 | int found = 0; | ||
695 | 690 | ||
696 | pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); | 691 | pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
697 | if (pos <= 0) | 692 | if (pos <= 0) |
698 | return 0; | 693 | return 0; |
699 | 694 | ||
700 | save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); | 695 | save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); |
701 | if (!save_state) | ||
702 | save_state = kzalloc(sizeof(*save_state) + sizeof(u16), GFP_KERNEL); | ||
703 | else | ||
704 | found = 1; | ||
705 | if (!save_state) { | 696 | if (!save_state) { |
706 | dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n"); | 697 | dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__); |
707 | return -ENOMEM; | 698 | return -ENOMEM; |
708 | } | 699 | } |
709 | cap = (u16 *)&save_state->data[0]; | ||
710 | 700 | ||
711 | pci_read_config_word(dev, pos + PCI_X_CMD, &cap[i++]); | 701 | pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data); |
712 | save_state->cap_nr = PCI_CAP_ID_PCIX; | 702 | |
713 | if (!found) | ||
714 | pci_add_saved_cap(dev, save_state); | ||
715 | return 0; | 703 | return 0; |
716 | } | 704 | } |
717 | 705 | ||
@@ -982,6 +970,32 @@ void pcim_pin_device(struct pci_dev *pdev) | |||
982 | */ | 970 | */ |
983 | void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {} | 971 | void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {} |
984 | 972 | ||
973 | static void do_pci_disable_device(struct pci_dev *dev) | ||
974 | { | ||
975 | u16 pci_command; | ||
976 | |||
977 | pci_read_config_word(dev, PCI_COMMAND, &pci_command); | ||
978 | if (pci_command & PCI_COMMAND_MASTER) { | ||
979 | pci_command &= ~PCI_COMMAND_MASTER; | ||
980 | pci_write_config_word(dev, PCI_COMMAND, pci_command); | ||
981 | } | ||
982 | |||
983 | pcibios_disable_device(dev); | ||
984 | } | ||
985 | |||
986 | /** | ||
987 | * pci_disable_enabled_device - Disable device without updating enable_cnt | ||
988 | * @dev: PCI device to disable | ||
989 | * | ||
990 | * NOTE: This function is a backend of PCI power management routines and is | ||
991 | * not supposed to be called drivers. | ||
992 | */ | ||
993 | void pci_disable_enabled_device(struct pci_dev *dev) | ||
994 | { | ||
995 | if (atomic_read(&dev->enable_cnt)) | ||
996 | do_pci_disable_device(dev); | ||
997 | } | ||
998 | |||
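The split above factors the actual disable work out of pci_disable_device() so the PM core can quiesce a device without disturbing the driver-visible enable count. A minimal sketch of the resulting call pattern, assuming a hypothetical PM-side caller (not part of this patch):

```c
#include <linux/pci.h>

/* Sketch: a hypothetical suspend-path caller using the new helper.
 * pci_disable_enabled_device() skips enable_cnt entirely, so a later
 * resume does not see a spuriously dropped reference, while drivers
 * keep using the refcounted pci_disable_device(). */
static int pci_pm_suspend_example(struct pci_dev *dev)
{
	pci_disable_enabled_device(dev);	/* quiesce, keep refcount */
	return 0;
}
```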
985 | /** | 999 | /** |
986 | * pci_disable_device - Disable PCI device after use | 1000 | * pci_disable_device - Disable PCI device after use |
987 | * @dev: PCI device to be disabled | 1001 | * @dev: PCI device to be disabled |
@@ -996,7 +1010,6 @@ void | |||
996 | pci_disable_device(struct pci_dev *dev) | 1010 | pci_disable_device(struct pci_dev *dev) |
997 | { | 1011 | { |
998 | struct pci_devres *dr; | 1012 | struct pci_devres *dr; |
999 | u16 pci_command; | ||
1000 | 1013 | ||
1001 | dr = find_pci_dr(dev); | 1014 | dr = find_pci_dr(dev); |
1002 | if (dr) | 1015 | if (dr) |
@@ -1005,14 +1018,9 @@ pci_disable_device(struct pci_dev *dev) | |||
1005 | if (atomic_sub_return(1, &dev->enable_cnt) != 0) | 1018 | if (atomic_sub_return(1, &dev->enable_cnt) != 0) |
1006 | return; | 1019 | return; |
1007 | 1020 | ||
1008 | pci_read_config_word(dev, PCI_COMMAND, &pci_command); | 1021 | do_pci_disable_device(dev); |
1009 | if (pci_command & PCI_COMMAND_MASTER) { | ||
1010 | pci_command &= ~PCI_COMMAND_MASTER; | ||
1011 | pci_write_config_word(dev, PCI_COMMAND, pci_command); | ||
1012 | } | ||
1013 | dev->is_busmaster = 0; | ||
1014 | 1022 | ||
1015 | pcibios_disable_device(dev); | 1023 | dev->is_busmaster = 0; |
1016 | } | 1024 | } |
1017 | 1025 | ||
1018 | /** | 1026 | /** |
@@ -1107,7 +1115,7 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) | |||
1107 | int error = 0; | 1115 | int error = 0; |
1108 | bool pme_done = false; | 1116 | bool pme_done = false; |
1109 | 1117 | ||
1110 | if (!device_may_wakeup(&dev->dev)) | 1118 | if (enable && !device_may_wakeup(&dev->dev)) |
1111 | return -EINVAL; | 1119 | return -EINVAL; |
1112 | 1120 | ||
1113 | /* | 1121 | /* |
@@ -1252,14 +1260,15 @@ void pci_pm_init(struct pci_dev *dev) | |||
1252 | /* find PCI PM capability in list */ | 1260 | /* find PCI PM capability in list */ |
1253 | pm = pci_find_capability(dev, PCI_CAP_ID_PM); | 1261 | pm = pci_find_capability(dev, PCI_CAP_ID_PM); |
1254 | if (!pm) | 1262 | if (!pm) |
1255 | return; | 1263 | goto Exit; |
1264 | |||
1256 | /* Check device's ability to generate PME# */ | 1265 | /* Check device's ability to generate PME# */ |
1257 | pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); | 1266 | pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); |
1258 | 1267 | ||
1259 | if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { | 1268 | if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { |
1260 | dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n", | 1269 | dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n", |
1261 | pmc & PCI_PM_CAP_VER_MASK); | 1270 | pmc & PCI_PM_CAP_VER_MASK); |
1262 | return; | 1271 | goto Exit; |
1263 | } | 1272 | } |
1264 | 1273 | ||
1265 | dev->pm_cap = pm; | 1274 | dev->pm_cap = pm; |
@@ -1298,6 +1307,74 @@ void pci_pm_init(struct pci_dev *dev) | |||
1298 | } else { | 1307 | } else { |
1299 | dev->pme_support = 0; | 1308 | dev->pme_support = 0; |
1300 | } | 1309 | } |
1310 | |||
1311 | Exit: | ||
1312 | pci_update_current_state(dev, PCI_D0); | ||
1313 | } | ||
1314 | |||
1315 | /** | ||
1316 | * platform_pci_wakeup_init - init platform wakeup if present | ||
1317 | * @dev: PCI device | ||
1318 | * | ||
1319 | * Some devices don't have PCI PM caps but can still generate wakeup | ||
1320 | * events through platform methods (like ACPI events). If @dev supports | ||
1321 | * platform wakeup events, set the device flag to indicate as much. This | ||
1322 | * may be redundant if the device also supports PCI PM caps, but double | ||
1323 | * initialization should be safe in that case. | ||
1324 | */ | ||
1325 | void platform_pci_wakeup_init(struct pci_dev *dev) | ||
1326 | { | ||
1327 | if (!platform_pci_can_wakeup(dev)) | ||
1328 | return; | ||
1329 | |||
1330 | device_set_wakeup_capable(&dev->dev, true); | ||
1331 | device_set_wakeup_enable(&dev->dev, false); | ||
1332 | platform_pci_sleep_wake(dev, false); | ||
1333 | } | ||
1334 | |||
1335 | /** | ||
1336 | * pci_add_cap_save_buffer - allocate buffer for saving given capability registers | ||
1337 | * @dev: the PCI device | ||
1338 | * @cap: the capability to allocate the buffer for | ||
1339 | * @size: requested size of the buffer | ||
1340 | */ | ||
1341 | static int pci_add_cap_save_buffer( | ||
1342 | struct pci_dev *dev, char cap, unsigned int size) | ||
1343 | { | ||
1344 | int pos; | ||
1345 | struct pci_cap_saved_state *save_state; | ||
1346 | |||
1347 | pos = pci_find_capability(dev, cap); | ||
1348 | if (pos <= 0) | ||
1349 | return 0; | ||
1350 | |||
1351 | save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL); | ||
1352 | if (!save_state) | ||
1353 | return -ENOMEM; | ||
1354 | |||
1355 | save_state->cap_nr = cap; | ||
1356 | pci_add_saved_cap(dev, save_state); | ||
1357 | |||
1358 | return 0; | ||
1359 | } | ||
1360 | |||
1361 | /** | ||
1362 | * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities | ||
1363 | * @dev: the PCI device | ||
1364 | */ | ||
1365 | void pci_allocate_cap_save_buffers(struct pci_dev *dev) | ||
1366 | { | ||
1367 | int error; | ||
1368 | |||
1369 | error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 4 * sizeof(u16)); | ||
1370 | if (error) | ||
1371 | dev_err(&dev->dev, | ||
1372 | "unable to preallocate PCI Express save buffer\n"); | ||
1373 | |||
1374 | error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16)); | ||
1375 | if (error) | ||
1376 | dev_err(&dev->dev, | ||
1377 | "unable to preallocate PCI-X save buffer\n"); | ||
1301 | } | 1378 | } |
1302 | 1379 | ||
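With the save buffers preallocated at enumeration time, pci_save_pcie_state() and pci_save_pcix_state() no longer need kzalloc() and can run in atomic context. A sketch of the lookup-then-fill pattern they now follow (names are from this patch; this would live alongside them in drivers/pci/pci.c):

```c
/* Sketch of the save path after this patch: the buffer is expected to
 * exist already, allocated by pci_allocate_cap_save_buffers() at
 * device-add time, so no allocation happens here. */
static int save_cap_example(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return -ENOMEM;	/* should have been preallocated */

	/* ... fill save_state->data[] from config space ... */
	return 0;
}
```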
1303 | /** | 1380 | /** |
@@ -1337,6 +1414,20 @@ void pci_enable_ari(struct pci_dev *dev) | |||
1337 | bridge->ari_enabled = 1; | 1414 | bridge->ari_enabled = 1; |
1338 | } | 1415 | } |
1339 | 1416 | ||
1417 | /** | ||
1418 | * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge | ||
1419 | * @dev: the PCI device | ||
1420 | * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) | ||
1421 | * | ||
1422 | * Perform INTx swizzling for a device behind one level of bridge. This is | ||
1423 | * required by section 9.1 of the PCI-to-PCI bridge specification for devices | ||
1424 | * behind bridges on add-in cards. | ||
1425 | */ | ||
1426 | u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin) | ||
1427 | { | ||
1428 | return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1; | ||
1429 | } | ||
1430 | |||
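The swizzle is pure arithmetic on the pin and the device's slot number, so the formula is easy to sanity-check in isolation. A minimal user-space check (PCI_SLOT reduced to its header definition, since this runs outside the kernel):

```c
#include <stdio.h>

/* Same arithmetic as pci_swizzle_interrupt_pin(); in the kernel
 * headers PCI_SLOT(devfn) is ((devfn) >> 3) & 0x1f. */
static unsigned int swizzle(unsigned int slot, unsigned int pin)
{
	return (((pin - 1) + slot) % 4) + 1;
}

int main(void)
{
	/* INTB (pin 2) on a device in slot 2 appears as INTD (pin 4)
	 * on the upstream side of the bridge. */
	printf("slot 2, INTB -> INT%c\n", 'A' + swizzle(2, 2) - 1);
	return 0;
}
```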
1340 | int | 1431 | int |
1341 | pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) | 1432 | pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) |
1342 | { | 1433 | { |
@@ -1345,9 +1436,9 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) | |||
1345 | pin = dev->pin; | 1436 | pin = dev->pin; |
1346 | if (!pin) | 1437 | if (!pin) |
1347 | return -1; | 1438 | return -1; |
1348 | pin--; | 1439 | |
1349 | while (dev->bus->self) { | 1440 | while (dev->bus->self) { |
1350 | pin = (pin + PCI_SLOT(dev->devfn)) % 4; | 1441 | pin = pci_swizzle_interrupt_pin(dev, pin); |
1351 | dev = dev->bus->self; | 1442 | dev = dev->bus->self; |
1352 | } | 1443 | } |
1353 | *bridge = dev; | 1444 | *bridge = dev; |
@@ -1355,6 +1446,26 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) | |||
1355 | } | 1446 | } |
1356 | 1447 | ||
1357 | /** | 1448 | /** |
1449 | * pci_common_swizzle - swizzle INTx all the way to root bridge | ||
1450 | * @dev: the PCI device | ||
1451 | * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD) | ||
1452 | * | ||
1453 | * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI | ||
1454 | * bridges all the way up to a PCI root bus. | ||
1455 | */ | ||
1456 | u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) | ||
1457 | { | ||
1458 | u8 pin = *pinp; | ||
1459 | |||
1460 | while (dev->bus->self) { | ||
1461 | pin = pci_swizzle_interrupt_pin(dev, pin); | ||
1462 | dev = dev->bus->self; | ||
1463 | } | ||
1464 | *pinp = pin; | ||
1465 | return PCI_SLOT(dev->devfn); | ||
1466 | } | ||
1467 | |||
1468 | /** | ||
1358 | * pci_release_region - Release a PCI bar | 1469 | * pci_release_region - Release a PCI bar |
1359 | * @pdev: PCI device whose resources were previously reserved by pci_request_region | 1470 | * @pdev: PCI device whose resources were previously reserved by pci_request_region |
1360 | * @bar: BAR to release | 1471 | * @bar: BAR to release |
@@ -1395,7 +1506,8 @@ void pci_release_region(struct pci_dev *pdev, int bar) | |||
1395 | * Returns 0 on success, or %EBUSY on error. A warning | 1506 | * Returns 0 on success, or %EBUSY on error. A warning |
1396 | * message is also printed on failure. | 1507 | * message is also printed on failure. |
1397 | */ | 1508 | */ |
1398 | int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) | 1509 | static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name, |
1510 | int exclusive) | ||
1399 | { | 1511 | { |
1400 | struct pci_devres *dr; | 1512 | struct pci_devres *dr; |
1401 | 1513 | ||
@@ -1408,8 +1520,9 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) | |||
1408 | goto err_out; | 1520 | goto err_out; |
1409 | } | 1521 | } |
1410 | else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { | 1522 | else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { |
1411 | if (!request_mem_region(pci_resource_start(pdev, bar), | 1523 | if (!__request_mem_region(pci_resource_start(pdev, bar), |
1412 | pci_resource_len(pdev, bar), res_name)) | 1524 | pci_resource_len(pdev, bar), res_name, |
1525 | exclusive)) | ||
1413 | goto err_out; | 1526 | goto err_out; |
1414 | } | 1527 | } |
1415 | 1528 | ||
@@ -1428,6 +1541,47 @@ err_out: | |||
1428 | } | 1541 | } |
1429 | 1542 | ||
1430 | /** | 1543 | /** |
1544 | * pci_request_region - Reserve PCI I/O and memory resource | ||
1545 | * @pdev: PCI device whose resources are to be reserved | ||
1546 | * @bar: BAR to be reserved | ||
1547 | * @res_name: Name to be associated with resource. | ||
1548 | * | ||
1549 | * Mark the PCI region associated with PCI device @pdev BAR @bar as | ||
1550 | * being reserved by owner @res_name. Do not access any | ||
1551 | * address inside the PCI regions unless this call returns | ||
1552 | * successfully. | ||
1553 | * | ||
1554 | * Returns 0 on success, or %EBUSY on error. A warning | ||
1555 | * message is also printed on failure. | ||
1556 | */ | ||
1557 | int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) | ||
1558 | { | ||
1559 | return __pci_request_region(pdev, bar, res_name, 0); | ||
1560 | } | ||
1561 | |||
1562 | /** | ||
1563 | * pci_request_region_exclusive - Reserve PCI I/O and memory resource | ||
1564 | * @pdev: PCI device whose resources are to be reserved | ||
1565 | * @bar: BAR to be reserved | ||
1566 | * @res_name: Name to be associated with resource. | ||
1567 | * | ||
1568 | * Mark the PCI region associated with PCI device @pdev BAR @bar as | ||
1569 | * being reserved by owner @res_name. Do not access any | ||
1570 | * address inside the PCI regions unless this call returns | ||
1571 | * successfully. | ||
1572 | * | ||
1573 | * Returns 0 on success, or %EBUSY on error. A warning | ||
1574 | * message is also printed on failure. | ||
1575 | * | ||
1576 | * The key difference that _exclusive makes is that userspace is | ||
1577 | * explicitly not allowed to map the resource via /dev/mem or | ||
1578 | * sysfs. | ||
1579 | */ | ||
1580 | int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name) | ||
1581 | { | ||
1582 | return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE); | ||
1583 | } | ||
1584 | /** | ||
1431 | * pci_release_selected_regions - Release selected PCI I/O and memory resources | 1585 | * pci_release_selected_regions - Release selected PCI I/O and memory resources |
1432 | * @pdev: PCI device whose resources were previously reserved | 1586 | * @pdev: PCI device whose resources were previously reserved |
1433 | * @bars: Bitmask of BARs to be released | 1587 | * @bars: Bitmask of BARs to be released |
@@ -1444,20 +1598,14 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars) | |||
1444 | pci_release_region(pdev, i); | 1598 | pci_release_region(pdev, i); |
1445 | } | 1599 | } |
1446 | 1600 | ||
1447 | /** | 1601 | int __pci_request_selected_regions(struct pci_dev *pdev, int bars, |
1448 | * pci_request_selected_regions - Reserve selected PCI I/O and memory resources | 1602 | const char *res_name, int excl) |
1449 | * @pdev: PCI device whose resources are to be reserved | ||
1450 | * @bars: Bitmask of BARs to be requested | ||
1451 | * @res_name: Name to be associated with resource | ||
1452 | */ | ||
1453 | int pci_request_selected_regions(struct pci_dev *pdev, int bars, | ||
1454 | const char *res_name) | ||
1455 | { | 1603 | { |
1456 | int i; | 1604 | int i; |
1457 | 1605 | ||
1458 | for (i = 0; i < 6; i++) | 1606 | for (i = 0; i < 6; i++) |
1459 | if (bars & (1 << i)) | 1607 | if (bars & (1 << i)) |
1460 | if(pci_request_region(pdev, i, res_name)) | 1608 | if (__pci_request_region(pdev, i, res_name, excl)) |
1461 | goto err_out; | 1609 | goto err_out; |
1462 | return 0; | 1610 | return 0; |
1463 | 1611 | ||
@@ -1469,6 +1617,26 @@ err_out: | |||
1469 | return -EBUSY; | 1617 | return -EBUSY; |
1470 | } | 1618 | } |
1471 | 1619 | ||
1620 | |||
1621 | /** | ||
1622 | * pci_request_selected_regions - Reserve selected PCI I/O and memory resources | ||
1623 | * @pdev: PCI device whose resources are to be reserved | ||
1624 | * @bars: Bitmask of BARs to be requested | ||
1625 | * @res_name: Name to be associated with resource | ||
1626 | */ | ||
1627 | int pci_request_selected_regions(struct pci_dev *pdev, int bars, | ||
1628 | const char *res_name) | ||
1629 | { | ||
1630 | return __pci_request_selected_regions(pdev, bars, res_name, 0); | ||
1631 | } | ||
1632 | |||
1633 | int pci_request_selected_regions_exclusive(struct pci_dev *pdev, | ||
1634 | int bars, const char *res_name) | ||
1635 | { | ||
1636 | return __pci_request_selected_regions(pdev, bars, res_name, | ||
1637 | IORESOURCE_EXCLUSIVE); | ||
1638 | } | ||
1639 | |||
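These selected-region helpers pair naturally with pci_select_bars(), which turns a resource-type filter into the BAR bitmask they expect. A short sketch, assuming only symbols already present in this file:

```c
#include <linux/pci.h>

/* Sketch: request only the memory BARs of a device, exclusively. */
static int request_mem_bars_example(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions_exclusive(pdev, bars, "example");
}
```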
1472 | /** | 1640 | /** |
1473 | * pci_release_regions - Release reserved PCI I/O and memory resources | 1641 | * pci_release_regions - Release reserved PCI I/O and memory resources |
1474 | * @pdev: PCI device whose resources were previously reserved by pci_request_regions | 1642 | * @pdev: PCI device whose resources were previously reserved by pci_request_regions |
@@ -1502,27 +1670,66 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name) | |||
1502 | } | 1670 | } |
1503 | 1671 | ||
1504 | /** | 1672 | /** |
1673 | * pci_request_regions_exclusive - Reserve PCI I/O and memory resources | ||
1674 | * @pdev: PCI device whose resources are to be reserved | ||
1675 | * @res_name: Name to be associated with resource. | ||
1676 | * | ||
1677 | * Mark all PCI regions associated with PCI device @pdev as | ||
1678 | * being reserved by owner @res_name. Do not access any | ||
1679 | * address inside the PCI regions unless this call returns | ||
1680 | * successfully. | ||
1681 | * | ||
1682 | * pci_request_regions_exclusive() will mark the region so that | ||
1683 | * /dev/mem and the sysfs MMIO access will not be allowed. | ||
1684 | * | ||
1685 | * Returns 0 on success, or %EBUSY on error. A warning | ||
1686 | * message is also printed on failure. | ||
1687 | */ | ||
1688 | int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) | ||
1689 | { | ||
1690 | return pci_request_selected_regions_exclusive(pdev, | ||
1691 | ((1 << 6) - 1), res_name); | ||
1692 | } | ||
1693 | |||
1694 | static void __pci_set_master(struct pci_dev *dev, bool enable) | ||
1695 | { | ||
1696 | u16 old_cmd, cmd; | ||
1697 | |||
1698 | pci_read_config_word(dev, PCI_COMMAND, &old_cmd); | ||
1699 | if (enable) | ||
1700 | cmd = old_cmd | PCI_COMMAND_MASTER; | ||
1701 | else | ||
1702 | cmd = old_cmd & ~PCI_COMMAND_MASTER; | ||
1703 | if (cmd != old_cmd) { | ||
1704 | dev_dbg(&dev->dev, "%s bus mastering\n", | ||
1705 | enable ? "enabling" : "disabling"); | ||
1706 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
1707 | } | ||
1708 | dev->is_busmaster = enable; | ||
1709 | } | ||
1710 | |||
1711 | /** | ||
1505 | * pci_set_master - enables bus-mastering for device dev | 1712 | * pci_set_master - enables bus-mastering for device dev |
1506 | * @dev: the PCI device to enable | 1713 | * @dev: the PCI device to enable |
1507 | * | 1714 | * |
1508 | * Enables bus-mastering on the device and calls pcibios_set_master() | 1715 | * Enables bus-mastering on the device and calls pcibios_set_master() |
1509 | * to do the needed arch specific settings. | 1716 | * to do the needed arch specific settings. |
1510 | */ | 1717 | */ |
1511 | void | 1718 | void pci_set_master(struct pci_dev *dev) |
1512 | pci_set_master(struct pci_dev *dev) | ||
1513 | { | 1719 | { |
1514 | u16 cmd; | 1720 | __pci_set_master(dev, true); |
1515 | |||
1516 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
1517 | if (! (cmd & PCI_COMMAND_MASTER)) { | ||
1518 | dev_dbg(&dev->dev, "enabling bus mastering\n"); | ||
1519 | cmd |= PCI_COMMAND_MASTER; | ||
1520 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
1521 | } | ||
1522 | dev->is_busmaster = 1; | ||
1523 | pcibios_set_master(dev); | 1721 | pcibios_set_master(dev); |
1524 | } | 1722 | } |
1525 | 1723 | ||
1724 | /** | ||
1725 | * pci_clear_master - disables bus-mastering for device dev | ||
1726 | * @dev: the PCI device to disable | ||
1727 | */ | ||
1728 | void pci_clear_master(struct pci_dev *dev) | ||
1729 | { | ||
1730 | __pci_set_master(dev, false); | ||
1731 | } | ||
1732 | |||
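pci_clear_master() gives drivers a symmetric partner to pci_set_master(), useful on the teardown path so the device cannot keep DMAing into freed buffers. A hedged remove-path sketch:

```c
#include <linux/pci.h>

/* Sketch: typical set/clear pairing across a driver's lifetime -
 * pci_set_master() in probe, this in remove. */
static void example_remove(struct pci_dev *pdev)
{
	pci_clear_master(pdev);		/* stop device-initiated DMA */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
```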
1526 | #ifdef PCI_DISABLE_MWI | 1733 | #ifdef PCI_DISABLE_MWI |
1527 | int pci_set_mwi(struct pci_dev *dev) | 1734 | int pci_set_mwi(struct pci_dev *dev) |
1528 | { | 1735 | { |
@@ -1751,24 +1958,7 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask) | |||
1751 | EXPORT_SYMBOL(pci_set_dma_seg_boundary); | 1958 | EXPORT_SYMBOL(pci_set_dma_seg_boundary); |
1752 | #endif | 1959 | #endif |
1753 | 1960 | ||
1754 | /** | 1961 | static int __pcie_flr(struct pci_dev *dev, int probe) |
1755 | * pci_execute_reset_function() - Reset a PCI device function | ||
1756 | * @dev: Device function to reset | ||
1757 | * | ||
1758 | * Some devices allow an individual function to be reset without affecting | ||
1759 | * other functions in the same device. The PCI device must be responsive | ||
1760 | * to PCI config space in order to use this function. | ||
1761 | * | ||
1762 | * The device function is presumed to be unused when this function is called. | ||
1763 | * Resetting the device will make the contents of PCI configuration space | ||
1764 | * random, so any caller of this must be prepared to reinitialise the | ||
1765 | * device including MSI, bus mastering, BARs, decoding IO and memory spaces, | ||
1766 | * etc. | ||
1767 | * | ||
1768 | * Returns 0 if the device function was successfully reset or -ENOTTY if the | ||
1769 | * device doesn't support resetting a single function. | ||
1770 | */ | ||
1771 | int pci_execute_reset_function(struct pci_dev *dev) | ||
1772 | { | 1962 | { |
1773 | u16 status; | 1963 | u16 status; |
1774 | u32 cap; | 1964 | u32 cap; |
@@ -1780,6 +1970,9 @@ int pci_execute_reset_function(struct pci_dev *dev) | |||
1780 | if (!(cap & PCI_EXP_DEVCAP_FLR)) | 1970 | if (!(cap & PCI_EXP_DEVCAP_FLR)) |
1781 | return -ENOTTY; | 1971 | return -ENOTTY; |
1782 | 1972 | ||
1973 | if (probe) | ||
1974 | return 0; | ||
1975 | |||
1783 | pci_block_user_cfg_access(dev); | 1976 | pci_block_user_cfg_access(dev); |
1784 | 1977 | ||
1785 | /* Wait for Transaction Pending bit clean */ | 1978 | /* Wait for Transaction Pending bit clean */ |
@@ -1802,6 +1995,80 @@ int pci_execute_reset_function(struct pci_dev *dev) | |||
1802 | pci_unblock_user_cfg_access(dev); | 1995 | pci_unblock_user_cfg_access(dev); |
1803 | return 0; | 1996 | return 0; |
1804 | } | 1997 | } |
1998 | |||
1999 | static int __pci_af_flr(struct pci_dev *dev, int probe) | ||
2000 | { | ||
2001 | int cappos = pci_find_capability(dev, PCI_CAP_ID_AF); | ||
2002 | u8 status; | ||
2003 | u8 cap; | ||
2004 | |||
2005 | if (!cappos) | ||
2006 | return -ENOTTY; | ||
2007 | pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap); | ||
2008 | if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) | ||
2009 | return -ENOTTY; | ||
2010 | |||
2011 | if (probe) | ||
2012 | return 0; | ||
2013 | |||
2014 | pci_block_user_cfg_access(dev); | ||
2015 | |||
2016 | /* Wait for Transaction Pending bit clean */ | ||
2017 | msleep(100); | ||
2018 | pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status); | ||
2019 | if (status & PCI_AF_STATUS_TP) { | ||
2020 | dev_info(&dev->dev, "Busy after 100ms while trying to" | ||
2021 | " reset; sleeping for 1 second\n"); | ||
2022 | ssleep(1); | ||
2023 | pci_read_config_byte(dev, | ||
2024 | cappos + PCI_AF_STATUS, &status); | ||
2025 | if (status & PCI_AF_STATUS_TP) | ||
2026 | dev_info(&dev->dev, "Still busy after 1s; " | ||
2027 | "proceeding with reset anyway\n"); | ||
2028 | } | ||
2029 | pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); | ||
2030 | mdelay(100); | ||
2031 | |||
2032 | pci_unblock_user_cfg_access(dev); | ||
2033 | return 0; | ||
2034 | } | ||
2035 | |||
2036 | static int __pci_reset_function(struct pci_dev *pdev, int probe) | ||
2037 | { | ||
2038 | int res; | ||
2039 | |||
2040 | res = __pcie_flr(pdev, probe); | ||
2041 | if (res != -ENOTTY) | ||
2042 | return res; | ||
2043 | |||
2044 | res = __pci_af_flr(pdev, probe); | ||
2045 | if (res != -ENOTTY) | ||
2046 | return res; | ||
2047 | |||
2048 | return res; | ||
2049 | } | ||
2050 | |||
2051 | /** | ||
2052 | * pci_execute_reset_function() - Reset a PCI device function | ||
2053 | * @dev: Device function to reset | ||
2054 | * | ||
2055 | * Some devices allow an individual function to be reset without affecting | ||
2056 | * other functions in the same device. The PCI device must be responsive | ||
2057 | * to PCI config space in order to use this function. | ||
2058 | * | ||
2059 | * The device function is presumed to be unused when this function is called. | ||
2060 | * Resetting the device will make the contents of PCI configuration space | ||
2061 | * random, so any caller of this must be prepared to reinitialise the | ||
2062 | * device including MSI, bus mastering, BARs, decoding IO and memory spaces, | ||
2063 | * etc. | ||
2064 | * | ||
2065 | * Returns 0 if the device function was successfully reset or -ENOTTY if the | ||
2066 | * device doesn't support resetting a single function. | ||
2067 | */ | ||
2068 | int pci_execute_reset_function(struct pci_dev *dev) | ||
2069 | { | ||
2070 | return __pci_reset_function(dev, 0); | ||
2071 | } | ||
1805 | EXPORT_SYMBOL_GPL(pci_execute_reset_function); | 2072 | EXPORT_SYMBOL_GPL(pci_execute_reset_function); |
1806 | 2073 | ||
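The probe argument threaded through __pci_reset_function() lets the same code answer "can this device be reset?" without side effects: with probe set, __pcie_flr() and __pci_af_flr() return 0 as soon as their capability checks pass, and only probe == 0 performs the reset. A sketch of the two-step convention visible in this patch (pci_reset_function() probes with 1, then the reset runs with 0):

```c
/* Sketch: query support first, then reset - mirroring how
 * pci_reset_function() uses __pci_reset_function() after this patch.
 * This would live in drivers/pci/pci.c next to those helpers. */
static int two_step_reset_example(struct pci_dev *dev)
{
	int rc = __pci_reset_function(dev, 1);	/* capability query only */

	if (rc < 0)
		return rc;			/* e.g. -ENOTTY: no FLR */

	/* ... quiesce the device (IRQs, saved state) ... */
	return __pci_reset_function(dev, 0);	/* actually reset */
}
```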
1807 | /** | 2074 | /** |
@@ -1822,15 +2089,10 @@ EXPORT_SYMBOL_GPL(pci_execute_reset_function); | |||
1822 | */ | 2089 | */ |
1823 | int pci_reset_function(struct pci_dev *dev) | 2090 | int pci_reset_function(struct pci_dev *dev) |
1824 | { | 2091 | { |
1825 | u32 cap; | 2092 | int r = __pci_reset_function(dev, 1); |
1826 | int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
1827 | int r; | ||
1828 | 2093 | ||
1829 | if (!exppos) | 2094 | if (r < 0) |
1830 | return -ENOTTY; | 2095 | return r; |
1831 | pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap); | ||
1832 | if (!(cap & PCI_EXP_DEVCAP_FLR)) | ||
1833 | return -ENOTTY; | ||
1834 | 2096 | ||
1835 | if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0) | 2097 | if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0) |
1836 | disable_irq(dev->irq); | 2098 | disable_irq(dev->irq); |
@@ -2022,6 +2284,28 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags) | |||
2022 | return bars; | 2284 | return bars; |
2023 | } | 2285 | } |
2024 | 2286 | ||
2287 | /** | ||
2288 | * pci_resource_bar - get position of the BAR associated with a resource | ||
2289 | * @dev: the PCI device | ||
2290 | * @resno: the resource number | ||
2291 | * @type: the BAR type to be filled in | ||
2292 | * | ||
2293 | * Returns BAR position in config space, or 0 if the BAR is invalid. | ||
2294 | */ | ||
2295 | int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) | ||
2296 | { | ||
2297 | if (resno < PCI_ROM_RESOURCE) { | ||
2298 | *type = pci_bar_unknown; | ||
2299 | return PCI_BASE_ADDRESS_0 + 4 * resno; | ||
2300 | } else if (resno == PCI_ROM_RESOURCE) { | ||
2301 | *type = pci_bar_mem32; | ||
2302 | return dev->rom_base_reg; | ||
2303 | } | ||
2304 | |||
2305 | dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno); | ||
2306 | return 0; | ||
2307 | } | ||
2308 | |||
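For the standard BARs the mapping to config space is a fixed stride: resource n lives at PCI_BASE_ADDRESS_0 + 4 * n, and the ROM resource falls back to the device-specific rom_base_reg. A quick user-space check of that arithmetic (PCI_BASE_ADDRESS_0 is 0x10 in the standard PCI header layout):

```c
#include <stdio.h>

#define PCI_BASE_ADDRESS_0 0x10	/* first BAR in the PCI config header */

/* Prints the config-space offset of each of the six standard BARs,
 * mirroring the arithmetic in pci_resource_bar(). */
int main(void)
{
	for (int resno = 0; resno < 6; resno++)
		printf("BAR %d at config offset 0x%02x\n",
		       resno, PCI_BASE_ADDRESS_0 + 4 * resno);
	return 0;
}
```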
2025 | static void __devinit pci_no_domains(void) | 2309 | static void __devinit pci_no_domains(void) |
2026 | { | 2310 | { |
2027 | #ifdef CONFIG_PCI_DOMAINS | 2311 | #ifdef CONFIG_PCI_DOMAINS |
@@ -2029,6 +2313,19 @@ static void __devinit pci_no_domains(void) | |||
2029 | #endif | 2313 | #endif |
2030 | } | 2314 | } |
2031 | 2315 | ||
2316 | /** | ||
2317 | * pci_ext_cfg_enabled - can we access extended PCI config space? | ||
2318 | * @dev: The PCI device of the root bridge. | ||
2319 | * | ||
2320 | * Returns 1 if we can access PCI extended config space (offsets | ||
2321 | * greater than 0xff). This is the default implementation. Architecture | ||
2322 | * implementations can override this. | ||
2323 | */ | ||
2324 | int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev) | ||
2325 | { | ||
2326 | return 1; | ||
2327 | } | ||
2328 | |||
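Because the default is a weak symbol, an architecture can veto extended config access simply by providing a strong pci_ext_cfg_avail() of its own. A hedged sketch of such an override; the raw_pci_ext_ops condition is illustrative of the x86 use case, not code from this patch:

```c
#include <linux/pci.h>

/* Sketch: an arch-specific strong definition overriding the weak
 * default. raw_pci_ext_ops is used here only as an illustrative
 * condition; real architectures pick their own criteria. */
int pci_ext_cfg_avail(struct pci_dev *dev)
{
	/* Claim extended config space only if low-level accessors for
	 * offsets above 0xff actually exist on this platform. */
	return raw_pci_ext_ops != NULL;
}
```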
2032 | static int __devinit pci_init(void) | 2329 | static int __devinit pci_init(void) |
2033 | { | 2330 | { |
2034 | struct pci_dev *dev = NULL; | 2331 | struct pci_dev *dev = NULL; |
@@ -2037,8 +2334,6 @@ static int __devinit pci_init(void) | |||
2037 | pci_fixup_device(pci_fixup_final, dev); | 2334 | pci_fixup_device(pci_fixup_final, dev); |
2038 | } | 2335 | } |
2039 | 2336 | ||
2040 | msi_init(); | ||
2041 | |||
2042 | return 0; | 2337 | return 0; |
2043 | } | 2338 | } |
2044 | 2339 | ||
@@ -2083,11 +2378,15 @@ EXPORT_SYMBOL(pci_find_capability); | |||
2083 | EXPORT_SYMBOL(pci_bus_find_capability); | 2378 | EXPORT_SYMBOL(pci_bus_find_capability); |
2084 | EXPORT_SYMBOL(pci_release_regions); | 2379 | EXPORT_SYMBOL(pci_release_regions); |
2085 | EXPORT_SYMBOL(pci_request_regions); | 2380 | EXPORT_SYMBOL(pci_request_regions); |
2381 | EXPORT_SYMBOL(pci_request_regions_exclusive); | ||
2086 | EXPORT_SYMBOL(pci_release_region); | 2382 | EXPORT_SYMBOL(pci_release_region); |
2087 | EXPORT_SYMBOL(pci_request_region); | 2383 | EXPORT_SYMBOL(pci_request_region); |
2384 | EXPORT_SYMBOL(pci_request_region_exclusive); | ||
2088 | EXPORT_SYMBOL(pci_release_selected_regions); | 2385 | EXPORT_SYMBOL(pci_release_selected_regions); |
2089 | EXPORT_SYMBOL(pci_request_selected_regions); | 2386 | EXPORT_SYMBOL(pci_request_selected_regions); |
2387 | EXPORT_SYMBOL(pci_request_selected_regions_exclusive); | ||
2090 | EXPORT_SYMBOL(pci_set_master); | 2388 | EXPORT_SYMBOL(pci_set_master); |
2389 | EXPORT_SYMBOL(pci_clear_master); | ||
2091 | EXPORT_SYMBOL(pci_set_mwi); | 2390 | EXPORT_SYMBOL(pci_set_mwi); |
2092 | EXPORT_SYMBOL(pci_try_set_mwi); | 2391 | EXPORT_SYMBOL(pci_try_set_mwi); |
2093 | EXPORT_SYMBOL(pci_clear_mwi); | 2392 | EXPORT_SYMBOL(pci_clear_mwi); |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 9de87e9f98f5..1351bb4addde 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -10,6 +10,10 @@ extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env); | |||
10 | extern int pci_create_sysfs_dev_files(struct pci_dev *pdev); | 10 | extern int pci_create_sysfs_dev_files(struct pci_dev *pdev); |
11 | extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev); | 11 | extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev); |
12 | extern void pci_cleanup_rom(struct pci_dev *dev); | 12 | extern void pci_cleanup_rom(struct pci_dev *dev); |
13 | #ifdef HAVE_PCI_MMAP | ||
14 | extern int pci_mmap_fits(struct pci_dev *pdev, int resno, | ||
15 | struct vm_area_struct *vma); | ||
16 | #endif | ||
13 | 17 | ||
14 | /** | 18 | /** |
15 | * Firmware PM callbacks | 19 | * Firmware PM callbacks |
@@ -40,7 +44,11 @@ struct pci_platform_pm_ops { | |||
40 | }; | 44 | }; |
41 | 45 | ||
42 | extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); | 46 | extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); |
47 | extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state); | ||
48 | extern void pci_disable_enabled_device(struct pci_dev *dev); | ||
43 | extern void pci_pm_init(struct pci_dev *dev); | 49 | extern void pci_pm_init(struct pci_dev *dev); |
50 | extern void platform_pci_wakeup_init(struct pci_dev *dev); | ||
51 | extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); | ||
44 | 52 | ||
45 | extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); | 53 | extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); |
46 | extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); | 54 | extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); |
@@ -50,14 +58,14 @@ extern int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val); | |||
50 | extern int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val); | 58 | extern int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val); |
51 | 59 | ||
52 | struct pci_vpd_ops { | 60 | struct pci_vpd_ops { |
53 | int (*read)(struct pci_dev *dev, int pos, int size, char *buf); | 61 | ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf); |
54 | int (*write)(struct pci_dev *dev, int pos, int size, const char *buf); | 62 | ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); |
55 | void (*release)(struct pci_dev *dev); | 63 | void (*release)(struct pci_dev *dev); |
56 | }; | 64 | }; |
57 | 65 | ||
58 | struct pci_vpd { | 66 | struct pci_vpd { |
59 | unsigned int len; | 67 | unsigned int len; |
60 | struct pci_vpd_ops *ops; | 68 | const struct pci_vpd_ops *ops; |
61 | struct bin_attribute *attr; /* descriptor for sysfs VPD entry */ | 69 | struct bin_attribute *attr; /* descriptor for sysfs VPD entry */ |
62 | }; | 70 | }; |
63 | 71 | ||
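The ops switch to ssize_t/loff_t/size_t matches the sysfs bin_attribute read/write prototypes, so a backend can be wired straight through, and the const qualifier lets the ops table live in read-only data. A hedged sketch of a backend filling in the new signatures (the accessor bodies are placeholders, not real hardware access):

```c
#include <linux/pci.h>

/* Sketch: a VPD backend matching the new const ops signatures. */
static ssize_t example_vpd_read(struct pci_dev *dev, loff_t pos,
				size_t count, void *buf)
{
	return -ENODEV;		/* placeholder */
}

static ssize_t example_vpd_write(struct pci_dev *dev, loff_t pos,
				 size_t count, const void *buf)
{
	return -ENODEV;		/* placeholder */
}

static const struct pci_vpd_ops example_vpd_ops = {
	.read  = example_vpd_read,
	.write = example_vpd_write,
};
```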
@@ -98,11 +106,9 @@ extern unsigned int pci_pm_d3_delay; | |||
98 | #ifdef CONFIG_PCI_MSI | 106 | #ifdef CONFIG_PCI_MSI |
99 | void pci_no_msi(void); | 107 | void pci_no_msi(void); |
100 | extern void pci_msi_init_pci_dev(struct pci_dev *dev); | 108 | extern void pci_msi_init_pci_dev(struct pci_dev *dev); |
101 | extern void __devinit msi_init(void); | ||
102 | #else | 109 | #else |
103 | static inline void pci_no_msi(void) { } | 110 | static inline void pci_no_msi(void) { } |
104 | static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } | 111 | static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } |
105 | static inline void msi_init(void) { } | ||
106 | #endif | 112 | #endif |
107 | 113 | ||
108 | #ifdef CONFIG_PCIEAER | 114 | #ifdef CONFIG_PCIEAER |
@@ -159,16 +165,28 @@ struct pci_slot_attribute { | |||
159 | }; | 165 | }; |
160 | #define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr) | 166 | #define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr) |
161 | 167 | ||
168 | enum pci_bar_type { | ||
169 | pci_bar_unknown, /* Standard PCI BAR probe */ | ||
170 | pci_bar_io, /* An io port BAR */ | ||
171 | pci_bar_mem32, /* A 32-bit memory BAR */ | ||
172 | pci_bar_mem64, /* A 64-bit memory BAR */ | ||
173 | }; | ||
174 | |||
175 | extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | ||
176 | struct resource *res, unsigned int reg); | ||
177 | extern int pci_resource_bar(struct pci_dev *dev, int resno, | ||
178 | enum pci_bar_type *type); | ||
179 | extern int pci_bus_add_child(struct pci_bus *bus); | ||
162 | extern void pci_enable_ari(struct pci_dev *dev); | 180 | extern void pci_enable_ari(struct pci_dev *dev); |
163 | /** | 181 | /** |
164 | * pci_ari_enabled - query ARI forwarding status | 182 | * pci_ari_enabled - query ARI forwarding status |
165 | * @dev: the PCI device | 183 | * @bus: the PCI bus |
166 | * | 184 | * |
167 | * Returns 1 if ARI forwarding is enabled, or 0 if not enabled; | 185 | * Returns 1 if ARI forwarding is enabled, or 0 if not enabled; |
168 | */ | 186 | */ |
169 | static inline int pci_ari_enabled(struct pci_dev *dev) | 187 | static inline int pci_ari_enabled(struct pci_bus *bus) |
170 | { | 188 | { |
171 | return dev->ari_enabled; | 189 | return bus->self && bus->self->ari_enabled; |
172 | } | 190 | } |
173 | 191 | ||
174 | #endif /* DRIVERS_PCI_H */ | 192 | #endif /* DRIVERS_PCI_H */ |
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index 6dd7b13e9808..ebce26c37049 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c | |||
@@ -38,7 +38,6 @@ int aer_osc_setup(struct pcie_device *pciedev) | |||
38 | 38 | ||
39 | handle = acpi_find_root_bridge_handle(pdev); | 39 | handle = acpi_find_root_bridge_handle(pdev); |
40 | if (handle) { | 40 | if (handle) { |
41 | pcie_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT); | ||
42 | status = pci_osc_control_set(handle, | 41 | status = pci_osc_control_set(handle, |
43 | OSC_PCI_EXPRESS_AER_CONTROL | | 42 | OSC_PCI_EXPRESS_AER_CONTROL | |
44 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | 43 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); |
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c index 3933d4f30e8c..0fc29ae80df8 100644 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c | |||
@@ -233,7 +233,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) | |||
233 | 233 | ||
234 | if (info->flags & AER_TLP_HEADER_VALID_FLAG) { | 234 | if (info->flags & AER_TLP_HEADER_VALID_FLAG) { |
235 | unsigned char *tlp = (unsigned char *) &info->tlp; | 235 | unsigned char *tlp = (unsigned char *) &info->tlp; |
236 | printk("%sTLB Header:\n", loglevel); | 236 | printk("%sTLP Header:\n", loglevel); |
237 | printk("%s%02x%02x%02x%02x %02x%02x%02x%02x" | 237 | printk("%s%02x%02x%02x%02x %02x%02x%02x%02x" |
238 | " %02x%02x%02x%02x %02x%02x%02x%02x\n", | 238 | " %02x%02x%02x%02x %02x%02x%02x%02x\n", |
239 | loglevel, | 239 | loglevel, |
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 9aad608bcf3f..586b6f75910d 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/jiffies.h> | 19 | #include <linux/jiffies.h> |
20 | #include <linux/delay.h> | ||
20 | #include <linux/pci-aspm.h> | 21 | #include <linux/pci-aspm.h> |
21 | #include "../pci.h" | 22 | #include "../pci.h" |
22 | 23 | ||
@@ -33,6 +34,11 @@ struct endpoint_state { | |||
33 | struct pcie_link_state { | 34 | struct pcie_link_state { |
34 | struct list_head sibiling; | 35 | struct list_head sibiling; |
35 | struct pci_dev *pdev; | 36 | struct pci_dev *pdev; |
37 | bool downstream_has_switch; | ||
38 | |||
39 | struct pcie_link_state *parent; | ||
40 | struct list_head children; | ||
41 | struct list_head link; | ||
36 | 42 | ||
37 | /* ASPM state */ | 43 | /* ASPM state */ |
38 | unsigned int support_state; | 44 | unsigned int support_state; |
@@ -70,6 +76,8 @@ static const char *policy_str[] = { | |||
70 | [POLICY_POWERSAVE] = "powersave" | 76 | [POLICY_POWERSAVE] = "powersave" |
71 | }; | 77 | }; |
72 | 78 | ||
79 | #define LINK_RETRAIN_TIMEOUT HZ | ||
80 | |||
73 | static int policy_to_aspm_state(struct pci_dev *pdev) | 81 | static int policy_to_aspm_state(struct pci_dev *pdev) |
74 | { | 82 | { |
75 | struct pcie_link_state *link_state = pdev->link_state; | 83 | struct pcie_link_state *link_state = pdev->link_state; |
@@ -125,7 +133,7 @@ static void pcie_set_clock_pm(struct pci_dev *pdev, int enable) | |||
125 | link_state->clk_pm_enabled = !!enable; | 133 | link_state->clk_pm_enabled = !!enable; |
126 | } | 134 | } |
127 | 135 | ||
128 | static void pcie_check_clock_pm(struct pci_dev *pdev) | 136 | static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist) |
129 | { | 137 | { |
130 | int pos; | 138 | int pos; |
131 | u32 reg32; | 139 | u32 reg32; |
@@ -149,10 +157,26 @@ static void pcie_check_clock_pm(struct pci_dev *pdev) | |||
149 | if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN)) | 157 | if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN)) |
150 | enabled = 0; | 158 | enabled = 0; |
151 | } | 159 | } |
152 | link_state->clk_pm_capable = capable; | ||
153 | link_state->clk_pm_enabled = enabled; | 160 | link_state->clk_pm_enabled = enabled; |
154 | link_state->bios_clk_state = enabled; | 161 | link_state->bios_clk_state = enabled; |
155 | pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev)); | 162 | if (!blacklist) { |
163 | link_state->clk_pm_capable = capable; | ||
164 | pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev)); | ||
165 | } else { | ||
166 | link_state->clk_pm_capable = 0; | ||
167 | pcie_set_clock_pm(pdev, 0); | ||
168 | } | ||
169 | } | ||
170 | |||
171 | static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev) | ||
172 | { | ||
173 | struct pci_dev *child_dev; | ||
174 | |||
175 | list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { | ||
176 | if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) | ||
177 | return true; | ||
178 | } | ||
179 | return false; | ||
156 | } | 180 | } |
157 | 181 | ||
158 | /* | 182 | /* |
@@ -217,16 +241,18 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev) | |||
217 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); | 241 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); |
218 | 242 | ||
219 | /* Wait for link training end */ | 243 | /* Wait for link training end */ |
220 | /* break out after waiting for 1 second */ | 244 | /* break out after waiting for timeout */ |
221 | start_jiffies = jiffies; | 245 | start_jiffies = jiffies; |
222 | while ((jiffies - start_jiffies) < HZ) { | 246 | for (;;) { |
223 | pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, ®16); | 247 | pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, ®16); |
224 | if (!(reg16 & PCI_EXP_LNKSTA_LT)) | 248 | if (!(reg16 & PCI_EXP_LNKSTA_LT)) |
225 | break; | 249 | break; |
226 | cpu_relax(); | 250 | if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) |
251 | break; | ||
252 | msleep(1); | ||
227 | } | 253 | } |
228 | /* training failed -> recover */ | 254 | /* training failed -> recover */ |
229 | if ((jiffies - start_jiffies) >= HZ) { | 255 | if (reg16 & PCI_EXP_LNKSTA_LT) { |
230 | dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure" | 256 | dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure" |
231 | " common clock\n"); | 257 | " common clock\n"); |
232 | i = 0; | 258 | i = 0; |
@@ -419,9 +445,9 @@ static unsigned int pcie_aspm_check_state(struct pci_dev *pdev, | |||
419 | { | 445 | { |
420 | struct pci_dev *child_dev; | 446 | struct pci_dev *child_dev; |
421 | 447 | ||
422 | /* If no child, disable the link */ | 448 | /* If no child, ignore the link */ |
423 | if (list_empty(&pdev->subordinate->devices)) | 449 | if (list_empty(&pdev->subordinate->devices)) |
424 | return 0; | 450 | return state; |
425 | list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { | 451 | list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { |
426 | if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { | 452 | if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { |
427 | /* | 453 | /* |
@@ -462,6 +488,9 @@ static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state) | |||
462 | int valid = 1; | 488 | int valid = 1; |
463 | struct pcie_link_state *link_state = pdev->link_state; | 489 | struct pcie_link_state *link_state = pdev->link_state; |
464 | 490 | ||
491 | /* If no child, disable the link */ | ||
492 | if (list_empty(&pdev->subordinate->devices)) | ||
493 | state = 0; | ||
465 | /* | 494 | /* |
466 | * if the downstream component has pci bridge function, don't do ASPM | 495 | * if the downstream component has pci bridge function, don't do ASPM |
467 | * now | 496 | * now |
@@ -493,20 +522,52 @@ static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state) | |||
493 | link_state->enabled_state = state; | 522 | link_state->enabled_state = state; |
494 | } | 523 | } |
495 | 524 | ||
525 | static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link) | ||
526 | { | ||
527 | struct pcie_link_state *root_port_link = link; | ||
528 | while (root_port_link->parent) | ||
529 | root_port_link = root_port_link->parent; | ||
530 | return root_port_link; | ||
531 | } | ||
532 | |||
533 | /* check the whole hierarchy, and configure each link in the hierarchy */ | ||
496 | static void __pcie_aspm_configure_link_state(struct pci_dev *pdev, | 534 | static void __pcie_aspm_configure_link_state(struct pci_dev *pdev, |
497 | unsigned int state) | 535 | unsigned int state) |
498 | { | 536 | { |
499 | struct pcie_link_state *link_state = pdev->link_state; | 537 | struct pcie_link_state *link_state = pdev->link_state; |
538 | struct pcie_link_state *root_port_link = get_root_port_link(link_state); | ||
539 | struct pcie_link_state *leaf; | ||
500 | 540 | ||
501 | if (link_state->support_state == 0) | ||
502 | return; | ||
503 | state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; | 541 | state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; |
504 | 542 | ||
505 | /* state 0 means disabling aspm */ | 543 | /* check all links who have specific root port link */ |
506 | state = pcie_aspm_check_state(pdev, state); | 544 | list_for_each_entry(leaf, &link_list, sibiling) { |
545 | if (!list_empty(&leaf->children) || | ||
546 | get_root_port_link(leaf) != root_port_link) | ||
547 | continue; | ||
548 | state = pcie_aspm_check_state(leaf->pdev, state); | ||
549 | } | ||
550 | /* check the root port link too in case it has no children */ | ||
551 | state = pcie_aspm_check_state(root_port_link->pdev, state); | ||
552 | |||
507 | if (link_state->enabled_state == state) | 553 | if (link_state->enabled_state == state) |
508 | return; | 554 | return; |
509 | __pcie_aspm_config_link(pdev, state); | 555 | |
556 | /* | ||
557 | * We must change the whole hierarchy; see the comments in | ||
558 | * __pcie_aspm_config_link for the required ordering | ||
559 | **/ | ||
560 | if (state & PCIE_LINK_STATE_L1) { | ||
561 | list_for_each_entry(leaf, &link_list, sibiling) { | ||
562 | if (get_root_port_link(leaf) == root_port_link) | ||
563 | __pcie_aspm_config_link(leaf->pdev, state); | ||
564 | } | ||
565 | } else { | ||
566 | list_for_each_entry_reverse(leaf, &link_list, sibiling) { | ||
567 | if (get_root_port_link(leaf) == root_port_link) | ||
568 | __pcie_aspm_config_link(leaf->pdev, state); | ||
569 | } | ||
570 | } | ||
510 | } | 571 | } |
511 | 572 | ||
512 | /* | 573 | /* |
@@ -570,6 +631,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) | |||
570 | unsigned int state; | 631 | unsigned int state; |
571 | struct pcie_link_state *link_state; | 632 | struct pcie_link_state *link_state; |
572 | int error = 0; | 633 | int error = 0; |
634 | int blacklist; | ||
573 | 635 | ||
574 | if (aspm_disabled || !pdev->is_pcie || pdev->link_state) | 636 | if (aspm_disabled || !pdev->is_pcie || pdev->link_state) |
575 | return; | 637 | return; |
@@ -580,29 +642,58 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) | |||
580 | if (list_empty(&pdev->subordinate->devices)) | 642 | if (list_empty(&pdev->subordinate->devices)) |
581 | goto out; | 643 | goto out; |
582 | 644 | ||
583 | if (pcie_aspm_sanity_check(pdev)) | 645 | blacklist = !!pcie_aspm_sanity_check(pdev); |
584 | goto out; | ||
585 | 646 | ||
586 | mutex_lock(&aspm_lock); | 647 | mutex_lock(&aspm_lock); |
587 | 648 | ||
588 | link_state = kzalloc(sizeof(*link_state), GFP_KERNEL); | 649 | link_state = kzalloc(sizeof(*link_state), GFP_KERNEL); |
589 | if (!link_state) | 650 | if (!link_state) |
590 | goto unlock_out; | 651 | goto unlock_out; |
591 | pdev->link_state = link_state; | ||
592 | 652 | ||
593 | pcie_aspm_configure_common_clock(pdev); | 653 | link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev); |
654 | INIT_LIST_HEAD(&link_state->children); | ||
655 | INIT_LIST_HEAD(&link_state->link); | ||
656 | if (pdev->bus->self) {/* this is a switch */ | ||
657 | struct pcie_link_state *parent_link_state; | ||
594 | 658 | ||
595 | pcie_aspm_cap_init(pdev); | 659 | parent_link_state = pdev->bus->parent->self->link_state; |
660 | if (!parent_link_state) { | ||
661 | kfree(link_state); | ||
662 | goto unlock_out; | ||
663 | } | ||
664 | list_add(&link_state->link, &parent_link_state->children); | ||
665 | link_state->parent = parent_link_state; | ||
666 | } | ||
596 | 667 | ||
597 | /* config link state to avoid BIOS error */ | 668 | pdev->link_state = link_state; |
598 | state = pcie_aspm_check_state(pdev, policy_to_aspm_state(pdev)); | ||
599 | __pcie_aspm_config_link(pdev, state); | ||
600 | 669 | ||
601 | pcie_check_clock_pm(pdev); | 670 | if (!blacklist) { |
671 | pcie_aspm_configure_common_clock(pdev); | ||
672 | pcie_aspm_cap_init(pdev); | ||
673 | } else { | ||
674 | link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; | ||
675 | link_state->bios_aspm_state = 0; | ||
676 | /* Set support state to 0, so we will disable ASPM later */ | ||
677 | link_state->support_state = 0; | ||
678 | } | ||
602 | 679 | ||
603 | link_state->pdev = pdev; | 680 | link_state->pdev = pdev; |
604 | list_add(&link_state->sibiling, &link_list); | 681 | list_add(&link_state->sibiling, &link_list); |
605 | 682 | ||
683 | if (link_state->downstream_has_switch) { | ||
684 | /* | ||
685 | * If the link has a switch below it, delay the link config. The leaf | ||
686 | * link initialization will configure the whole hierarchy, but we must | ||
687 | * make sure the BIOS doesn't set an unsupported link state | ||
688 | **/ | ||
689 | state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state); | ||
690 | __pcie_aspm_config_link(pdev, state); | ||
691 | } else | ||
692 | __pcie_aspm_configure_link_state(pdev, | ||
693 | policy_to_aspm_state(pdev)); | ||
694 | |||
695 | pcie_check_clock_pm(pdev, blacklist); | ||
696 | |||
606 | unlock_out: | 697 | unlock_out: |
607 | if (error) | 698 | if (error) |
608 | free_link_state(pdev); | 699 | free_link_state(pdev); |
@@ -635,6 +726,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) | |||
635 | /* All functions are removed, so just disable ASPM for the link */ | 726 | /* All functions are removed, so just disable ASPM for the link */ |
636 | __pcie_aspm_config_one_dev(parent, 0); | 727 | __pcie_aspm_config_one_dev(parent, 0); |
637 | list_del(&link_state->sibiling); | 728 | list_del(&link_state->sibiling); |
729 | list_del(&link_state->link); | ||
638 | /* Clock PM is for endpoint device */ | 730 | /* Clock PM is for endpoint device */ |
639 | 731 | ||
640 | free_link_state(parent); | 732 | free_link_state(parent); |
@@ -857,24 +949,15 @@ void pcie_no_aspm(void) | |||
857 | aspm_disabled = 1; | 949 | aspm_disabled = 1; |
858 | } | 950 | } |
859 | 951 | ||
860 | #ifdef CONFIG_ACPI | 952 | /** |
861 | #include <acpi/acpi_bus.h> | 953 | * pcie_aspm_enabled - is PCIe ASPM enabled? |
862 | #include <linux/pci-acpi.h> | 954 | * |
863 | static void pcie_aspm_platform_init(void) | 955 | * Returns true if ASPM has not been disabled by the command-line option |
864 | { | 956 | * pcie_aspm=off. |
865 | pcie_osc_support_set(OSC_ACTIVE_STATE_PWR_SUPPORT| | 957 | **/ |
866 | OSC_CLOCK_PWR_CAPABILITY_SUPPORT); | 958 | int pcie_aspm_enabled(void) |
867 | } | ||
868 | #else | ||
869 | static inline void pcie_aspm_platform_init(void) { } | ||
870 | #endif | ||
871 | |||
872 | static int __init pcie_aspm_init(void) | ||
873 | { | 959 | { |
874 | if (aspm_disabled) | 960 | return !aspm_disabled; |
875 | return 0; | ||
876 | pcie_aspm_platform_init(); | ||
877 | return 0; | ||
878 | } | 961 | } |
962 | EXPORT_SYMBOL(pcie_aspm_enabled); | ||
879 | 963 | ||
880 | fs_initcall(pcie_aspm_init); | ||
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c index 359fe5568df1..eec89b767f9f 100644 --- a/drivers/pci/pcie/portdrv_bus.c +++ b/drivers/pci/pcie/portdrv_bus.c | |||
@@ -16,14 +16,10 @@ | |||
16 | #include "portdrv.h" | 16 | #include "portdrv.h" |
17 | 17 | ||
18 | static int pcie_port_bus_match(struct device *dev, struct device_driver *drv); | 18 | static int pcie_port_bus_match(struct device *dev, struct device_driver *drv); |
19 | static int pcie_port_bus_suspend(struct device *dev, pm_message_t state); | ||
20 | static int pcie_port_bus_resume(struct device *dev); | ||
21 | 19 | ||
22 | struct bus_type pcie_port_bus_type = { | 20 | struct bus_type pcie_port_bus_type = { |
23 | .name = "pci_express", | 21 | .name = "pci_express", |
24 | .match = pcie_port_bus_match, | 22 | .match = pcie_port_bus_match, |
25 | .suspend = pcie_port_bus_suspend, | ||
26 | .resume = pcie_port_bus_resume, | ||
27 | }; | 23 | }; |
28 | EXPORT_SYMBOL_GPL(pcie_port_bus_type); | 24 | EXPORT_SYMBOL_GPL(pcie_port_bus_type); |
29 | 25 | ||
@@ -49,32 +45,12 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) | |||
49 | return 1; | 45 | return 1; |
50 | } | 46 | } |
51 | 47 | ||
52 | static int pcie_port_bus_suspend(struct device *dev, pm_message_t state) | 48 | int pcie_port_bus_register(void) |
53 | { | 49 | { |
54 | struct pcie_device *pciedev; | 50 | return bus_register(&pcie_port_bus_type); |
55 | struct pcie_port_service_driver *driver; | ||
56 | |||
57 | if (!dev || !dev->driver) | ||
58 | return 0; | ||
59 | |||
60 | pciedev = to_pcie_device(dev); | ||
61 | driver = to_service_driver(dev->driver); | ||
62 | if (driver && driver->suspend) | ||
63 | driver->suspend(pciedev, state); | ||
64 | return 0; | ||
65 | } | 51 | } |
66 | 52 | ||
67 | static int pcie_port_bus_resume(struct device *dev) | 53 | void pcie_port_bus_unregister(void) |
68 | { | 54 | { |
69 | struct pcie_device *pciedev; | 55 | bus_unregister(&pcie_port_bus_type); |
70 | struct pcie_port_service_driver *driver; | ||
71 | |||
72 | if (!dev || !dev->driver) | ||
73 | return 0; | ||
74 | |||
75 | pciedev = to_pcie_device(dev); | ||
76 | driver = to_service_driver(dev->driver); | ||
77 | if (driver && driver->resume) | ||
78 | driver->resume(pciedev); | ||
79 | return 0; | ||
80 | } | 56 | } |
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 2e091e014829..8b3f8c18032f 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
@@ -19,91 +19,15 @@ | |||
19 | 19 | ||
20 | extern int pcie_mch_quirk; /* MSI-quirk Indicator */ | 20 | extern int pcie_mch_quirk; /* MSI-quirk Indicator */ |
21 | 21 | ||
22 | static int pcie_port_probe_service(struct device *dev) | 22 | /** |
23 | { | 23 | * release_pcie_device - free PCI Express port service device structure |
24 | struct pcie_device *pciedev; | 24 | * @dev: Port service device to release |
25 | struct pcie_port_service_driver *driver; | 25 | * |
26 | int status; | 26 | * Invoked automatically when device is being removed in response to |
27 | 27 | * device_unregister(dev). Release all resources being claimed. | |
28 | if (!dev || !dev->driver) | ||
29 | return -ENODEV; | ||
30 | |||
31 | driver = to_service_driver(dev->driver); | ||
32 | if (!driver || !driver->probe) | ||
33 | return -ENODEV; | ||
34 | |||
35 | pciedev = to_pcie_device(dev); | ||
36 | status = driver->probe(pciedev, driver->id_table); | ||
37 | if (!status) { | ||
38 | dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", | ||
39 | driver->name); | ||
40 | get_device(dev); | ||
41 | } | ||
42 | return status; | ||
43 | } | ||
44 | |||
45 | static int pcie_port_remove_service(struct device *dev) | ||
46 | { | ||
47 | struct pcie_device *pciedev; | ||
48 | struct pcie_port_service_driver *driver; | ||
49 | |||
50 | if (!dev || !dev->driver) | ||
51 | return 0; | ||
52 | |||
53 | pciedev = to_pcie_device(dev); | ||
54 | driver = to_service_driver(dev->driver); | ||
55 | if (driver && driver->remove) { | ||
56 | dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n", | ||
57 | driver->name); | ||
58 | driver->remove(pciedev); | ||
59 | put_device(dev); | ||
60 | } | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static void pcie_port_shutdown_service(struct device *dev) {} | ||
65 | |||
66 | static int pcie_port_suspend_service(struct device *dev, pm_message_t state) | ||
67 | { | ||
68 | struct pcie_device *pciedev; | ||
69 | struct pcie_port_service_driver *driver; | ||
70 | |||
71 | if (!dev || !dev->driver) | ||
72 | return 0; | ||
73 | |||
74 | pciedev = to_pcie_device(dev); | ||
75 | driver = to_service_driver(dev->driver); | ||
76 | if (driver && driver->suspend) | ||
77 | driver->suspend(pciedev, state); | ||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | static int pcie_port_resume_service(struct device *dev) | ||
82 | { | ||
83 | struct pcie_device *pciedev; | ||
84 | struct pcie_port_service_driver *driver; | ||
85 | |||
86 | if (!dev || !dev->driver) | ||
87 | return 0; | ||
88 | |||
89 | pciedev = to_pcie_device(dev); | ||
90 | driver = to_service_driver(dev->driver); | ||
91 | |||
92 | if (driver && driver->resume) | ||
93 | driver->resume(pciedev); | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | /* | ||
98 | * release_pcie_device | ||
99 | * | ||
100 | * Being invoked automatically when device is being removed | ||
101 | * in response to device_unregister(dev) call. | ||
102 | * Release all resources being claimed. | ||
103 | */ | 28 | */ |
104 | static void release_pcie_device(struct device *dev) | 29 | static void release_pcie_device(struct device *dev) |
105 | { | 30 | { |
106 | dev_printk(KERN_DEBUG, dev, "free port service\n"); | ||
107 | kfree(to_pcie_device(dev)); | 31 | kfree(to_pcie_device(dev)); |
108 | } | 32 | } |
109 | 33 | ||
@@ -128,7 +52,16 @@ static int is_msi_quirked(struct pci_dev *dev) | |||
128 | } | 52 | } |
129 | return quirk; | 53 | return quirk; |
130 | } | 54 | } |
131 | 55 | ||
56 | /** | ||
57 | * assign_interrupt_mode - choose interrupt mode for PCI Express port services | ||
58 | * (INTx, MSI-X, MSI) and set up vectors | ||
59 | * @dev: PCI Express port to handle | ||
60 | * @vectors: Array of interrupt vectors to populate | ||
61 | * @mask: Bitmask of port capabilities returned by get_port_device_capability() | ||
62 | * | ||
63 | * Return value: Interrupt mode associated with the port | ||
64 | */ | ||
132 | static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) | 65 | static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) |
133 | { | 66 | { |
134 | int i, pos, nvec, status = -EINVAL; | 67 | int i, pos, nvec, status = -EINVAL; |
@@ -150,7 +83,6 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) | |||
150 | if (pos) { | 83 | if (pos) { |
151 | struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] = | 84 | struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] = |
152 | {{0, 0}, {0, 1}, {0, 2}, {0, 3}}; | 85 | {{0, 0}, {0, 1}, {0, 2}, {0, 3}}; |
153 | dev_info(&dev->dev, "found MSI-X capability\n"); | ||
154 | status = pci_enable_msix(dev, msix_entries, nvec); | 86 | status = pci_enable_msix(dev, msix_entries, nvec); |
155 | if (!status) { | 87 | if (!status) { |
156 | int j = 0; | 88 | int j = 0; |
@@ -165,7 +97,6 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) | |||
165 | if (status) { | 97 | if (status) { |
166 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | 98 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); |
167 | if (pos) { | 99 | if (pos) { |
168 | dev_info(&dev->dev, "found MSI capability\n"); | ||
169 | status = pci_enable_msi(dev); | 100 | status = pci_enable_msi(dev); |
170 | if (!status) { | 101 | if (!status) { |
171 | interrupt_mode = PCIE_PORT_MSI_MODE; | 102 | interrupt_mode = PCIE_PORT_MSI_MODE; |
@@ -177,6 +108,16 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) | |||
177 | return interrupt_mode; | 108 | return interrupt_mode; |
178 | } | 109 | } |
179 | 110 | ||
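The selection order in assign_interrupt_mode() is unchanged by the hunk above: try MSI-X first (one vector per port service), fall back to a single MSI vector, and otherwise stay on legacy INTx via dev->irq. A condensed sketch of that fallback, assuming <linux/pci.h> plus the PCIE_PORT_*_MODE constants used elsewhere in this file (pick_irq_mode and nvec are illustrative names, not kernel symbols):

	static int pick_irq_mode(struct pci_dev *dev,
				 struct msix_entry *entries, int nvec)
	{
		/* Prefer MSI-X: a dedicated vector per port service. */
		if (pci_find_capability(dev, PCI_CAP_ID_MSIX) &&
		    !pci_enable_msix(dev, entries, nvec))
			return PCIE_PORT_MSIX_MODE;

		/* Fall back to a single MSI vector shared by all services. */
		if (pci_find_capability(dev, PCI_CAP_ID_MSI) &&
		    !pci_enable_msi(dev))
			return PCIE_PORT_MSI_MODE;

		/* Last resort: legacy INTx on dev->irq. */
		return PCIE_PORT_INTx_MODE;
	}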
111 | /** | ||
112 | * get_port_device_capability - discover capabilities of a PCI Express port | ||
113 | * @dev: PCI Express port to examine | ||
114 | * | ||
115 | * The capabilities are read from the port's PCI Express configuration registers | ||
116 | * as described in PCI Express Base Specification 1.0a sections 7.8.2, 7.8.9 and | ||
117 | * 7.9 - 7.11. | ||
118 | * | ||
119 | * Return value: Bitmask of discovered port capabilities | ||
120 | */ | ||
180 | static int get_port_device_capability(struct pci_dev *dev) | 121 | static int get_port_device_capability(struct pci_dev *dev) |
181 | { | 122 | { |
182 | int services = 0, pos; | 123 | int services = 0, pos; |
@@ -204,6 +145,15 @@ static int get_port_device_capability(struct pci_dev *dev) | |||
204 | return services; | 145 | return services; |
205 | } | 146 | } |
206 | 147 | ||
148 | /** | ||
149 | * pcie_device_init - initialize PCI Express port service device | ||
150 | * @parent: PCI Express port to associate the service device with | ||
151 | * @dev: Port service device to initialize | ||
152 | * @port_type: Type of the port | ||
153 | * @service_type: Type of service to associate with the service device | ||
154 | * @irq: Interrupt vector to associate with the service device | ||
155 | * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI) | ||
156 | */ | ||
207 | static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, | 157 | static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, |
208 | int port_type, int service_type, int irq, int irq_mode) | 158 | int port_type, int service_type, int irq, int irq_mode) |
209 | { | 159 | { |
@@ -224,11 +174,19 @@ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, | |||
224 | device->driver = NULL; | 174 | device->driver = NULL; |
225 | device->driver_data = NULL; | 175 | device->driver_data = NULL; |
226 | device->release = release_pcie_device; /* callback to free pcie dev */ | 176 | device->release = release_pcie_device; /* callback to free pcie dev */ |
227 | snprintf(device->bus_id, sizeof(device->bus_id), "%s:pcie%02x", | 177 | dev_set_name(device, "%s:pcie%02x", |
228 | pci_name(parent), get_descriptor_id(port_type, service_type)); | 178 | pci_name(parent), get_descriptor_id(port_type, service_type)); |
229 | device->parent = &parent->dev; | 179 | device->parent = &parent->dev; |
230 | } | 180 | } |
231 | 181 | ||
182 | /** | ||
183 | * alloc_pcie_device - allocate PCI Express port service device structure | ||
184 | * @parent: PCI Express port to associate the service device with | ||
185 | * @port_type: Type of the port | ||
186 | * @service_type: Type of service to associate with the service device | ||
187 | * @irq: Interrupt vector to associate with the service device | ||
188 | * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI) | ||
189 | */ | ||
232 | static struct pcie_device* alloc_pcie_device(struct pci_dev *parent, | 190 | static struct pcie_device* alloc_pcie_device(struct pci_dev *parent, |
233 | int port_type, int service_type, int irq, int irq_mode) | 191 | int port_type, int service_type, int irq, int irq_mode) |
234 | { | 192 | { |
@@ -239,10 +197,13 @@ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent, | |||
239 | return NULL; | 197 | return NULL; |
240 | 198 | ||
241 | pcie_device_init(parent, device, port_type, service_type, irq,irq_mode); | 199 | pcie_device_init(parent, device, port_type, service_type, irq,irq_mode); |
242 | dev_printk(KERN_DEBUG, &device->device, "allocate port service\n"); | ||
243 | return device; | 200 | return device; |
244 | } | 201 | } |
245 | 202 | ||
203 | /** | ||
204 | * pcie_port_device_probe - check if device is a PCI Express port | ||
205 | * @dev: Device to check | ||
206 | */ | ||
246 | int pcie_port_device_probe(struct pci_dev *dev) | 207 | int pcie_port_device_probe(struct pci_dev *dev) |
247 | { | 208 | { |
248 | int pos, type; | 209 | int pos, type; |
@@ -260,6 +221,13 @@ int pcie_port_device_probe(struct pci_dev *dev) | |||
260 | return -ENODEV; | 221 | return -ENODEV; |
261 | } | 222 | } |
262 | 223 | ||
224 | /** | ||
225 | * pcie_port_device_register - register PCI Express port | ||
226 | * @dev: PCI Express port to register | ||
227 | * | ||
228 | * Allocate the port extension structure and register services associated with | ||
229 | * the port. | ||
230 | */ | ||
263 | int pcie_port_device_register(struct pci_dev *dev) | 231 | int pcie_port_device_register(struct pci_dev *dev) |
264 | { | 232 | { |
265 | struct pcie_port_device_ext *p_ext; | 233 | struct pcie_port_device_ext *p_ext; |
@@ -323,6 +291,11 @@ static int suspend_iter(struct device *dev, void *data) | |||
323 | return 0; | 291 | return 0; |
324 | } | 292 | } |
325 | 293 | ||
294 | /** | ||
295 | * pcie_port_device_suspend - suspend port services associated with a PCIe port | ||
296 | * @dev: PCI Express port to handle | ||
297 | * @state: Representation of system power management transition in progress | ||
298 | */ | ||
326 | int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state) | 299 | int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state) |
327 | { | 300 | { |
328 | return device_for_each_child(&dev->dev, &state, suspend_iter); | 301 | return device_for_each_child(&dev->dev, &state, suspend_iter); |
@@ -341,6 +314,10 @@ static int resume_iter(struct device *dev, void *data) | |||
341 | return 0; | 314 | return 0; |
342 | } | 315 | } |
343 | 316 | ||
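pcie_port_device_suspend() and pcie_port_device_resume() fan out to the port's child service devices through device_for_each_child() plus a per-child callback; suspend_iter() and resume_iter() above follow that shape. A sketch of the pattern (resume_child is an illustrative name, not the exact iterator body, which this hunk elides):

	static int resume_child(struct device *dev, void *data)
	{
		struct pcie_port_service_driver *drv;

		/* Only bound port service devices are interesting. */
		if (dev->bus == &pcie_port_bus_type && dev->driver) {
			drv = to_service_driver(dev->driver);
			if (drv->resume)
				drv->resume(to_pcie_device(dev));
		}
		return 0;	/* nonzero would abort device_for_each_child() */
	}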
317 | /** | ||
318 | * pcie_port_device_resume - resume port services associated with a PCIe port | ||
319 | * @dev: PCI Express port to handle | ||
320 | */ | ||
344 | int pcie_port_device_resume(struct pci_dev *dev) | 321 | int pcie_port_device_resume(struct pci_dev *dev) |
345 | { | 322 | { |
346 | return device_for_each_child(&dev->dev, NULL, resume_iter); | 323 | return device_for_each_child(&dev->dev, NULL, resume_iter); |
@@ -363,6 +340,13 @@ static int remove_iter(struct device *dev, void *data) | |||
363 | return 0; | 340 | return 0; |
364 | } | 341 | } |
365 | 342 | ||
343 | /** | ||
344 | * pcie_port_device_remove - unregister PCI Express port service devices | ||
345 | * @dev: PCI Express port whose service devices are to be unregistered | ||
346 | * | ||
347 | * Remove PCI Express port service devices associated with given port and | ||
348 | * disable MSI-X or MSI for the port. | ||
349 | */ | ||
366 | void pcie_port_device_remove(struct pci_dev *dev) | 350 | void pcie_port_device_remove(struct pci_dev *dev) |
367 | { | 351 | { |
368 | struct device *device; | 352 | struct device *device; |
@@ -386,16 +370,80 @@ void pcie_port_device_remove(struct pci_dev *dev) | |||
386 | pci_disable_msi(dev); | 370 | pci_disable_msi(dev); |
387 | } | 371 | } |
388 | 372 | ||
389 | int pcie_port_bus_register(void) | 373 | /** |
374 | * pcie_port_probe_service - probe driver for given PCI Express port service | ||
375 | * @dev: PCI Express port service device to probe against | ||
376 | * | ||
377 | * If a PCI Express port service driver is registered with | ||
378 | * pcie_port_service_register(), this function will be called by the driver core | ||
379 | * whenever a match is found between the driver and a port service device. | ||
380 | */ | ||
381 | static int pcie_port_probe_service(struct device *dev) | ||
390 | { | 382 | { |
391 | return bus_register(&pcie_port_bus_type); | 383 | struct pcie_device *pciedev; |
384 | struct pcie_port_service_driver *driver; | ||
385 | int status; | ||
386 | |||
387 | if (!dev || !dev->driver) | ||
388 | return -ENODEV; | ||
389 | |||
390 | driver = to_service_driver(dev->driver); | ||
391 | if (!driver || !driver->probe) | ||
392 | return -ENODEV; | ||
393 | |||
394 | pciedev = to_pcie_device(dev); | ||
395 | status = driver->probe(pciedev, driver->id_table); | ||
396 | if (!status) { | ||
397 | dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", | ||
398 | driver->name); | ||
399 | get_device(dev); | ||
400 | } | ||
401 | return status; | ||
392 | } | 402 | } |
393 | 403 | ||
394 | void pcie_port_bus_unregister(void) | 404 | /** |
405 | * pcie_port_remove_service - detach driver from given PCI Express port service | ||
406 | * @dev: PCI Express port service device to handle | ||
407 | * | ||
408 | * If a PCI Express port service driver is registered with | ||
409 | * pcie_port_service_register(), this function will be called by the driver core | ||
410 | * when device_unregister() is called for the port service device associated | ||
411 | * with the driver. | ||
412 | */ | ||
413 | static int pcie_port_remove_service(struct device *dev) | ||
395 | { | 414 | { |
396 | bus_unregister(&pcie_port_bus_type); | 415 | struct pcie_device *pciedev; |
416 | struct pcie_port_service_driver *driver; | ||
417 | |||
418 | if (!dev || !dev->driver) | ||
419 | return 0; | ||
420 | |||
421 | pciedev = to_pcie_device(dev); | ||
422 | driver = to_service_driver(dev->driver); | ||
423 | if (driver && driver->remove) { | ||
424 | dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n", | ||
425 | driver->name); | ||
426 | driver->remove(pciedev); | ||
427 | put_device(dev); | ||
428 | } | ||
429 | return 0; | ||
397 | } | 430 | } |
398 | 431 | ||
432 | /** | ||
433 | * pcie_port_shutdown_service - shut down given PCI Express port service | ||
434 | * @dev: PCI Express port service device to handle | ||
435 | * | ||
436 | * If a PCI Express port service driver is registered with | ||
437 | * pcie_port_service_register(), this function will be called by the driver core | ||
438 | * when device_shutdown() is called for the port service device associated | ||
439 | * with the driver. | ||
440 | */ | ||
441 | static void pcie_port_shutdown_service(struct device *dev) {} | ||
442 | |||
443 | /** | ||
444 | * pcie_port_service_register - register PCI Express port service driver | ||
445 | * @new: PCI Express port service driver to register | ||
446 | */ | ||
399 | int pcie_port_service_register(struct pcie_port_service_driver *new) | 447 | int pcie_port_service_register(struct pcie_port_service_driver *new) |
400 | { | 448 | { |
401 | new->driver.name = (char *)new->name; | 449 | new->driver.name = (char *)new->name; |
@@ -403,15 +451,17 @@ int pcie_port_service_register(struct pcie_port_service_driver *new) | |||
403 | new->driver.probe = pcie_port_probe_service; | 451 | new->driver.probe = pcie_port_probe_service; |
404 | new->driver.remove = pcie_port_remove_service; | 452 | new->driver.remove = pcie_port_remove_service; |
405 | new->driver.shutdown = pcie_port_shutdown_service; | 453 | new->driver.shutdown = pcie_port_shutdown_service; |
406 | new->driver.suspend = pcie_port_suspend_service; | ||
407 | new->driver.resume = pcie_port_resume_service; | ||
408 | 454 | ||
409 | return driver_register(&new->driver); | 455 | return driver_register(&new->driver); |
410 | } | 456 | } |
411 | 457 | ||
412 | void pcie_port_service_unregister(struct pcie_port_service_driver *new) | 458 | /** |
459 | * pcie_port_service_unregister - unregister PCI Express port service driver | ||
460 | * @drv: PCI Express port service driver to unregister | ||
461 | */ | ||
462 | void pcie_port_service_unregister(struct pcie_port_service_driver *drv) | ||
413 | { | 463 | { |
414 | driver_unregister(&new->driver); | 464 | driver_unregister(&drv->driver); |
415 | } | 465 | } |
416 | 466 | ||
417 | EXPORT_SYMBOL(pcie_port_service_register); | 467 | EXPORT_SYMBOL(pcie_port_service_register); |
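Since the driver-model suspend/resume hooks are gone, a port service driver now fills in probe/remove (shutdown optional) and registers with pcie_port_service_register(). A minimal sketch, with the my_* names illustrative and the id-table fields assumed from the 2.6.28-era <linux/pcieport_if.h>:

	static struct pcie_port_service_id my_ids[] = {
		{
			.vendor		= PCI_ANY_ID,
			.device		= PCI_ANY_ID,
			.port_type	= PCIE_ANY_PORT,
			.service_type	= PCIE_PORT_SERVICE_AER,
		},
		{ /* end of list */ },
	};

	static int my_probe(struct pcie_device *dev,
			    const struct pcie_port_service_id *id)
	{
		return 0;	/* zero claims the service device */
	}

	static void my_remove(struct pcie_device *dev)
	{
	}

	static struct pcie_port_service_driver my_service = {
		.name		= "my_service",
		.id_table	= my_ids,
		.probe		= my_probe,
		.remove		= my_remove,
	};

	static int __init my_init(void)
	{
		return pcie_port_service_register(&my_service);
	}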
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 584422da8d8b..99a914a027f8 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
@@ -41,7 +41,6 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev) | |||
41 | { | 41 | { |
42 | int retval; | 42 | int retval; |
43 | 43 | ||
44 | pci_restore_state(dev); | ||
45 | retval = pci_enable_device(dev); | 44 | retval = pci_enable_device(dev); |
46 | if (retval) | 45 | if (retval) |
47 | return retval; | 46 | return retval; |
@@ -52,11 +51,18 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev) | |||
52 | #ifdef CONFIG_PM | 51 | #ifdef CONFIG_PM |
53 | static int pcie_portdrv_suspend(struct pci_dev *dev, pm_message_t state) | 52 | static int pcie_portdrv_suspend(struct pci_dev *dev, pm_message_t state) |
54 | { | 53 | { |
55 | int ret = pcie_port_device_suspend(dev, state); | 54 | return pcie_port_device_suspend(dev, state); |
56 | 55 | ||
57 | if (!ret) | 56 | } |
58 | ret = pcie_portdrv_save_config(dev); | 57 | |
59 | return ret; | 58 | static int pcie_portdrv_suspend_late(struct pci_dev *dev, pm_message_t state) |
59 | { | ||
60 | return pci_save_state(dev); | ||
61 | } | ||
62 | |||
63 | static int pcie_portdrv_resume_early(struct pci_dev *dev) | ||
64 | { | ||
65 | return pci_restore_state(dev); | ||
60 | } | 66 | } |
61 | 67 | ||
62 | static int pcie_portdrv_resume(struct pci_dev *dev) | 68 | static int pcie_portdrv_resume(struct pci_dev *dev) |
@@ -66,6 +72,8 @@ static int pcie_portdrv_resume(struct pci_dev *dev) | |||
66 | } | 72 | } |
67 | #else | 73 | #else |
68 | #define pcie_portdrv_suspend NULL | 74 | #define pcie_portdrv_suspend NULL |
75 | #define pcie_portdrv_suspend_late NULL | ||
76 | #define pcie_portdrv_resume_early NULL | ||
69 | #define pcie_portdrv_resume NULL | 77 | #define pcie_portdrv_resume NULL |
70 | #endif | 78 | #endif |
71 | 79 | ||
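The split is about ordering: .suspend runs while the device is still fully functional, .suspend_late runs just before the platform transition, and .resume_early runs first on wakeup, so saving config space late and restoring it early brackets the low-power interval as tightly as possible. The same pattern for any PCI driver of this era (foo_* names are illustrative; pci_save_state() and pci_restore_state() both return int here):

	static int foo_suspend_late(struct pci_dev *pdev, pm_message_t state)
	{
		/* Snapshot standard and capability config space last. */
		return pci_save_state(pdev);
	}

	static int foo_resume_early(struct pci_dev *pdev)
	{
		/* Restore config space before .resume touches the device. */
		return pci_restore_state(pdev);
	}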
@@ -221,6 +229,7 @@ static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev) | |||
221 | 229 | ||
222 | /* If fatal, restore cfg space for possible link reset at upstream */ | 230 | /* If fatal, restore cfg space for possible link reset at upstream */ |
223 | if (dev->error_state == pci_channel_io_frozen) { | 231 | if (dev->error_state == pci_channel_io_frozen) { |
232 | pci_restore_state(dev); | ||
224 | pcie_portdrv_restore_config(dev); | 233 | pcie_portdrv_restore_config(dev); |
225 | pci_enable_pcie_error_reporting(dev); | 234 | pci_enable_pcie_error_reporting(dev); |
226 | } | 235 | } |
@@ -283,6 +292,8 @@ static struct pci_driver pcie_portdriver = { | |||
283 | .remove = pcie_portdrv_remove, | 292 | .remove = pcie_portdrv_remove, |
284 | 293 | ||
285 | .suspend = pcie_portdrv_suspend, | 294 | .suspend = pcie_portdrv_suspend, |
295 | .suspend_late = pcie_portdrv_suspend_late, | ||
296 | .resume_early = pcie_portdrv_resume_early, | ||
286 | .resume = pcie_portdrv_resume, | 297 | .resume = pcie_portdrv_resume, |
287 | 298 | ||
288 | .err_handler = &pcie_portdrv_err_handler, | 299 | .err_handler = &pcie_portdrv_err_handler, |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 003a9b3c293f..303644614eea 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -55,8 +55,8 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev, | |||
55 | 55 | ||
56 | cpumask = pcibus_to_cpumask(to_pci_bus(dev)); | 56 | cpumask = pcibus_to_cpumask(to_pci_bus(dev)); |
57 | ret = type? | 57 | ret = type? |
58 | cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask): | 58 | cpulist_scnprintf(buf, PAGE_SIZE-2, &cpumask) : |
59 | cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask); | 59 | cpumask_scnprintf(buf, PAGE_SIZE-2, &cpumask); |
60 | buf[ret++] = '\n'; | 60 | buf[ret++] = '\n'; |
61 | buf[ret] = '\0'; | 61 | buf[ret] = '\0'; |
62 | return ret; | 62 | return ret; |
@@ -135,13 +135,6 @@ static u64 pci_size(u64 base, u64 maxbase, u64 mask) | |||
135 | return size; | 135 | return size; |
136 | } | 136 | } |
137 | 137 | ||
138 | enum pci_bar_type { | ||
139 | pci_bar_unknown, /* Standard PCI BAR probe */ | ||
140 | pci_bar_io, /* An io port BAR */ | ||
141 | pci_bar_mem32, /* A 32-bit memory BAR */ | ||
142 | pci_bar_mem64, /* A 64-bit memory BAR */ | ||
143 | }; | ||
144 | |||
145 | static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar) | 138 | static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar) |
146 | { | 139 | { |
147 | if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { | 140 | if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { |
@@ -156,11 +149,16 @@ static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar) | |||
156 | return pci_bar_mem32; | 149 | return pci_bar_mem32; |
157 | } | 150 | } |
158 | 151 | ||
159 | /* | 152 | /** |
160 | * If the type is not unknown, we assume that the lowest bit is 'enable'. | 153 | * pci_read_base - read a PCI BAR |
161 | * Returns 1 if the BAR was 64-bit and 0 if it was 32-bit. | 154 | * @dev: the PCI device |
155 | * @type: type of the BAR | ||
156 | * @res: resource buffer to be filled in | ||
157 | * @pos: BAR position in the config space | ||
158 | * | ||
159 | * Returns 1 if the BAR is 64-bit, or 0 if 32-bit. | ||
162 | */ | 160 | */ |
163 | static int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | 161 | int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, |
164 | struct resource *res, unsigned int pos) | 162 | struct resource *res, unsigned int pos) |
165 | { | 163 | { |
166 | u32 l, sz, mask; | 164 | u32 l, sz, mask; |
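For context on the sizing that __pci_read_base performs with l, sz and mask: a BAR's size is probed by writing all-ones and reading back which address bits stick. A condensed sketch for a 32-bit memory BAR (illustrative; the real function also handles I/O and 64-bit BARs and disturbs decoding as briefly as possible):

	u32 orig, sz;

	pci_read_config_dword(dev, pos, &orig);
	pci_write_config_dword(dev, pos, ~0);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, orig);	/* restore the BAR */

	sz &= PCI_BASE_ADDRESS_MEM_MASK;	/* drop the flag bits */
	sz &= ~(sz - 1);			/* lowest set bit == BAR size */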
@@ -400,19 +398,17 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, | |||
400 | if (!child) | 398 | if (!child) |
401 | return NULL; | 399 | return NULL; |
402 | 400 | ||
403 | child->self = bridge; | ||
404 | child->parent = parent; | 401 | child->parent = parent; |
405 | child->ops = parent->ops; | 402 | child->ops = parent->ops; |
406 | child->sysdata = parent->sysdata; | 403 | child->sysdata = parent->sysdata; |
407 | child->bus_flags = parent->bus_flags; | 404 | child->bus_flags = parent->bus_flags; |
408 | child->bridge = get_device(&bridge->dev); | ||
409 | 405 | ||
410 | /* initialize some portions of the bus device, but don't register it | 406 | /* initialize some portions of the bus device, but don't register it |
411 | * now as the parent is not properly set up yet. This device will get | 407 | * now as the parent is not properly set up yet. This device will get |
412 | * registered later in pci_bus_add_devices() | 408 | * registered later in pci_bus_add_devices() |
413 | */ | 409 | */ |
414 | child->dev.class = &pcibus_class; | 410 | child->dev.class = &pcibus_class; |
415 | sprintf(child->dev.bus_id, "%04x:%02x", pci_domain_nr(child), busnr); | 411 | dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr); |
416 | 412 | ||
417 | /* | 413 | /* |
418 | * Set up the primary, secondary and subordinate | 414 | * Set up the primary, secondary and subordinate |
@@ -422,8 +418,14 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, | |||
422 | child->primary = parent->secondary; | 418 | child->primary = parent->secondary; |
423 | child->subordinate = 0xff; | 419 | child->subordinate = 0xff; |
424 | 420 | ||
421 | if (!bridge) | ||
422 | return child; | ||
423 | |||
424 | child->self = bridge; | ||
425 | child->bridge = get_device(&bridge->dev); | ||
426 | |||
425 | /* Set up default resource pointers and names.. */ | 427 | /* Set up default resource pointers and names.. */ |
426 | for (i = 0; i < 4; i++) { | 428 | for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { |
427 | child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; | 429 | child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; |
428 | child->resource[i]->name = child->name; | 430 | child->resource[i]->name = child->name; |
429 | } | 431 | } |
@@ -958,8 +960,12 @@ static void pci_init_capabilities(struct pci_dev *dev) | |||
958 | /* MSI/MSI-X list */ | 960 | /* MSI/MSI-X list */ |
959 | pci_msi_init_pci_dev(dev); | 961 | pci_msi_init_pci_dev(dev); |
960 | 962 | ||
963 | /* Buffers for saving PCIe and PCI-X capabilities */ | ||
964 | pci_allocate_cap_save_buffers(dev); | ||
965 | |||
961 | /* Power Management */ | 966 | /* Power Management */ |
962 | pci_pm_init(dev); | 967 | pci_pm_init(dev); |
968 | platform_pci_wakeup_init(dev); | ||
963 | 969 | ||
964 | /* Vital Product Data */ | 970 | /* Vital Product Data */ |
965 | pci_vpd_pci22_init(dev); | 971 | pci_vpd_pci22_init(dev); |
@@ -1130,7 +1136,7 @@ struct pci_bus * pci_create_bus(struct device *parent, | |||
1130 | memset(dev, 0, sizeof(*dev)); | 1136 | memset(dev, 0, sizeof(*dev)); |
1131 | dev->parent = parent; | 1137 | dev->parent = parent; |
1132 | dev->release = pci_release_bus_bridge_dev; | 1138 | dev->release = pci_release_bus_bridge_dev; |
1133 | sprintf(dev->bus_id, "pci%04x:%02x", pci_domain_nr(b), bus); | 1139 | dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus); |
1134 | error = device_register(dev); | 1140 | error = device_register(dev); |
1135 | if (error) | 1141 | if (error) |
1136 | goto dev_reg_err; | 1142 | goto dev_reg_err; |
@@ -1141,7 +1147,7 @@ struct pci_bus * pci_create_bus(struct device *parent, | |||
1141 | 1147 | ||
1142 | b->dev.class = &pcibus_class; | 1148 | b->dev.class = &pcibus_class; |
1143 | b->dev.parent = b->bridge; | 1149 | b->dev.parent = b->bridge; |
1144 | sprintf(b->dev.bus_id, "%04x:%02x", pci_domain_nr(b), bus); | 1150 | dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus); |
1145 | error = device_register(&b->dev); | 1151 | error = device_register(&b->dev); |
1146 | if (error) | 1152 | if (error) |
1147 | goto class_dev_reg_err; | 1153 | goto class_dev_reg_err; |
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index e1098c302c45..593bb844b8db 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c | |||
@@ -252,11 +252,20 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) | |||
252 | const struct proc_dir_entry *dp = PDE(inode); | 252 | const struct proc_dir_entry *dp = PDE(inode); |
253 | struct pci_dev *dev = dp->data; | 253 | struct pci_dev *dev = dp->data; |
254 | struct pci_filp_private *fpriv = file->private_data; | 254 | struct pci_filp_private *fpriv = file->private_data; |
255 | int ret; | 255 | int i, ret; |
256 | 256 | ||
257 | if (!capable(CAP_SYS_RAWIO)) | 257 | if (!capable(CAP_SYS_RAWIO)) |
258 | return -EPERM; | 258 | return -EPERM; |
259 | 259 | ||
260 | /* Make sure the caller is mapping a real resource for this device */ | ||
261 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { | ||
262 | if (pci_mmap_fits(dev, i, vma)) | ||
263 | break; | ||
264 | } | ||
265 | |||
266 | if (i >= PCI_ROM_RESOURCE) | ||
267 | return -ENODEV; | ||
268 | |||
260 | ret = pci_mmap_page_range(dev, vma, | 269 | ret = pci_mmap_page_range(dev, vma, |
261 | fpriv->mmap_state, | 270 | fpriv->mmap_state, |
262 | fpriv->write_combine); | 271 | fpriv->write_combine); |
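The added loop closes a hole where a CAP_SYS_RAWIO caller could mmap past the device's real resources. Conceptually, a request "fits" resource i when its span lies within that BAR; a sketch of the idea (fits_resource is an illustrative name, and the real pci_mmap_fits() also accounts for the page-offset convention of these files):

	static int fits_resource(struct pci_dev *dev, int i,
				 struct vm_area_struct *vma)
	{
		resource_size_t size = pci_resource_len(dev, i);
		unsigned long span = vma->vm_end - vma->vm_start;
		unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

		return off + span <= size;
	}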
@@ -352,15 +361,16 @@ static int show_device(struct seq_file *m, void *v) | |||
352 | dev->vendor, | 361 | dev->vendor, |
353 | dev->device, | 362 | dev->device, |
354 | dev->irq); | 363 | dev->irq); |
355 | /* Here should be 7 and not PCI_NUM_RESOURCES as we need to preserve compatibility */ | 364 | |
356 | for (i=0; i<7; i++) { | 365 | /* only print standard and ROM resources to preserve compatibility */ |
366 | for (i = 0; i <= PCI_ROM_RESOURCE; i++) { | ||
357 | resource_size_t start, end; | 367 | resource_size_t start, end; |
358 | pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); | 368 | pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); |
359 | seq_printf(m, "\t%16llx", | 369 | seq_printf(m, "\t%16llx", |
360 | (unsigned long long)(start | | 370 | (unsigned long long)(start | |
361 | (dev->resource[i].flags & PCI_REGION_FLAG_MASK))); | 371 | (dev->resource[i].flags & PCI_REGION_FLAG_MASK))); |
362 | } | 372 | } |
363 | for (i=0; i<7; i++) { | 373 | for (i = 0; i <= PCI_ROM_RESOURCE; i++) { |
364 | resource_size_t start, end; | 374 | resource_size_t start, end; |
365 | pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); | 375 | pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); |
366 | seq_printf(m, "\t%16llx", | 376 | seq_printf(m, "\t%16llx", |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 5f4f85f56cb7..baad093aafe3 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -56,7 +56,7 @@ static void quirk_passive_release(struct pci_dev *dev) | |||
56 | while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) { | 56 | while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) { |
57 | pci_read_config_byte(d, 0x82, &dlc); | 57 | pci_read_config_byte(d, 0x82, &dlc); |
58 | if (!(dlc & 1<<1)) { | 58 | if (!(dlc & 1<<1)) { |
59 | dev_err(&d->dev, "PIIX3: Enabling Passive Release\n"); | 59 | dev_info(&d->dev, "PIIX3: Enabling Passive Release\n"); |
60 | dlc |= 1<<1; | 60 | dlc |= 1<<1; |
61 | pci_write_config_byte(d, 0x82, dlc); | 61 | pci_write_config_byte(d, 0x82, dlc); |
62 | } | 62 | } |
@@ -449,7 +449,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, | |||
449 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi); | 449 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi); |
450 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi); | 450 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi); |
451 | 451 | ||
452 | static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev) | 452 | static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev) |
453 | { | 453 | { |
454 | u32 region; | 454 | u32 region; |
455 | 455 | ||
@@ -459,20 +459,95 @@ static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev) | |||
459 | pci_read_config_dword(dev, 0x48, ®ion); | 459 | pci_read_config_dword(dev, 0x48, ®ion); |
460 | quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); | 460 | quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); |
461 | } | 461 | } |
462 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi); | 462 | |
463 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi); | 463 | static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize) |
464 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich6_lpc_acpi); | 464 | { |
465 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich6_lpc_acpi); | 465 | u32 val; |
466 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich6_lpc_acpi); | 466 | u32 size, base; |
467 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich6_lpc_acpi); | 467 | |
468 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich6_lpc_acpi); | 468 | pci_read_config_dword(dev, reg, &val); |
469 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich6_lpc_acpi); | 469 | |
470 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich6_lpc_acpi); | 470 | /* Enabled? */ |
471 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich6_lpc_acpi); | 471 | if (!(val & 1)) |
472 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich6_lpc_acpi); | 472 | return; |
473 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich6_lpc_acpi); | 473 | base = val & 0xfffc; |
474 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich6_lpc_acpi); | 474 | if (dynsize) { |
475 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich6_lpc_acpi); | 475 | /* |
476 | * This is not correct. It is 16, 32 or 64 bytes depending on | ||
477 | * register D31:F0:ADh bits 5:4. | ||
478 | * | ||
479 | * But this gets us at least _part_ of it. | ||
480 | */ | ||
481 | size = 16; | ||
482 | } else { | ||
483 | size = 128; | ||
484 | } | ||
485 | base &= ~(size-1); | ||
486 | |||
487 | /* Just print it out for now. We should reserve it after more debugging */ | ||
488 | dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base+size-1); | ||
489 | } | ||
490 | |||
491 | static void __devinit quirk_ich6_lpc(struct pci_dev *dev) | ||
492 | { | ||
493 | /* Shared ACPI/GPIO decode with all ICH6+ */ | ||
494 | ich6_lpc_acpi_gpio(dev); | ||
495 | |||
496 | /* ICH6-specific generic IO decode */ | ||
497 | ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0); | ||
498 | ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1); | ||
499 | } | ||
500 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc); | ||
501 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc); | ||
502 | |||
503 | static void __devinit ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name) | ||
504 | { | ||
505 | u32 val; | ||
506 | u32 mask, base; | ||
507 | |||
508 | pci_read_config_dword(dev, reg, &val); | ||
509 | |||
510 | /* Enabled? */ | ||
511 | if (!(val & 1)) | ||
512 | return; | ||
513 | |||
514 | /* | ||
515 | * IO base in bits 15:2, mask in bits 23:18, both | ||
516 | * are dword-based | ||
517 | */ | ||
518 | base = val & 0xfffc; | ||
519 | mask = (val >> 16) & 0xfc; | ||
520 | mask |= 3; | ||
521 | |||
522 | /* Just print it out for now. We should reserve it after more debugging */ | ||
523 | dev_info(&dev->dev, "%s PIO at %04x (mask %04x)\n", name, base, mask); | ||
524 | } | ||
525 | |||
526 | /* ICH7-10 has the same common LPC generic IO decode registers */ | ||
527 | static void __devinit quirk_ich7_lpc(struct pci_dev *dev) | ||
528 | { | ||
529 | /* We share the common ACPI/DPIO decode with ICH6 */ | ||
530 | ich6_lpc_acpi_gpio(dev); | ||
531 | |||
532 | /* And have 4 ICH7+ generic decodes */ | ||
533 | ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1"); | ||
534 | ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2"); | ||
535 | ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3"); | ||
536 | ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4"); | ||
537 | } | ||
538 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc); | ||
539 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc); | ||
540 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc); | ||
541 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc); | ||
542 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc); | ||
543 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc); | ||
544 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc); | ||
545 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc); | ||
546 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc); | ||
547 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc); | ||
548 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc); | ||
549 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc); | ||
550 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc); | ||
476 | 551 | ||
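A worked example of the ICH7 decode above: for a register value of 0x00fc0295, bit 0 is set (enabled), base = 0x00fc0295 & 0xfffc = 0x0294, and mask = ((0x00fc0295 >> 16) & 0xfc) | 3 = 0xff, so the quirk would log "PIO at 0294 (mask 00ff)", i.e. the decode matches addresses with those masked low bits ignored.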
477 | /* | 552 | /* |
478 | * VIA ACPI: One IO region pointed to by longword at | 553 | * VIA ACPI: One IO region pointed to by longword at |
@@ -606,27 +681,6 @@ static void __init quirk_ioapic_rmw(struct pci_dev *dev) | |||
606 | sis_apic_bug = 1; | 681 | sis_apic_bug = 1; |
607 | } | 682 | } |
608 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw); | 683 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw); |
609 | |||
610 | #define AMD8131_revA0 0x01 | ||
611 | #define AMD8131_revB0 0x11 | ||
612 | #define AMD8131_MISC 0x40 | ||
613 | #define AMD8131_NIOAMODE_BIT 0 | ||
614 | static void quirk_amd_8131_ioapic(struct pci_dev *dev) | ||
615 | { | ||
616 | unsigned char tmp; | ||
617 | |||
618 | if (nr_ioapics == 0) | ||
619 | return; | ||
620 | |||
621 | if (dev->revision == AMD8131_revA0 || dev->revision == AMD8131_revB0) { | ||
622 | dev_info(&dev->dev, "Fixing up AMD8131 IOAPIC mode\n"); | ||
623 | pci_read_config_byte( dev, AMD8131_MISC, &tmp); | ||
624 | tmp &= ~(1 << AMD8131_NIOAMODE_BIT); | ||
625 | pci_write_config_byte( dev, AMD8131_MISC, tmp); | ||
626 | } | ||
627 | } | ||
628 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); | ||
629 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); | ||
630 | #endif /* CONFIG_X86_IO_APIC */ | 684 | #endif /* CONFIG_X86_IO_APIC */ |
631 | 685 | ||
632 | /* | 686 | /* |
@@ -1423,6 +1477,155 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm); | |||
1423 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); | 1477 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); |
1424 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); | 1478 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); |
1425 | 1479 | ||
1480 | #ifdef CONFIG_X86_IO_APIC | ||
1481 | /* | ||
1482 | * Boot interrupts on some chipsets cannot be turned off. For these chipsets, | ||
1483 | * remap the original interrupt in the Linux kernel to the boot interrupt, so | ||
1484 | * that a PCI device's interrupt handler is installed on the boot interrupt | ||
1485 | * line instead. | ||
1486 | */ | ||
1487 | static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev) | ||
1488 | { | ||
1489 | if (noioapicquirk || noioapicreroute) | ||
1490 | return; | ||
1491 | |||
1492 | dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT; | ||
1493 | |||
1494 | printk(KERN_INFO "PCI quirk: reroute interrupts for 0x%04x:0x%04x\n", | ||
1495 | dev->vendor, dev->device); | ||
1496 | return; | ||
1497 | } | ||
1498 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); | ||
1499 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); | ||
1500 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel); | ||
1501 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel); | ||
1502 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel); | ||
1503 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel); | ||
1504 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel); | ||
1505 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel); | ||
1506 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); | ||
1507 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); | ||
1508 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel); | ||
1509 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel); | ||
1510 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel); | ||
1511 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel); | ||
1512 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel); | ||
1513 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel); | ||
1514 | |||
1515 | /* | ||
1516 | * On some chipsets we can disable the generation of legacy INTx boot | ||
1517 | * interrupts. | ||
1518 | */ | ||
1519 | |||
1520 | /* | ||
1521 | * IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no | ||
1522 | * 300641-004US, section 5.7.3. | ||
1523 | */ | ||
1524 | #define INTEL_6300_IOAPIC_ABAR 0x40 | ||
1525 | #define INTEL_6300_DISABLE_BOOT_IRQ (1<<14) | ||
1526 | |||
1527 | static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev) | ||
1528 | { | ||
1529 | u16 pci_config_word; | ||
1530 | |||
1531 | if (noioapicquirk) | ||
1532 | return; | ||
1533 | |||
1534 | pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word); | ||
1535 | pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; | ||
1536 | pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); | ||
1537 | |||
1538 | printk(KERN_INFO "disabled boot interrupt on device 0x%04x:0x%04x\n", | ||
1539 | dev->vendor, dev->device); | ||
1540 | } | ||
1541 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); | ||
1542 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); | ||
1543 | |||
1544 | /* | ||
1545 | * disable boot interrupts on HT-1000 | ||
1546 | */ | ||
1547 | #define BC_HT1000_FEATURE_REG 0x64 | ||
1548 | #define BC_HT1000_PIC_REGS_ENABLE (1<<0) | ||
1549 | #define BC_HT1000_MAP_IDX 0xC00 | ||
1550 | #define BC_HT1000_MAP_DATA 0xC01 | ||
1551 | |||
1552 | static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev) | ||
1553 | { | ||
1554 | u32 pci_config_dword; | ||
1555 | u8 irq; | ||
1556 | |||
1557 | if (noioapicquirk) | ||
1558 | return; | ||
1559 | |||
1560 | pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword); | ||
1561 | pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword | | ||
1562 | BC_HT1000_PIC_REGS_ENABLE); | ||
1563 | |||
1564 | for (irq = 0x10; irq < 0x10 + 32; irq++) { | ||
1565 | outb(irq, BC_HT1000_MAP_IDX); | ||
1566 | outb(0x00, BC_HT1000_MAP_DATA); | ||
1567 | } | ||
1568 | |||
1569 | pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword); | ||
1570 | |||
1571 | printk(KERN_INFO "disabled boot interrupts on PCI device " | ||
1572 | "0x%04x:0x%04x\n", dev->vendor, dev->device); | ||
1573 | } | ||
1574 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); | ||
1575 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); | ||
1576 | |||
1577 | /* | ||
1578 | * disable boot interrupts on AMD and ATI chipsets | ||
1579 | */ | ||
1580 | /* | ||
1581 | * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131 | ||
1582 | * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode | ||
1583 | * (due to an erratum). | ||
1584 | */ | ||
1585 | #define AMD_813X_MISC 0x40 | ||
1586 | #define AMD_813X_NOIOAMODE (1<<0) | ||
1587 | |||
1588 | static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) | ||
1589 | { | ||
1590 | u32 pci_config_dword; | ||
1591 | |||
1592 | if (noioapicquirk) | ||
1593 | return; | ||
1594 | |||
1595 | pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword); | ||
1596 | pci_config_dword &= ~AMD_813X_NOIOAMODE; | ||
1597 | pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword); | ||
1598 | |||
1599 | printk(KERN_INFO "disabled boot interrupts on PCI device " | ||
1600 | "0x%04x:0x%04x\n", dev->vendor, dev->device); | ||
1601 | } | ||
1602 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); | ||
1603 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); | ||
1604 | |||
1605 | #define AMD_8111_PCI_IRQ_ROUTING 0x56 | ||
1606 | |||
1607 | static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev) | ||
1608 | { | ||
1609 | u16 pci_config_word; | ||
1610 | |||
1611 | if (noioapicquirk) | ||
1612 | return; | ||
1613 | |||
1614 | pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word); | ||
1615 | if (!pci_config_word) { | ||
1616 | printk(KERN_INFO "boot interrupts on PCI device 0x%04x:0x%04x " | ||
1617 | "already disabled\n", | ||
1618 | dev->vendor, dev->device); | ||
1619 | return; | ||
1620 | } | ||
1621 | pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0); | ||
1622 | printk(KERN_INFO "disabled boot interrupts on PCI device " | ||
1623 | "0x%04x:0x%04x\n", dev->vendor, dev->device); | ||
1624 | } | ||
1625 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); | ||
1626 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); | ||
1627 | #endif /* CONFIG_X86_IO_APIC */ | ||
1628 | |||
1426 | /* | 1629 | /* |
1427 | * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size | 1630 | * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size |
1428 | * but the PIO transfers won't work if BAR0 falls at the odd 8 bytes. | 1631 | * but the PIO transfers won't work if BAR0 falls at the odd 8 bytes. |
@@ -1946,11 +2149,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, | |||
1946 | 2149 | ||
1947 | #endif /* CONFIG_PCI_MSI */ | 2150 | #endif /* CONFIG_PCI_MSI */ |
1948 | 2151 | ||
1949 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) | 2152 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, |
2153 | struct pci_fixup *end) | ||
1950 | { | 2154 | { |
1951 | while (f < end) { | 2155 | while (f < end) { |
1952 | if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && | 2156 | if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && |
1953 | (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { | 2157 | (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { |
1954 | dev_dbg(&dev->dev, "calling %pF\n", f->hook); | 2158 | dev_dbg(&dev->dev, "calling %pF\n", f->hook); |
1955 | f->hook(dev); | 2159 | f->hook(dev); |
1956 | } | 2160 | } |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index ea979f2bc6db..704608945780 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -536,9 +536,8 @@ static void pci_bus_dump_res(struct pci_bus *bus) | |||
536 | if (!res) | 536 | if (!res) |
537 | continue; | 537 | continue; |
538 | 538 | ||
539 | printk(KERN_INFO "bus: %02x index %x %s: %pR\n", | 539 | dev_printk(KERN_DEBUG, &bus->dev, "resource %d %s %pR\n", i, |
540 | bus->number, i, | 540 | (res->flags & IORESOURCE_IO) ? "io: " : "mem:", res); |
541 | (res->flags & IORESOURCE_IO) ? "io port" : "mmio", res); | ||
542 | } | 541 | } |
543 | } | 542 | } |
544 | 543 | ||
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 2dbd96cce2d8..32e8d88a4619 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -26,11 +26,13 @@ | |||
26 | #include "pci.h" | 26 | #include "pci.h" |
27 | 27 | ||
28 | 28 | ||
29 | void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) | 29 | void pci_update_resource(struct pci_dev *dev, int resno) |
30 | { | 30 | { |
31 | struct pci_bus_region region; | 31 | struct pci_bus_region region; |
32 | u32 new, check, mask; | 32 | u32 new, check, mask; |
33 | int reg; | 33 | int reg; |
34 | enum pci_bar_type type; | ||
35 | struct resource *res = dev->resource + resno; | ||
34 | 36 | ||
35 | /* | 37 | /* |
36 | * Ignore resources for unimplemented BARs and unused resource slots | 38 | * Ignore resources for unimplemented BARs and unused resource slots |
@@ -61,17 +63,13 @@ void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) | |||
61 | else | 63 | else |
62 | mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; | 64 | mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; |
63 | 65 | ||
64 | if (resno < 6) { | 66 | reg = pci_resource_bar(dev, resno, &type); |
65 | reg = PCI_BASE_ADDRESS_0 + 4 * resno; | 67 | if (!reg) |
66 | } else if (resno == PCI_ROM_RESOURCE) { | 68 | return; |
69 | if (type != pci_bar_unknown) { | ||
67 | if (!(res->flags & IORESOURCE_ROM_ENABLE)) | 70 | if (!(res->flags & IORESOURCE_ROM_ENABLE)) |
68 | return; | 71 | return; |
69 | new |= PCI_ROM_ADDRESS_ENABLE; | 72 | new |= PCI_ROM_ADDRESS_ENABLE; |
70 | reg = dev->rom_base_reg; | ||
71 | } else { | ||
72 | /* Hmm, non-standard resource. */ | ||
73 | |||
74 | return; /* kill uninitialised var warning */ | ||
75 | } | 73 | } |
76 | 74 | ||
77 | pci_write_config_dword(dev, reg, new); | 75 | pci_write_config_dword(dev, reg, new); |
@@ -134,7 +132,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno) | |||
134 | 132 | ||
135 | align = resource_alignment(res); | 133 | align = resource_alignment(res); |
136 | if (!align) { | 134 | if (!align) { |
137 | dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus " | 135 | dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus " |
138 | "alignment) %pR flags %#lx\n", | 136 | "alignment) %pR flags %#lx\n", |
139 | resno, res, res->flags); | 137 | resno, res, res->flags); |
140 | return -EINVAL; | 138 | return -EINVAL; |
@@ -157,12 +155,12 @@ int pci_assign_resource(struct pci_dev *dev, int resno) | |||
157 | } | 155 | } |
158 | 156 | ||
159 | if (ret) { | 157 | if (ret) { |
160 | dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", | 158 | dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", |
161 | resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res); | 159 | resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res); |
162 | } else { | 160 | } else { |
163 | res->flags &= ~IORESOURCE_STARTALIGN; | 161 | res->flags &= ~IORESOURCE_STARTALIGN; |
164 | if (resno < PCI_BRIDGE_RESOURCES) | 162 | if (resno < PCI_BRIDGE_RESOURCES) |
165 | pci_update_resource(dev, res, resno); | 163 | pci_update_resource(dev, resno); |
166 | } | 164 | } |
167 | 165 | ||
168 | return ret; | 166 | return ret; |
@@ -197,7 +195,7 @@ int pci_assign_resource_fixed(struct pci_dev *dev, int resno) | |||
197 | dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", | 195 | dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", |
198 | resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res); | 196 | resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res); |
199 | } else if (resno < PCI_BRIDGE_RESOURCES) { | 197 | } else if (resno < PCI_BRIDGE_RESOURCES) { |
200 | pci_update_resource(dev, res, resno); | 198 | pci_update_resource(dev, resno); |
201 | } | 199 | } |
202 | 200 | ||
203 | return ret; | 201 | return ret; |
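With the new signature, callers name the BAR by index only; pci_update_resource() finds the struct resource itself (dev->resource + resno) and asks pci_resource_bar() for the matching register and BAR type. Moving a BAR after assignment therefore reduces to a sketch like this (new_start and size stand in for values from the allocator):

	struct resource *res = &dev->resource[resno];

	res->start = new_start;
	res->end = new_start + size - 1;
	pci_update_resource(dev, resno);	/* rewrites the BAR register(s) */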