author     Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:55:21 -0400
committer  Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:55:21 -0400
commit     bbb20089a3275a19e475dbc21320c3742e3ca423
tree       216fdc1cbef450ca688135c5b8969169482d9a48  /drivers/pci/pcie
parent     3e48e656903e9fd8bc805c6a2c4264d7808d315b
parent     657a77fa7284d8ae28dfa48f1dc5d919bf5b2843
Merge branch 'dmaengine' into async-tx-next
Conflicts:
crypto/async_tx/async_xor.c
drivers/dma/ioat/dma_v2.h
drivers/dma/ioat/pci.c
drivers/md/raid5.c
Diffstat (limited to 'drivers/pci/pcie')
-rw-r--r--  drivers/pci/pcie/aer/Kconfig        |  15
-rw-r--r--  drivers/pci/pcie/aer/Kconfig.debug  |  18
-rw-r--r--  drivers/pci/pcie/aer/Makefile       |   3
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c   | 473
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c       |   3
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.h       |   6
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c  | 278
-rw-r--r--  drivers/pci/pcie/aer/ecrc.c         | 131
-rw-r--r--  drivers/pci/pcie/aspm.c             | 787
-rw-r--r--  drivers/pci/pcie/portdrv_core.c     |   2
10 files changed, 1211 insertions, 505 deletions
diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig
index c3bde588aa13..50e94e02378a 100644
--- a/drivers/pci/pcie/aer/Kconfig
+++ b/drivers/pci/pcie/aer/Kconfig
@@ -10,3 +10,18 @@ config PCIEAER | |||
10 | This enables PCI Express Root Port Advanced Error Reporting | 10 | This enables PCI Express Root Port Advanced Error Reporting |
11 | (AER) driver support. Error reporting messages sent to Root | 11 | (AER) driver support. Error reporting messages sent to Root |
12 | Port will be handled by PCI Express AER driver. | 12 | Port will be handled by PCI Express AER driver. |
13 | |||
14 | |||
15 | # | ||
16 | # PCI Express ECRC | ||
17 | # | ||
18 | config PCIE_ECRC | ||
19 | bool "PCI Express ECRC settings control" | ||
20 | depends on PCIEAER | ||
21 | help | ||
22 | Used to override firmware/bios settings for PCI Express ECRC | ||
23 | (transaction layer end-to-end CRC checking). | ||
24 | |||
25 | When in doubt, say N. | ||
26 | |||
27 | source "drivers/pci/pcie/aer/Kconfig.debug" | ||
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug
new file mode 100644
index 000000000000..b8c925c1f6aa
--- /dev/null
+++ b/drivers/pci/pcie/aer/Kconfig.debug
@@ -0,0 +1,18 @@ | |||
1 | # | ||
2 | # PCI Express Root Port Device AER Debug Configuration | ||
3 | # | ||
4 | |||
5 | config PCIEAER_INJECT | ||
6 | tristate "PCIE AER error injector support" | ||
7 | depends on PCIEAER | ||
8 | default n | ||
9 | help | ||
10 | This enables PCI Express Root Port Advanced Error Reporting | ||
11 | (AER) software error injector. | ||
12 | |||
13 | Debugging PCIE AER code is quite difficult because it is hard | ||
14 | to trigger various real hardware errors. Software-based | ||
15 | error injection can fake almost all kinds of errors with the | ||
16 | help of a user space helper tool aer-inject, which may be | ||
17 | obtained from: | ||
18 | http://www.kernel.org/pub/linux/utils/pci/aer-inject/ | ||
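
A minimal userspace sketch (not part of this patch) of how the injector is
driven: the struct must mirror the kernel's struct aer_error_inj added in
aer_inject.c further down, the write handler accepts exactly one whole record
per write and requires CAP_SYS_ADMIN, and the device path, target
bus/device/function and status bits used here are illustrative assumptions.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>

	/* Userspace mirror of the kernel's struct aer_error_inj (assumed layout). */
	struct aer_error_inj {
		uint8_t  bus;
		uint8_t  dev;
		uint8_t  fn;
		uint32_t uncor_status;
		uint32_t cor_status;
		uint32_t header_log0;
		uint32_t header_log1;
		uint32_t header_log2;
		uint32_t header_log3;
	};

	int main(void)
	{
		struct aer_error_inj einj;
		int fd;

		memset(&einj, 0, sizeof(einj));
		einj.bus = 0x01;              /* example target: device 01:00.0 */
		einj.dev = 0x00;
		einj.fn  = 0x00;
		einj.cor_status = 0x00000040; /* one correctable status bit; value is illustrative */

		/* Node name comes from the miscdevice registration ("aer_inject"). */
		fd = open("/dev/aer_inject", O_WRONLY);
		if (fd < 0) {
			perror("open /dev/aer_inject");
			return 1;
		}
		/* The kernel side rejects anything other than a whole record. */
		if (write(fd, &einj, sizeof(einj)) != (ssize_t)sizeof(einj)) {
			perror("write");
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}

The aer-inject tool linked above is a small front end for this same interface,
so hand-rolled writes like this are normally only needed when the tool is not
available.
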
diff --git a/drivers/pci/pcie/aer/Makefile b/drivers/pci/pcie/aer/Makefile
index 8da3bd8455a8..2cba67510dc8 100644
--- a/drivers/pci/pcie/aer/Makefile
+++ b/drivers/pci/pcie/aer/Makefile
@@ -4,6 +4,9 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_PCIEAER) += aerdriver.o | 5 | obj-$(CONFIG_PCIEAER) += aerdriver.o |
6 | 6 | ||
7 | obj-$(CONFIG_PCIE_ECRC) += ecrc.o | ||
8 | |||
7 | aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o | 9 | aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o |
8 | aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o | 10 | aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o |
9 | 11 | ||
12 | obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o | ||
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
new file mode 100644
index 000000000000..d92ae21a59d8
--- /dev/null
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -0,0 +1,473 @@ | |||
1 | /* | ||
2 | * PCIE AER software error injection support. | ||
3 | * | ||
4 | * Debugging PCIE AER code is quite difficult because it is hard to | ||
5 | * trigger various real hardware errors. Software-based error | ||
6 | * injection can fake almost all kinds of errors with the help of a | ||
7 | * user space helper tool aer-inject, which may be obtained from: | ||
8 | * http://www.kernel.org/pub/linux/utils/pci/aer-inject/ | ||
9 | * | ||
10 | * Copyright 2009 Intel Corporation. | ||
11 | * Huang Ying <ying.huang@intel.com> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or | ||
14 | * modify it under the terms of the GNU General Public License | ||
15 | * as published by the Free Software Foundation; version 2 | ||
16 | * of the License. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/miscdevice.h> | ||
23 | #include <linux/pci.h> | ||
24 | #include <linux/fs.h> | ||
25 | #include <asm/uaccess.h> | ||
26 | #include "aerdrv.h" | ||
27 | |||
28 | struct aer_error_inj | ||
29 | { | ||
30 | u8 bus; | ||
31 | u8 dev; | ||
32 | u8 fn; | ||
33 | u32 uncor_status; | ||
34 | u32 cor_status; | ||
35 | u32 header_log0; | ||
36 | u32 header_log1; | ||
37 | u32 header_log2; | ||
38 | u32 header_log3; | ||
39 | }; | ||
40 | |||
41 | struct aer_error | ||
42 | { | ||
43 | struct list_head list; | ||
44 | unsigned int bus; | ||
45 | unsigned int devfn; | ||
46 | int pos_cap_err; | ||
47 | |||
48 | u32 uncor_status; | ||
49 | u32 cor_status; | ||
50 | u32 header_log0; | ||
51 | u32 header_log1; | ||
52 | u32 header_log2; | ||
53 | u32 header_log3; | ||
54 | u32 root_status; | ||
55 | u32 source_id; | ||
56 | }; | ||
57 | |||
58 | struct pci_bus_ops | ||
59 | { | ||
60 | struct list_head list; | ||
61 | struct pci_bus *bus; | ||
62 | struct pci_ops *ops; | ||
63 | }; | ||
64 | |||
65 | static LIST_HEAD(einjected); | ||
66 | |||
67 | static LIST_HEAD(pci_bus_ops_list); | ||
68 | |||
69 | /* Protect einjected and pci_bus_ops_list */ | ||
70 | static DEFINE_SPINLOCK(inject_lock); | ||
71 | |||
72 | static void aer_error_init(struct aer_error *err, unsigned int bus, | ||
73 | unsigned int devfn, int pos_cap_err) | ||
74 | { | ||
75 | INIT_LIST_HEAD(&err->list); | ||
76 | err->bus = bus; | ||
77 | err->devfn = devfn; | ||
78 | err->pos_cap_err = pos_cap_err; | ||
79 | } | ||
80 | |||
81 | /* inject_lock must be held before calling */ | ||
82 | static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn) | ||
83 | { | ||
84 | struct aer_error *err; | ||
85 | |||
86 | list_for_each_entry(err, &einjected, list) { | ||
87 | if (bus == err->bus && devfn == err->devfn) | ||
88 | return err; | ||
89 | } | ||
90 | return NULL; | ||
91 | } | ||
92 | |||
93 | /* inject_lock must be held before calling */ | ||
94 | static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev) | ||
95 | { | ||
96 | return __find_aer_error(dev->bus->number, dev->devfn); | ||
97 | } | ||
98 | |||
99 | /* inject_lock must be held before calling */ | ||
100 | static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus) | ||
101 | { | ||
102 | struct pci_bus_ops *bus_ops; | ||
103 | |||
104 | list_for_each_entry(bus_ops, &pci_bus_ops_list, list) { | ||
105 | if (bus_ops->bus == bus) | ||
106 | return bus_ops->ops; | ||
107 | } | ||
108 | return NULL; | ||
109 | } | ||
110 | |||
111 | static struct pci_bus_ops *pci_bus_ops_pop(void) | ||
112 | { | ||
113 | unsigned long flags; | ||
114 | struct pci_bus_ops *bus_ops = NULL; | ||
115 | |||
116 | spin_lock_irqsave(&inject_lock, flags); | ||
117 | if (list_empty(&pci_bus_ops_list)) | ||
118 | bus_ops = NULL; | ||
119 | else { | ||
120 | struct list_head *lh = pci_bus_ops_list.next; | ||
121 | list_del(lh); | ||
122 | bus_ops = list_entry(lh, struct pci_bus_ops, list); | ||
123 | } | ||
124 | spin_unlock_irqrestore(&inject_lock, flags); | ||
125 | return bus_ops; | ||
126 | } | ||
127 | |||
128 | static u32 *find_pci_config_dword(struct aer_error *err, int where, | ||
129 | int *prw1cs) | ||
130 | { | ||
131 | int rw1cs = 0; | ||
132 | u32 *target = NULL; | ||
133 | |||
134 | if (err->pos_cap_err == -1) | ||
135 | return NULL; | ||
136 | |||
137 | switch (where - err->pos_cap_err) { | ||
138 | case PCI_ERR_UNCOR_STATUS: | ||
139 | target = &err->uncor_status; | ||
140 | rw1cs = 1; | ||
141 | break; | ||
142 | case PCI_ERR_COR_STATUS: | ||
143 | target = &err->cor_status; | ||
144 | rw1cs = 1; | ||
145 | break; | ||
146 | case PCI_ERR_HEADER_LOG: | ||
147 | target = &err->header_log0; | ||
148 | break; | ||
149 | case PCI_ERR_HEADER_LOG+4: | ||
150 | target = &err->header_log1; | ||
151 | break; | ||
152 | case PCI_ERR_HEADER_LOG+8: | ||
153 | target = &err->header_log2; | ||
154 | break; | ||
155 | case PCI_ERR_HEADER_LOG+12: | ||
156 | target = &err->header_log3; | ||
157 | break; | ||
158 | case PCI_ERR_ROOT_STATUS: | ||
159 | target = &err->root_status; | ||
160 | rw1cs = 1; | ||
161 | break; | ||
162 | case PCI_ERR_ROOT_COR_SRC: | ||
163 | target = &err->source_id; | ||
164 | break; | ||
165 | } | ||
166 | if (prw1cs) | ||
167 | *prw1cs = rw1cs; | ||
168 | return target; | ||
169 | } | ||
170 | |||
171 | static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where, | ||
172 | int size, u32 *val) | ||
173 | { | ||
174 | u32 *sim; | ||
175 | struct aer_error *err; | ||
176 | unsigned long flags; | ||
177 | struct pci_ops *ops; | ||
178 | |||
179 | spin_lock_irqsave(&inject_lock, flags); | ||
180 | if (size != sizeof(u32)) | ||
181 | goto out; | ||
182 | err = __find_aer_error(bus->number, devfn); | ||
183 | if (!err) | ||
184 | goto out; | ||
185 | |||
186 | sim = find_pci_config_dword(err, where, NULL); | ||
187 | if (sim) { | ||
188 | *val = *sim; | ||
189 | spin_unlock_irqrestore(&inject_lock, flags); | ||
190 | return 0; | ||
191 | } | ||
192 | out: | ||
193 | ops = __find_pci_bus_ops(bus); | ||
194 | spin_unlock_irqrestore(&inject_lock, flags); | ||
195 | return ops->read(bus, devfn, where, size, val); | ||
196 | } | ||
197 | |||
198 | int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size, | ||
199 | u32 val) | ||
200 | { | ||
201 | u32 *sim; | ||
202 | struct aer_error *err; | ||
203 | unsigned long flags; | ||
204 | int rw1cs; | ||
205 | struct pci_ops *ops; | ||
206 | |||
207 | spin_lock_irqsave(&inject_lock, flags); | ||
208 | if (size != sizeof(u32)) | ||
209 | goto out; | ||
210 | err = __find_aer_error(bus->number, devfn); | ||
211 | if (!err) | ||
212 | goto out; | ||
213 | |||
214 | sim = find_pci_config_dword(err, where, &rw1cs); | ||
215 | if (sim) { | ||
216 | if (rw1cs) | ||
217 | *sim ^= val; | ||
218 | else | ||
219 | *sim = val; | ||
220 | spin_unlock_irqrestore(&inject_lock, flags); | ||
221 | return 0; | ||
222 | } | ||
223 | out: | ||
224 | ops = __find_pci_bus_ops(bus); | ||
225 | spin_unlock_irqrestore(&inject_lock, flags); | ||
226 | return ops->write(bus, devfn, where, size, val); | ||
227 | } | ||
228 | |||
229 | static struct pci_ops pci_ops_aer = { | ||
230 | .read = pci_read_aer, | ||
231 | .write = pci_write_aer, | ||
232 | }; | ||
233 | |||
234 | static void pci_bus_ops_init(struct pci_bus_ops *bus_ops, | ||
235 | struct pci_bus *bus, | ||
236 | struct pci_ops *ops) | ||
237 | { | ||
238 | INIT_LIST_HEAD(&bus_ops->list); | ||
239 | bus_ops->bus = bus; | ||
240 | bus_ops->ops = ops; | ||
241 | } | ||
242 | |||
243 | static int pci_bus_set_aer_ops(struct pci_bus *bus) | ||
244 | { | ||
245 | struct pci_ops *ops; | ||
246 | struct pci_bus_ops *bus_ops; | ||
247 | unsigned long flags; | ||
248 | |||
249 | bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL); | ||
250 | if (!bus_ops) | ||
251 | return -ENOMEM; | ||
252 | ops = pci_bus_set_ops(bus, &pci_ops_aer); | ||
253 | spin_lock_irqsave(&inject_lock, flags); | ||
254 | if (ops == &pci_ops_aer) | ||
255 | goto out; | ||
256 | pci_bus_ops_init(bus_ops, bus, ops); | ||
257 | list_add(&bus_ops->list, &pci_bus_ops_list); | ||
258 | bus_ops = NULL; | ||
259 | out: | ||
260 | spin_unlock_irqrestore(&inject_lock, flags); | ||
261 | if (bus_ops) | ||
262 | kfree(bus_ops); | ||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | static struct pci_dev *pcie_find_root_port(struct pci_dev *dev) | ||
267 | { | ||
268 | while (1) { | ||
269 | if (!dev->is_pcie) | ||
270 | break; | ||
271 | if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) | ||
272 | return dev; | ||
273 | if (!dev->bus->self) | ||
274 | break; | ||
275 | dev = dev->bus->self; | ||
276 | } | ||
277 | return NULL; | ||
278 | } | ||
279 | |||
280 | static int find_aer_device_iter(struct device *device, void *data) | ||
281 | { | ||
282 | struct pcie_device **result = data; | ||
283 | struct pcie_device *pcie_dev; | ||
284 | |||
285 | if (device->bus == &pcie_port_bus_type) { | ||
286 | pcie_dev = to_pcie_device(device); | ||
287 | if (pcie_dev->service & PCIE_PORT_SERVICE_AER) { | ||
288 | *result = pcie_dev; | ||
289 | return 1; | ||
290 | } | ||
291 | } | ||
292 | return 0; | ||
293 | } | ||
294 | |||
295 | static int find_aer_device(struct pci_dev *dev, struct pcie_device **result) | ||
296 | { | ||
297 | return device_for_each_child(&dev->dev, result, find_aer_device_iter); | ||
298 | } | ||
299 | |||
300 | static int aer_inject(struct aer_error_inj *einj) | ||
301 | { | ||
302 | struct aer_error *err, *rperr; | ||
303 | struct aer_error *err_alloc = NULL, *rperr_alloc = NULL; | ||
304 | struct pci_dev *dev, *rpdev; | ||
305 | struct pcie_device *edev; | ||
306 | unsigned long flags; | ||
307 | unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn); | ||
308 | int pos_cap_err, rp_pos_cap_err; | ||
309 | u32 sever; | ||
310 | int ret = 0; | ||
311 | |||
312 | dev = pci_get_bus_and_slot(einj->bus, devfn); | ||
313 | if (!dev) | ||
314 | return -EINVAL; | ||
315 | rpdev = pcie_find_root_port(dev); | ||
316 | if (!rpdev) { | ||
317 | ret = -EINVAL; | ||
318 | goto out_put; | ||
319 | } | ||
320 | |||
321 | pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | ||
322 | if (!pos_cap_err) { | ||
323 | ret = -EIO; | ||
324 | goto out_put; | ||
325 | } | ||
326 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever); | ||
327 | |||
328 | rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR); | ||
329 | if (!rp_pos_cap_err) { | ||
330 | ret = -EIO; | ||
331 | goto out_put; | ||
332 | } | ||
333 | |||
334 | err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL); | ||
335 | if (!err_alloc) { | ||
336 | ret = -ENOMEM; | ||
337 | goto out_put; | ||
338 | } | ||
339 | rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL); | ||
340 | if (!rperr_alloc) { | ||
341 | ret = -ENOMEM; | ||
342 | goto out_put; | ||
343 | } | ||
344 | |||
345 | spin_lock_irqsave(&inject_lock, flags); | ||
346 | |||
347 | err = __find_aer_error_by_dev(dev); | ||
348 | if (!err) { | ||
349 | err = err_alloc; | ||
350 | err_alloc = NULL; | ||
351 | aer_error_init(err, einj->bus, devfn, pos_cap_err); | ||
352 | list_add(&err->list, &einjected); | ||
353 | } | ||
354 | err->uncor_status |= einj->uncor_status; | ||
355 | err->cor_status |= einj->cor_status; | ||
356 | err->header_log0 = einj->header_log0; | ||
357 | err->header_log1 = einj->header_log1; | ||
358 | err->header_log2 = einj->header_log2; | ||
359 | err->header_log3 = einj->header_log3; | ||
360 | |||
361 | rperr = __find_aer_error_by_dev(rpdev); | ||
362 | if (!rperr) { | ||
363 | rperr = rperr_alloc; | ||
364 | rperr_alloc = NULL; | ||
365 | aer_error_init(rperr, rpdev->bus->number, rpdev->devfn, | ||
366 | rp_pos_cap_err); | ||
367 | list_add(&rperr->list, &einjected); | ||
368 | } | ||
369 | if (einj->cor_status) { | ||
370 | if (rperr->root_status & PCI_ERR_ROOT_COR_RCV) | ||
371 | rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV; | ||
372 | else | ||
373 | rperr->root_status |= PCI_ERR_ROOT_COR_RCV; | ||
374 | rperr->source_id &= 0xffff0000; | ||
375 | rperr->source_id |= (einj->bus << 8) | devfn; | ||
376 | } | ||
377 | if (einj->uncor_status) { | ||
378 | if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV) | ||
379 | rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV; | ||
380 | if (sever & einj->uncor_status) { | ||
381 | rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV; | ||
382 | if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)) | ||
383 | rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL; | ||
384 | } else | ||
385 | rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV; | ||
386 | rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV; | ||
387 | rperr->source_id &= 0x0000ffff; | ||
388 | rperr->source_id |= ((einj->bus << 8) | devfn) << 16; | ||
389 | } | ||
390 | spin_unlock_irqrestore(&inject_lock, flags); | ||
391 | |||
392 | ret = pci_bus_set_aer_ops(dev->bus); | ||
393 | if (ret) | ||
394 | goto out_put; | ||
395 | ret = pci_bus_set_aer_ops(rpdev->bus); | ||
396 | if (ret) | ||
397 | goto out_put; | ||
398 | |||
399 | if (find_aer_device(rpdev, &edev)) | ||
400 | aer_irq(-1, edev); | ||
401 | else | ||
402 | ret = -EINVAL; | ||
403 | out_put: | ||
404 | if (err_alloc) | ||
405 | kfree(err_alloc); | ||
406 | if (rperr_alloc) | ||
407 | kfree(rperr_alloc); | ||
408 | pci_dev_put(dev); | ||
409 | return ret; | ||
410 | } | ||
411 | |||
412 | static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf, | ||
413 | size_t usize, loff_t *off) | ||
414 | { | ||
415 | struct aer_error_inj einj; | ||
416 | int ret; | ||
417 | |||
418 | if (!capable(CAP_SYS_ADMIN)) | ||
419 | return -EPERM; | ||
420 | |||
421 | if (usize != sizeof(struct aer_error_inj)) | ||
422 | return -EINVAL; | ||
423 | |||
424 | if (copy_from_user(&einj, ubuf, usize)) | ||
425 | return -EFAULT; | ||
426 | |||
427 | ret = aer_inject(&einj); | ||
428 | return ret ? ret : usize; | ||
429 | } | ||
430 | |||
431 | static const struct file_operations aer_inject_fops = { | ||
432 | .write = aer_inject_write, | ||
433 | .owner = THIS_MODULE, | ||
434 | }; | ||
435 | |||
436 | static struct miscdevice aer_inject_device = { | ||
437 | .minor = MISC_DYNAMIC_MINOR, | ||
438 | .name = "aer_inject", | ||
439 | .fops = &aer_inject_fops, | ||
440 | }; | ||
441 | |||
442 | static int __init aer_inject_init(void) | ||
443 | { | ||
444 | return misc_register(&aer_inject_device); | ||
445 | } | ||
446 | |||
447 | static void __exit aer_inject_exit(void) | ||
448 | { | ||
449 | struct aer_error *err, *err_next; | ||
450 | unsigned long flags; | ||
451 | struct pci_bus_ops *bus_ops; | ||
452 | |||
453 | misc_deregister(&aer_inject_device); | ||
454 | |||
455 | while ((bus_ops = pci_bus_ops_pop())) { | ||
456 | pci_bus_set_ops(bus_ops->bus, bus_ops->ops); | ||
457 | kfree(bus_ops); | ||
458 | } | ||
459 | |||
460 | spin_lock_irqsave(&inject_lock, flags); | ||
461 | list_for_each_entry_safe(err, err_next, | ||
462 | &pci_bus_ops_list, list) { | ||
463 | list_del(&err->list); | ||
464 | kfree(err); | ||
465 | } | ||
466 | spin_unlock_irqrestore(&inject_lock, flags); | ||
467 | } | ||
468 | |||
469 | module_init(aer_inject_init); | ||
470 | module_exit(aer_inject_exit); | ||
471 | |||
472 | MODULE_DESCRIPTION("PCIE AER software error injector"); | ||
473 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 32ade5af927e..4770f13b3ca1 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -77,7 +77,7 @@ void pci_no_aer(void) | |||
77 | * | 77 | * |
78 | * Invoked when Root Port detects AER messages. | 78 | * Invoked when Root Port detects AER messages. |
79 | **/ | 79 | **/ |
80 | static irqreturn_t aer_irq(int irq, void *context) | 80 | irqreturn_t aer_irq(int irq, void *context) |
81 | { | 81 | { |
82 | unsigned int status, id; | 82 | unsigned int status, id; |
83 | struct pcie_device *pdev = (struct pcie_device *)context; | 83 | struct pcie_device *pdev = (struct pcie_device *)context; |
@@ -126,6 +126,7 @@ static irqreturn_t aer_irq(int irq, void *context) | |||
126 | 126 | ||
127 | return IRQ_HANDLED; | 127 | return IRQ_HANDLED; |
128 | } | 128 | } |
129 | EXPORT_SYMBOL_GPL(aer_irq); | ||
129 | 130 | ||
130 | /** | 131 | /** |
131 | * aer_alloc_rpc - allocate Root Port data structure | 132 | * aer_alloc_rpc - allocate Root Port data structure |
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index aa14482a4779..bbd7428ca2d0 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
12 | #include <linux/pcieport_if.h> | 12 | #include <linux/pcieport_if.h> |
13 | #include <linux/aer.h> | 13 | #include <linux/aer.h> |
14 | #include <linux/interrupt.h> | ||
14 | 15 | ||
15 | #define AER_NONFATAL 0 | 16 | #define AER_NONFATAL 0 |
16 | #define AER_FATAL 1 | 17 | #define AER_FATAL 1 |
@@ -56,7 +57,11 @@ struct header_log_regs { | |||
56 | unsigned int dw3; | 57 | unsigned int dw3; |
57 | }; | 58 | }; |
58 | 59 | ||
60 | #define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */ | ||
59 | struct aer_err_info { | 61 | struct aer_err_info { |
62 | struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES]; | ||
63 | int error_dev_num; | ||
64 | u16 id; | ||
60 | int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */ | 65 | int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */ |
61 | int flags; | 66 | int flags; |
62 | unsigned int status; /* COR/UNCOR Error Status */ | 67 | unsigned int status; /* COR/UNCOR Error Status */ |
@@ -120,6 +125,7 @@ extern void aer_delete_rootport(struct aer_rpc *rpc); | |||
120 | extern int aer_init(struct pcie_device *dev); | 125 | extern int aer_init(struct pcie_device *dev); |
121 | extern void aer_isr(struct work_struct *work); | 126 | extern void aer_isr(struct work_struct *work); |
122 | extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); | 127 | extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); |
128 | extern irqreturn_t aer_irq(int irq, void *context); | ||
123 | 129 | ||
124 | #ifdef CONFIG_ACPI | 130 | #ifdef CONFIG_ACPI |
125 | extern int aer_osc_setup(struct pcie_device *pciedev); | 131 | extern int aer_osc_setup(struct pcie_device *pciedev); |
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 307452f30035..3d8872704a58 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -26,7 +26,9 @@ | |||
26 | #include "aerdrv.h" | 26 | #include "aerdrv.h" |
27 | 27 | ||
28 | static int forceload; | 28 | static int forceload; |
29 | static int nosourceid; | ||
29 | module_param(forceload, bool, 0); | 30 | module_param(forceload, bool, 0); |
31 | module_param(nosourceid, bool, 0); | ||
30 | 32 | ||
31 | int pci_enable_pcie_error_reporting(struct pci_dev *dev) | 33 | int pci_enable_pcie_error_reporting(struct pci_dev *dev) |
32 | { | 34 | { |
@@ -109,19 +111,23 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev) | |||
109 | #endif /* 0 */ | 111 | #endif /* 0 */ |
110 | 112 | ||
111 | 113 | ||
112 | static void set_device_error_reporting(struct pci_dev *dev, void *data) | 114 | static int set_device_error_reporting(struct pci_dev *dev, void *data) |
113 | { | 115 | { |
114 | bool enable = *((bool *)data); | 116 | bool enable = *((bool *)data); |
115 | 117 | ||
116 | if (dev->pcie_type != PCIE_RC_PORT && | 118 | if (dev->pcie_type == PCIE_RC_PORT || |
117 | dev->pcie_type != PCIE_SW_UPSTREAM_PORT && | 119 | dev->pcie_type == PCIE_SW_UPSTREAM_PORT || |
118 | dev->pcie_type != PCIE_SW_DOWNSTREAM_PORT) | 120 | dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) { |
119 | return; | 121 | if (enable) |
122 | pci_enable_pcie_error_reporting(dev); | ||
123 | else | ||
124 | pci_disable_pcie_error_reporting(dev); | ||
125 | } | ||
120 | 126 | ||
121 | if (enable) | 127 | if (enable) |
122 | pci_enable_pcie_error_reporting(dev); | 128 | pcie_set_ecrc_checking(dev); |
123 | else | 129 | |
124 | pci_disable_pcie_error_reporting(dev); | 130 | return 0; |
125 | } | 131 | } |
126 | 132 | ||
127 | /** | 133 | /** |
@@ -139,73 +145,148 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev, | |||
139 | pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable); | 145 | pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable); |
140 | } | 146 | } |
141 | 147 | ||
142 | static int find_device_iter(struct device *device, void *data) | 148 | static inline int compare_device_id(struct pci_dev *dev, |
149 | struct aer_err_info *e_info) | ||
143 | { | 150 | { |
144 | struct pci_dev *dev; | 151 | if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) { |
145 | u16 id = *(unsigned long *)data; | 152 | /* |
146 | u8 secondary, subordinate, d_bus = id >> 8; | 153 | * Device ID match |
154 | */ | ||
155 | return 1; | ||
156 | } | ||
147 | 157 | ||
148 | if (device->bus == &pci_bus_type) { | 158 | return 0; |
149 | dev = to_pci_dev(device); | 159 | } |
150 | if (id == ((dev->bus->number << 8) | dev->devfn)) { | 160 | |
151 | /* | 161 | static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev) |
152 | * Device ID match | 162 | { |
153 | */ | 163 | if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) { |
154 | *(unsigned long*)data = (unsigned long)device; | 164 | e_info->dev[e_info->error_dev_num] = dev; |
155 | return 1; | 165 | e_info->error_dev_num++; |
156 | } | 166 | return 1; |
167 | } else | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | |||
172 | #define PCI_BUS(x) (((x) >> 8) & 0xff) | ||
173 | |||
174 | static int find_device_iter(struct pci_dev *dev, void *data) | ||
175 | { | ||
176 | int pos; | ||
177 | u32 status; | ||
178 | u32 mask; | ||
179 | u16 reg16; | ||
180 | int result; | ||
181 | struct aer_err_info *e_info = (struct aer_err_info *)data; | ||
182 | |||
183 | /* | ||
184 | * When bus id is equal to 0, it might be a bad id | ||
185 | * reported by root port. | ||
186 | */ | ||
187 | if (!nosourceid && (PCI_BUS(e_info->id) != 0)) { | ||
188 | result = compare_device_id(dev, e_info); | ||
189 | if (result) | ||
190 | add_error_device(e_info, dev); | ||
157 | 191 | ||
158 | /* | 192 | /* |
159 | * If device is P2P, check if it is an upstream? | 193 | * If there is no multiple error, whether we stop |
194 | * or continue depends on the id comparison. | ||
160 | */ | 195 | */ |
161 | if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) { | 196 | if (!(e_info->flags & AER_MULTI_ERROR_VALID_FLAG)) |
162 | pci_read_config_byte(dev, PCI_SECONDARY_BUS, | 197 | return result; |
163 | &secondary); | 198 | |
164 | pci_read_config_byte(dev, PCI_SUBORDINATE_BUS, | 199 | /* |
165 | &subordinate); | 200 | * If there are multiple errors and the id does match, |
166 | if (d_bus >= secondary && d_bus <= subordinate) { | 201 | * we need to continue searching other devices under |
167 | *(unsigned long*)data = (unsigned long)device; | 202 | * the root port; returning 0 means just that. |
168 | return 1; | 203 | */ |
169 | } | 204 | if (result) |
205 | return 0; | ||
206 | } | ||
207 | |||
208 | /* | ||
209 | * When either | ||
210 | * 1) nosourceid==y; | ||
211 | * 2) bus id is equal to 0. Some ports might lose the bus | ||
212 | * number in the error source id; | ||
213 | * 3) there are multiple errors and the prior id comparison fails; | ||
214 | * We check AER status registers to find the initial reporter. | ||
215 | */ | ||
216 | if (atomic_read(&dev->enable_cnt) == 0) | ||
217 | return 0; | ||
218 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
219 | if (!pos) | ||
220 | return 0; | ||
221 | /* Check if AER is enabled */ | ||
222 | pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, ®16); | ||
223 | if (!(reg16 & ( | ||
224 | PCI_EXP_DEVCTL_CERE | | ||
225 | PCI_EXP_DEVCTL_NFERE | | ||
226 | PCI_EXP_DEVCTL_FERE | | ||
227 | PCI_EXP_DEVCTL_URRE))) | ||
228 | return 0; | ||
229 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | ||
230 | if (!pos) | ||
231 | return 0; | ||
232 | |||
233 | status = 0; | ||
234 | mask = 0; | ||
235 | if (e_info->severity == AER_CORRECTABLE) { | ||
236 | pci_read_config_dword(dev, | ||
237 | pos + PCI_ERR_COR_STATUS, | ||
238 | &status); | ||
239 | pci_read_config_dword(dev, | ||
240 | pos + PCI_ERR_COR_MASK, | ||
241 | &mask); | ||
242 | if (status & ERR_CORRECTABLE_ERROR_MASK & ~mask) { | ||
243 | add_error_device(e_info, dev); | ||
244 | goto added; | ||
245 | } | ||
246 | } else { | ||
247 | pci_read_config_dword(dev, | ||
248 | pos + PCI_ERR_UNCOR_STATUS, | ||
249 | &status); | ||
250 | pci_read_config_dword(dev, | ||
251 | pos + PCI_ERR_UNCOR_MASK, | ||
252 | &mask); | ||
253 | if (status & ERR_UNCORRECTABLE_ERROR_MASK & ~mask) { | ||
254 | add_error_device(e_info, dev); | ||
255 | goto added; | ||
170 | } | 256 | } |
171 | } | 257 | } |
172 | 258 | ||
173 | return 0; | 259 | return 0; |
260 | |||
261 | added: | ||
262 | if (e_info->flags & AER_MULTI_ERROR_VALID_FLAG) | ||
263 | return 0; | ||
264 | else | ||
265 | return 1; | ||
174 | } | 266 | } |
175 | 267 | ||
176 | /** | 268 | /** |
177 | * find_source_device - search through device hierarchy for source device | 269 | * find_source_device - search through device hierarchy for source device |
178 | * @parent: pointer to Root Port pci_dev data structure | 270 | * @parent: pointer to Root Port pci_dev data structure |
179 | * @id: device ID of agent who sends an error message to this Root Port | 271 | * @err_info: including detailed error information such like id |
180 | * | 272 | * |
181 | * Invoked when error is detected at the Root Port. | 273 | * Invoked when error is detected at the Root Port. |
182 | */ | 274 | */ |
183 | static struct device* find_source_device(struct pci_dev *parent, u16 id) | 275 | static void find_source_device(struct pci_dev *parent, |
276 | struct aer_err_info *e_info) | ||
184 | { | 277 | { |
185 | struct pci_dev *dev = parent; | 278 | struct pci_dev *dev = parent; |
186 | struct device *device; | 279 | int result; |
187 | unsigned long device_addr; | ||
188 | int status; | ||
189 | 280 | ||
190 | /* Is Root Port an agent that sends error message? */ | 281 | /* Is Root Port an agent that sends error message? */ |
191 | if (id == ((dev->bus->number << 8) | dev->devfn)) | 282 | result = find_device_iter(dev, e_info); |
192 | return &dev->dev; | 283 | if (result) |
193 | 284 | return; | |
194 | do { | ||
195 | device_addr = id; | ||
196 | if ((status = device_for_each_child(&dev->dev, | ||
197 | &device_addr, find_device_iter))) { | ||
198 | device = (struct device*)device_addr; | ||
199 | dev = to_pci_dev(device); | ||
200 | if (id == ((dev->bus->number << 8) | dev->devfn)) | ||
201 | return device; | ||
202 | } | ||
203 | }while (status); | ||
204 | 285 | ||
205 | return NULL; | 286 | pci_walk_bus(parent->subordinate, find_device_iter, e_info); |
206 | } | 287 | } |
207 | 288 | ||
208 | static void report_error_detected(struct pci_dev *dev, void *data) | 289 | static int report_error_detected(struct pci_dev *dev, void *data) |
209 | { | 290 | { |
210 | pci_ers_result_t vote; | 291 | pci_ers_result_t vote; |
211 | struct pci_error_handlers *err_handler; | 292 | struct pci_error_handlers *err_handler; |
@@ -230,16 +311,16 @@ static void report_error_detected(struct pci_dev *dev, void *data) | |||
230 | dev->driver ? | 311 | dev->driver ? |
231 | "no AER-aware driver" : "no driver"); | 312 | "no AER-aware driver" : "no driver"); |
232 | } | 313 | } |
233 | return; | 314 | return 0; |
234 | } | 315 | } |
235 | 316 | ||
236 | err_handler = dev->driver->err_handler; | 317 | err_handler = dev->driver->err_handler; |
237 | vote = err_handler->error_detected(dev, result_data->state); | 318 | vote = err_handler->error_detected(dev, result_data->state); |
238 | result_data->result = merge_result(result_data->result, vote); | 319 | result_data->result = merge_result(result_data->result, vote); |
239 | return; | 320 | return 0; |
240 | } | 321 | } |
241 | 322 | ||
242 | static void report_mmio_enabled(struct pci_dev *dev, void *data) | 323 | static int report_mmio_enabled(struct pci_dev *dev, void *data) |
243 | { | 324 | { |
244 | pci_ers_result_t vote; | 325 | pci_ers_result_t vote; |
245 | struct pci_error_handlers *err_handler; | 326 | struct pci_error_handlers *err_handler; |
@@ -249,15 +330,15 @@ static void report_mmio_enabled(struct pci_dev *dev, void *data) | |||
249 | if (!dev->driver || | 330 | if (!dev->driver || |
250 | !dev->driver->err_handler || | 331 | !dev->driver->err_handler || |
251 | !dev->driver->err_handler->mmio_enabled) | 332 | !dev->driver->err_handler->mmio_enabled) |
252 | return; | 333 | return 0; |
253 | 334 | ||
254 | err_handler = dev->driver->err_handler; | 335 | err_handler = dev->driver->err_handler; |
255 | vote = err_handler->mmio_enabled(dev); | 336 | vote = err_handler->mmio_enabled(dev); |
256 | result_data->result = merge_result(result_data->result, vote); | 337 | result_data->result = merge_result(result_data->result, vote); |
257 | return; | 338 | return 0; |
258 | } | 339 | } |
259 | 340 | ||
260 | static void report_slot_reset(struct pci_dev *dev, void *data) | 341 | static int report_slot_reset(struct pci_dev *dev, void *data) |
261 | { | 342 | { |
262 | pci_ers_result_t vote; | 343 | pci_ers_result_t vote; |
263 | struct pci_error_handlers *err_handler; | 344 | struct pci_error_handlers *err_handler; |
@@ -267,15 +348,15 @@ static void report_slot_reset(struct pci_dev *dev, void *data) | |||
267 | if (!dev->driver || | 348 | if (!dev->driver || |
268 | !dev->driver->err_handler || | 349 | !dev->driver->err_handler || |
269 | !dev->driver->err_handler->slot_reset) | 350 | !dev->driver->err_handler->slot_reset) |
270 | return; | 351 | return 0; |
271 | 352 | ||
272 | err_handler = dev->driver->err_handler; | 353 | err_handler = dev->driver->err_handler; |
273 | vote = err_handler->slot_reset(dev); | 354 | vote = err_handler->slot_reset(dev); |
274 | result_data->result = merge_result(result_data->result, vote); | 355 | result_data->result = merge_result(result_data->result, vote); |
275 | return; | 356 | return 0; |
276 | } | 357 | } |
277 | 358 | ||
278 | static void report_resume(struct pci_dev *dev, void *data) | 359 | static int report_resume(struct pci_dev *dev, void *data) |
279 | { | 360 | { |
280 | struct pci_error_handlers *err_handler; | 361 | struct pci_error_handlers *err_handler; |
281 | 362 | ||
@@ -284,11 +365,11 @@ static void report_resume(struct pci_dev *dev, void *data) | |||
284 | if (!dev->driver || | 365 | if (!dev->driver || |
285 | !dev->driver->err_handler || | 366 | !dev->driver->err_handler || |
286 | !dev->driver->err_handler->resume) | 367 | !dev->driver->err_handler->resume) |
287 | return; | 368 | return 0; |
288 | 369 | ||
289 | err_handler = dev->driver->err_handler; | 370 | err_handler = dev->driver->err_handler; |
290 | err_handler->resume(dev); | 371 | err_handler->resume(dev); |
291 | return; | 372 | return 0; |
292 | } | 373 | } |
293 | 374 | ||
294 | /** | 375 | /** |
@@ -305,7 +386,7 @@ static void report_resume(struct pci_dev *dev, void *data) | |||
305 | static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, | 386 | static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, |
306 | enum pci_channel_state state, | 387 | enum pci_channel_state state, |
307 | char *error_mesg, | 388 | char *error_mesg, |
308 | void (*cb)(struct pci_dev *, void *)) | 389 | int (*cb)(struct pci_dev *, void *)) |
309 | { | 390 | { |
310 | struct aer_broadcast_data result_data; | 391 | struct aer_broadcast_data result_data; |
311 | 392 | ||
@@ -497,12 +578,12 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev, | |||
497 | */ | 578 | */ |
498 | static void handle_error_source(struct pcie_device * aerdev, | 579 | static void handle_error_source(struct pcie_device * aerdev, |
499 | struct pci_dev *dev, | 580 | struct pci_dev *dev, |
500 | struct aer_err_info info) | 581 | struct aer_err_info *info) |
501 | { | 582 | { |
502 | pci_ers_result_t status = 0; | 583 | pci_ers_result_t status = 0; |
503 | int pos; | 584 | int pos; |
504 | 585 | ||
505 | if (info.severity == AER_CORRECTABLE) { | 586 | if (info->severity == AER_CORRECTABLE) { |
506 | /* | 587 | /* |
507 | * Correctable error does not need software intervention. | 588 | * Correctable error does not need software intervention. |
508 | * No need to go through error recovery process. | 589 | * No need to go through error recovery process. |
@@ -510,9 +591,9 @@ static void handle_error_source(struct pcie_device * aerdev, | |||
510 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | 591 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
511 | if (pos) | 592 | if (pos) |
512 | pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, | 593 | pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, |
513 | info.status); | 594 | info->status); |
514 | } else { | 595 | } else { |
515 | status = do_recovery(aerdev, dev, info.severity); | 596 | status = do_recovery(aerdev, dev, info->severity); |
516 | if (status == PCI_ERS_RESULT_RECOVERED) { | 597 | if (status == PCI_ERS_RESULT_RECOVERED) { |
517 | dev_printk(KERN_DEBUG, &dev->dev, "AER driver " | 598 | dev_printk(KERN_DEBUG, &dev->dev, "AER driver " |
518 | "successfully recovered\n"); | 599 | "successfully recovered\n"); |
@@ -661,6 +742,28 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) | |||
661 | return AER_SUCCESS; | 742 | return AER_SUCCESS; |
662 | } | 743 | } |
663 | 744 | ||
745 | static inline void aer_process_err_devices(struct pcie_device *p_device, | ||
746 | struct aer_err_info *e_info) | ||
747 | { | ||
748 | int i; | ||
749 | |||
750 | if (!e_info->dev[0]) { | ||
751 | dev_printk(KERN_DEBUG, &p_device->port->dev, | ||
752 | "can't find device of ID%04x\n", | ||
753 | e_info->id); | ||
754 | } | ||
755 | |||
756 | for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { | ||
757 | if (get_device_error_info(e_info->dev[i], e_info) == | ||
758 | AER_SUCCESS) { | ||
759 | aer_print_error(e_info->dev[i], e_info); | ||
760 | handle_error_source(p_device, | ||
761 | e_info->dev[i], | ||
762 | e_info); | ||
763 | } | ||
764 | } | ||
765 | } | ||
766 | |||
664 | /** | 767 | /** |
665 | * aer_isr_one_error - consume an error detected by root port | 768 | * aer_isr_one_error - consume an error detected by root port |
666 | * @p_device: pointer to error root port service device | 769 | * @p_device: pointer to error root port service device |
@@ -669,10 +772,16 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) | |||
669 | static void aer_isr_one_error(struct pcie_device *p_device, | 772 | static void aer_isr_one_error(struct pcie_device *p_device, |
670 | struct aer_err_source *e_src) | 773 | struct aer_err_source *e_src) |
671 | { | 774 | { |
672 | struct device *s_device; | 775 | struct aer_err_info *e_info; |
673 | struct aer_err_info e_info = {0, 0, 0,}; | ||
674 | int i; | 776 | int i; |
675 | u16 id; | 777 | |
778 | /* struct aer_err_info might be big, so we allocate it with slab */ | ||
779 | e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL); | ||
780 | if (e_info == NULL) { | ||
781 | dev_printk(KERN_DEBUG, &p_device->port->dev, | ||
782 | "Can't allocate mem when processing AER errors\n"); | ||
783 | return; | ||
784 | } | ||
676 | 785 | ||
677 | /* | 786 | /* |
678 | * There is a possibility that both correctable error and | 787 | * There is a possibility that both correctable error and |
@@ -684,31 +793,26 @@ static void aer_isr_one_error(struct pcie_device *p_device, | |||
684 | if (!(e_src->status & i)) | 793 | if (!(e_src->status & i)) |
685 | continue; | 794 | continue; |
686 | 795 | ||
796 | memset(e_info, 0, sizeof(struct aer_err_info)); | ||
797 | |||
687 | /* Init comprehensive error information */ | 798 | /* Init comprehensive error information */ |
688 | if (i & PCI_ERR_ROOT_COR_RCV) { | 799 | if (i & PCI_ERR_ROOT_COR_RCV) { |
689 | id = ERR_COR_ID(e_src->id); | 800 | e_info->id = ERR_COR_ID(e_src->id); |
690 | e_info.severity = AER_CORRECTABLE; | 801 | e_info->severity = AER_CORRECTABLE; |
691 | } else { | 802 | } else { |
692 | id = ERR_UNCOR_ID(e_src->id); | 803 | e_info->id = ERR_UNCOR_ID(e_src->id); |
693 | e_info.severity = ((e_src->status >> 6) & 1); | 804 | e_info->severity = ((e_src->status >> 6) & 1); |
694 | } | 805 | } |
695 | if (e_src->status & | 806 | if (e_src->status & |
696 | (PCI_ERR_ROOT_MULTI_COR_RCV | | 807 | (PCI_ERR_ROOT_MULTI_COR_RCV | |
697 | PCI_ERR_ROOT_MULTI_UNCOR_RCV)) | 808 | PCI_ERR_ROOT_MULTI_UNCOR_RCV)) |
698 | e_info.flags |= AER_MULTI_ERROR_VALID_FLAG; | 809 | e_info->flags |= AER_MULTI_ERROR_VALID_FLAG; |
699 | if (!(s_device = find_source_device(p_device->port, id))) { | 810 | |
700 | printk(KERN_DEBUG "%s->can't find device of ID%04x\n", | 811 | find_source_device(p_device->port, e_info); |
701 | __func__, id); | 812 | aer_process_err_devices(p_device, e_info); |
702 | continue; | ||
703 | } | ||
704 | if (get_device_error_info(to_pci_dev(s_device), &e_info) == | ||
705 | AER_SUCCESS) { | ||
706 | aer_print_error(to_pci_dev(s_device), &e_info); | ||
707 | handle_error_source(p_device, | ||
708 | to_pci_dev(s_device), | ||
709 | e_info); | ||
710 | } | ||
711 | } | 813 | } |
814 | |||
815 | kfree(e_info); | ||
712 | } | 816 | } |
713 | 817 | ||
714 | /** | 818 | /** |
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
new file mode 100644
index 000000000000..ece97df4df6d
--- /dev/null
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | * Enables/disables PCIe ECRC checking. | ||
3 | * | ||
4 | * (C) Copyright 2009 Hewlett-Packard Development Company, L.P. | ||
5 | * Andrew Patterson <andrew.patterson@hp.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; version 2 of the License. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA | ||
19 | * 02111-1307, USA. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/moduleparam.h> | ||
26 | #include <linux/pci.h> | ||
27 | #include <linux/pci_regs.h> | ||
28 | #include <linux/errno.h> | ||
29 | #include "../../pci.h" | ||
30 | |||
31 | #define ECRC_POLICY_DEFAULT 0 /* ECRC set by BIOS */ | ||
32 | #define ECRC_POLICY_OFF 1 /* ECRC off for performance */ | ||
33 | #define ECRC_POLICY_ON 2 /* ECRC on for data integrity */ | ||
34 | |||
35 | static int ecrc_policy = ECRC_POLICY_DEFAULT; | ||
36 | |||
37 | static const char *ecrc_policy_str[] = { | ||
38 | [ECRC_POLICY_DEFAULT] = "bios", | ||
39 | [ECRC_POLICY_OFF] = "off", | ||
40 | [ECRC_POLICY_ON] = "on" | ||
41 | }; | ||
42 | |||
43 | /** | ||
44 | * enable_ecrc_checking - enable PCIe ECRC checking for a device | ||
45 | * @dev: the PCI device | ||
46 | * | ||
47 | * Returns 0 on success, or negative on failure. | ||
48 | */ | ||
49 | static int enable_ecrc_checking(struct pci_dev *dev) | ||
50 | { | ||
51 | int pos; | ||
52 | u32 reg32; | ||
53 | |||
54 | if (!dev->is_pcie) | ||
55 | return -ENODEV; | ||
56 | |||
57 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | ||
58 | if (!pos) | ||
59 | return -ENODEV; | ||
60 | |||
61 | pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); | ||
62 | if (reg32 & PCI_ERR_CAP_ECRC_GENC) | ||
63 | reg32 |= PCI_ERR_CAP_ECRC_GENE; | ||
64 | if (reg32 & PCI_ERR_CAP_ECRC_CHKC) | ||
65 | reg32 |= PCI_ERR_CAP_ECRC_CHKE; | ||
66 | pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | /** | ||
72 | * disable_ecrc_checking - disable PCIe ECRC checking for a device | ||
73 | * @dev: the PCI device | ||
74 | * | ||
75 | * Returns 0 on success, or negative on failure. | ||
76 | */ | ||
77 | static int disable_ecrc_checking(struct pci_dev *dev) | ||
78 | { | ||
79 | int pos; | ||
80 | u32 reg32; | ||
81 | |||
82 | if (!dev->is_pcie) | ||
83 | return -ENODEV; | ||
84 | |||
85 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | ||
86 | if (!pos) | ||
87 | return -ENODEV; | ||
88 | |||
89 | pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); | ||
90 | reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE); | ||
91 | pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); | ||
92 | |||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy | ||
98 | * @dev: the PCI device | ||
99 | */ | ||
100 | void pcie_set_ecrc_checking(struct pci_dev *dev) | ||
101 | { | ||
102 | switch (ecrc_policy) { | ||
103 | case ECRC_POLICY_DEFAULT: | ||
104 | return; | ||
105 | case ECRC_POLICY_OFF: | ||
106 | disable_ecrc_checking(dev); | ||
107 | break; | ||
108 | case ECRC_POLICY_ON: | ||
109 | enable_ecrc_checking(dev); | ||
110 | break; | ||
111 | default: | ||
112 | return; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | /** | ||
117 | * pcie_ecrc_get_policy - parse kernel command-line ecrc option | ||
118 | */ | ||
119 | void pcie_ecrc_get_policy(char *str) | ||
120 | { | ||
121 | int i; | ||
122 | |||
123 | for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++) | ||
124 | if (!strncmp(str, ecrc_policy_str[i], | ||
125 | strlen(ecrc_policy_str[i]))) | ||
126 | break; | ||
127 | if (i >= ARRAY_SIZE(ecrc_policy_str)) | ||
128 | return; | ||
129 | |||
130 | ecrc_policy = i; | ||
131 | } | ||
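
For reference, pcie_ecrc_get_policy() above only accepts the strings listed in
ecrc_policy_str[] ("bios", "off", "on"). The code that feeds it a string from
the kernel command line is outside this diff; in mainline it is wired up as an
argument of the existing "pci=" boot parameter, so the policy would be chosen
at boot roughly as follows (illustrative):

	pci=ecrc=bios    leave ECRC generation/checking as configured by firmware (default)
	pci=ecrc=off     force ECRC generation/checking off
	pci=ecrc=on      force ECRC generation/checking on where the AER capability allows it
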
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b0367f168af4..3d27c97e0486 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -26,40 +26,36 @@ | |||
26 | #endif | 26 | #endif |
27 | #define MODULE_PARAM_PREFIX "pcie_aspm." | 27 | #define MODULE_PARAM_PREFIX "pcie_aspm." |
28 | 28 | ||
29 | struct endpoint_state { | 29 | struct aspm_latency { |
30 | unsigned int l0s_acceptable_latency; | 30 | u32 l0s; /* L0s latency (nsec) */ |
31 | unsigned int l1_acceptable_latency; | 31 | u32 l1; /* L1 latency (nsec) */ |
32 | }; | 32 | }; |
33 | 33 | ||
34 | struct pcie_link_state { | 34 | struct pcie_link_state { |
35 | struct list_head sibiling; | 35 | struct pci_dev *pdev; /* Upstream component of the Link */ |
36 | struct pci_dev *pdev; | 36 | struct pcie_link_state *root; /* pointer to the root port link */ |
37 | bool downstream_has_switch; | 37 | struct pcie_link_state *parent; /* pointer to the parent Link state */ |
38 | 38 | struct list_head sibling; /* node in link_list */ | |
39 | struct pcie_link_state *parent; | 39 | struct list_head children; /* list of child link states */ |
40 | struct list_head children; | 40 | struct list_head link; /* node in parent's children list */ |
41 | struct list_head link; | ||
42 | 41 | ||
43 | /* ASPM state */ | 42 | /* ASPM state */ |
44 | unsigned int support_state; | 43 | u32 aspm_support:2; /* Supported ASPM state */ |
45 | unsigned int enabled_state; | 44 | u32 aspm_enabled:2; /* Enabled ASPM state */ |
46 | unsigned int bios_aspm_state; | 45 | u32 aspm_default:2; /* Default ASPM state by BIOS */ |
47 | /* upstream component */ | 46 | |
48 | unsigned int l0s_upper_latency; | 47 | /* Clock PM state */ |
49 | unsigned int l1_upper_latency; | 48 | u32 clkpm_capable:1; /* Clock PM capable? */ |
50 | /* downstream component */ | 49 | u32 clkpm_enabled:1; /* Current Clock PM state */ |
51 | unsigned int l0s_down_latency; | 50 | u32 clkpm_default:1; /* Default Clock PM state by BIOS */ |
52 | unsigned int l1_down_latency; | ||
53 | /* Clock PM state*/ | ||
54 | unsigned int clk_pm_capable; | ||
55 | unsigned int clk_pm_enabled; | ||
56 | unsigned int bios_clk_state; | ||
57 | 51 | ||
52 | /* Latencies */ | ||
53 | struct aspm_latency latency; /* Exit latency */ | ||
58 | /* | 54 | /* |
59 | * A pcie downstream port only has one slot under it, so at most there | 55 | * Endpoint acceptable latencies. A pcie downstream port only |
60 | * are 8 functions | 56 | * has one slot under it, so at most there are 8 functions. |
61 | */ | 57 | */ |
62 | struct endpoint_state endpoints[8]; | 58 | struct aspm_latency acceptable[8]; |
63 | }; | 59 | }; |
64 | 60 | ||
65 | static int aspm_disabled, aspm_force; | 61 | static int aspm_disabled, aspm_force; |
@@ -78,27 +74,23 @@ static const char *policy_str[] = { | |||
78 | 74 | ||
79 | #define LINK_RETRAIN_TIMEOUT HZ | 75 | #define LINK_RETRAIN_TIMEOUT HZ |
80 | 76 | ||
81 | static int policy_to_aspm_state(struct pci_dev *pdev) | 77 | static int policy_to_aspm_state(struct pcie_link_state *link) |
82 | { | 78 | { |
83 | struct pcie_link_state *link_state = pdev->link_state; | ||
84 | |||
85 | switch (aspm_policy) { | 79 | switch (aspm_policy) { |
86 | case POLICY_PERFORMANCE: | 80 | case POLICY_PERFORMANCE: |
87 | /* Disable ASPM and Clock PM */ | 81 | /* Disable ASPM and Clock PM */ |
88 | return 0; | 82 | return 0; |
89 | case POLICY_POWERSAVE: | 83 | case POLICY_POWERSAVE: |
90 | /* Enable ASPM L0s/L1 */ | 84 | /* Enable ASPM L0s/L1 */ |
91 | return PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; | 85 | return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1; |
92 | case POLICY_DEFAULT: | 86 | case POLICY_DEFAULT: |
93 | return link_state->bios_aspm_state; | 87 | return link->aspm_default; |
94 | } | 88 | } |
95 | return 0; | 89 | return 0; |
96 | } | 90 | } |
97 | 91 | ||
98 | static int policy_to_clkpm_state(struct pci_dev *pdev) | 92 | static int policy_to_clkpm_state(struct pcie_link_state *link) |
99 | { | 93 | { |
100 | struct pcie_link_state *link_state = pdev->link_state; | ||
101 | |||
102 | switch (aspm_policy) { | 94 | switch (aspm_policy) { |
103 | case POLICY_PERFORMANCE: | 95 | case POLICY_PERFORMANCE: |
104 | /* Disable ASPM and Clock PM */ | 96 | /* Disable ASPM and Clock PM */ |
@@ -107,73 +99,78 @@ static int policy_to_clkpm_state(struct pci_dev *pdev) | |||
107 | /* Disable Clock PM */ | 99 | /* Disable Clock PM */ |
108 | return 1; | 100 | return 1; |
109 | case POLICY_DEFAULT: | 101 | case POLICY_DEFAULT: |
110 | return link_state->bios_clk_state; | 102 | return link->clkpm_default; |
111 | } | 103 | } |
112 | return 0; | 104 | return 0; |
113 | } | 105 | } |
114 | 106 | ||
115 | static void pcie_set_clock_pm(struct pci_dev *pdev, int enable) | 107 | static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable) |
116 | { | 108 | { |
117 | struct pci_dev *child_dev; | ||
118 | int pos; | 109 | int pos; |
119 | u16 reg16; | 110 | u16 reg16; |
120 | struct pcie_link_state *link_state = pdev->link_state; | 111 | struct pci_dev *child; |
112 | struct pci_bus *linkbus = link->pdev->subordinate; | ||
121 | 113 | ||
122 | list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { | 114 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
123 | pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); | 115 | pos = pci_find_capability(child, PCI_CAP_ID_EXP); |
124 | if (!pos) | 116 | if (!pos) |
125 | return; | 117 | return; |
126 | pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, ®16); | 118 | pci_read_config_word(child, pos + PCI_EXP_LNKCTL, ®16); |
127 | if (enable) | 119 | if (enable) |
128 | reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN; | 120 | reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN; |
129 | else | 121 | else |
130 | reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN; | 122 | reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN; |
131 | pci_write_config_word(child_dev, pos + PCI_EXP_LNKCTL, reg16); | 123 | pci_write_config_word(child, pos + PCI_EXP_LNKCTL, reg16); |
132 | } | 124 | } |
133 | link_state->clk_pm_enabled = !!enable; | 125 | link->clkpm_enabled = !!enable; |
134 | } | 126 | } |
135 | 127 | ||
136 | static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist) | 128 | static void pcie_set_clkpm(struct pcie_link_state *link, int enable) |
137 | { | 129 | { |
138 | int pos; | 130 | /* Don't enable Clock PM if the link is not Clock PM capable */ |
131 | if (!link->clkpm_capable && enable) | ||
132 | return; | ||
133 | /* Nothing to do if the requested state equals the current one */ | ||
134 | if (link->clkpm_enabled == enable) | ||
135 | return; | ||
136 | pcie_set_clkpm_nocheck(link, enable); | ||
137 | } | ||
138 | |||
139 | static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) | ||
140 | { | ||
141 | int pos, capable = 1, enabled = 1; | ||
139 | u32 reg32; | 142 | u32 reg32; |
140 | u16 reg16; | 143 | u16 reg16; |
141 | int capable = 1, enabled = 1; | 144 | struct pci_dev *child; |
142 | struct pci_dev *child_dev; | 145 | struct pci_bus *linkbus = link->pdev->subordinate; |
143 | struct pcie_link_state *link_state = pdev->link_state; | ||
144 | 146 | ||
145 | /* All functions should have the same cap and state, take the worst */ | 147 | /* All functions should have the same cap and state, take the worst */ |
146 | list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { | 148 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
147 | pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); | 149 | pos = pci_find_capability(child, PCI_CAP_ID_EXP); |
148 | if (!pos) | 150 | if (!pos) |
149 | return; | 151 | return; |
150 | pci_read_config_dword(child_dev, pos + PCI_EXP_LNKCAP, ®32); | 152 | pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, ®32); |
151 | if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) { | 153 | if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) { |
152 | capable = 0; | 154 | capable = 0; |
153 | enabled = 0; | 155 | enabled = 0; |
154 | break; | 156 | break; |
155 | } | 157 | } |
156 | pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, ®16); | 158 | pci_read_config_word(child, pos + PCI_EXP_LNKCTL, ®16); |
157 | if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN)) | 159 | if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN)) |
158 | enabled = 0; | 160 | enabled = 0; |
159 | } | 161 | } |
160 | link_state->clk_pm_enabled = enabled; | 162 | link->clkpm_enabled = enabled; |
161 | link_state->bios_clk_state = enabled; | 163 | link->clkpm_default = enabled; |
162 | if (!blacklist) { | 164 | link->clkpm_capable = (blacklist) ? 0 : capable; |
163 | link_state->clk_pm_capable = capable; | ||
164 | pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev)); | ||
165 | } else { | ||
166 | link_state->clk_pm_capable = 0; | ||
167 | pcie_set_clock_pm(pdev, 0); | ||
168 | } | ||
169 | } | 165 | } |
170 | 166 | ||
171 | static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev) | 167 | static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link) |
172 | { | 168 | { |
173 | struct pci_dev *child_dev; | 169 | struct pci_dev *child; |
170 | struct pci_bus *linkbus = link->pdev->subordinate; | ||
174 | 171 | ||
175 | list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { | 172 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
176 | if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) | 173 | if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM) |
177 | return true; | 174 | return true; |
178 | } | 175 | } |
179 | return false; | 176 | return false; |
@@ -184,289 +181,263 @@ static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev) | |||
184 | * could use common clock. If they are, configure them to use the | 181 | * could use common clock. If they are, configure them to use the |
185 | * common clock. That will reduce the ASPM state exit latency. | 182 | * common clock. That will reduce the ASPM state exit latency. |
186 | */ | 183 | */ |
187 | static void pcie_aspm_configure_common_clock(struct pci_dev *pdev) | 184 | static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) |
188 | { | 185 | { |
189 | int pos, child_pos, i = 0; | 186 | int ppos, cpos, same_clock = 1; |
190 | u16 reg16 = 0; | 187 | u16 reg16, parent_reg, child_reg[8]; |
191 | struct pci_dev *child_dev; | ||
192 | int same_clock = 1; | ||
193 | unsigned long start_jiffies; | 188 | unsigned long start_jiffies; |
194 | u16 child_regs[8], parent_reg; | 189 | struct pci_dev *child, *parent = link->pdev; |
190 | struct pci_bus *linkbus = parent->subordinate; | ||
195 | /* | 191 | /* |
196 | * all functions of a slot should have the same Slot Clock | 192 | * All functions of a slot should have the same Slot Clock |
197 | * Configuration, so just check one function | 193 | * Configuration, so just check one function |
198 | * */ | 194 | */ |
199 | child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev, | 195 | child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); |
200 | bus_list); | 196 | BUG_ON(!child->is_pcie); |
201 | BUG_ON(!child_dev->is_pcie); | ||
202 | 197 | ||
203 | /* Check downstream component if bit Slot Clock Configuration is 1 */ | 198 | /* Check downstream component if bit Slot Clock Configuration is 1 */ |
204 | child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); | 199 | cpos = pci_find_capability(child, PCI_CAP_ID_EXP); |
205 | pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKSTA, ®16); | 200 | pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, ®16); |
206 | if (!(reg16 & PCI_EXP_LNKSTA_SLC)) | 201 | if (!(reg16 & PCI_EXP_LNKSTA_SLC)) |
207 | same_clock = 0; | 202 | same_clock = 0; |
208 | 203 | ||
209 | /* Check upstream component if bit Slot Clock Configuration is 1 */ | 204 | /* Check upstream component if bit Slot Clock Configuration is 1 */ |
210 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 205 | ppos = pci_find_capability(parent, PCI_CAP_ID_EXP); |
211 | pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, ®16); | 206 | pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, ®16); |
212 | if (!(reg16 & PCI_EXP_LNKSTA_SLC)) | 207 | if (!(reg16 & PCI_EXP_LNKSTA_SLC)) |
213 | same_clock = 0; | 208 | same_clock = 0; |
214 | 209 | ||
215 | /* Configure downstream component, all functions */ | 210 | /* Configure downstream component, all functions */ |
216 | list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { | 211 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
217 | child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); | 212 | cpos = pci_find_capability(child, PCI_CAP_ID_EXP); |
218 | pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL, | 213 | pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, ®16); |
219 | ®16); | 214 | child_reg[PCI_FUNC(child->devfn)] = reg16; |
220 | child_regs[i] = reg16; | ||
221 | if (same_clock) | 215 | if (same_clock) |
222 | reg16 |= PCI_EXP_LNKCTL_CCC; | 216 | reg16 |= PCI_EXP_LNKCTL_CCC; |
223 | else | 217 | else |
224 | reg16 &= ~PCI_EXP_LNKCTL_CCC; | 218 | reg16 &= ~PCI_EXP_LNKCTL_CCC; |
225 | pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL, | 219 | pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, reg16); |
226 | reg16); | ||
227 | i++; | ||
228 | } | 220 | } |
229 | 221 | ||
230 | /* Configure upstream component */ | 222 | /* Configure upstream component */ |
231 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); | 223 | pci_read_config_word(parent, ppos + PCI_EXP_LNKCTL, ®16); |
232 | parent_reg = reg16; | 224 | parent_reg = reg16; |
233 | if (same_clock) | 225 | if (same_clock) |
234 | reg16 |= PCI_EXP_LNKCTL_CCC; | 226 | reg16 |= PCI_EXP_LNKCTL_CCC; |
235 | else | 227 | else |
236 | reg16 &= ~PCI_EXP_LNKCTL_CCC; | 228 | reg16 &= ~PCI_EXP_LNKCTL_CCC; |
237 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); | 229 | pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16); |
238 | 230 | ||
239 | /* retrain link */ | 231 | /* Retrain link */ |
240 | reg16 |= PCI_EXP_LNKCTL_RL; | 232 | reg16 |= PCI_EXP_LNKCTL_RL; |
241 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); | 233 | pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16); |
242 | 234 | ||
243 | /* Wait for link training end */ | 235 | /* Wait for link training end. Break out after waiting for timeout */ |
244 | /* break out after waiting for timeout */ | ||
245 | start_jiffies = jiffies; | 236 | start_jiffies = jiffies; |
246 | for (;;) { | 237 | for (;;) { |
247 | pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, ®16); | 238 | pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, ®16); |
248 | if (!(reg16 & PCI_EXP_LNKSTA_LT)) | 239 | if (!(reg16 & PCI_EXP_LNKSTA_LT)) |
249 | break; | 240 | break; |
250 | if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) | 241 | if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) |
251 | break; | 242 | break; |
252 | msleep(1); | 243 | msleep(1); |
253 | } | 244 | } |
254 | /* training failed -> recover */ | 245 | if (!(reg16 & PCI_EXP_LNKSTA_LT)) |
255 | if (reg16 & PCI_EXP_LNKSTA_LT) { | 246 | return; |
256 | dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure" | 247 | |
257 | " common clock\n"); | 248 | /* Training failed. Restore common clock configurations */ |
258 | i = 0; | 249 | dev_printk(KERN_ERR, &parent->dev, |
259 | list_for_each_entry(child_dev, &pdev->subordinate->devices, | 250 | "ASPM: Could not configure common clock\n"); |
260 | bus_list) { | 251 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
261 | child_pos = pci_find_capability(child_dev, | 252 | cpos = pci_find_capability(child, PCI_CAP_ID_EXP); |
262 | PCI_CAP_ID_EXP); | 253 | pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, |
263 | pci_write_config_word(child_dev, | 254 | child_reg[PCI_FUNC(child->devfn)]); |
264 | child_pos + PCI_EXP_LNKCTL, | ||
265 | child_regs[i]); | ||
266 | i++; | ||
267 | } | ||
268 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg); | ||
269 | } | 255 | } |
256 | pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, parent_reg); | ||
270 | } | 257 | } |
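
The hunk above follows a fixed sequence: set or clear the Common Clock Configuration bit on both link partners, set Retrain Link on the upstream port, then poll the Link Training bit with a bounded wait and restore the old values if training never finishes. A userspace sketch of that control flow, under the assumption that a fake read_lnksta() stands in for a config-space read; the register contents are made up, the bit masks mirror the patch.

```c
#include <stdio.h>

#define PCI_EXP_LNKCTL_CCC 0x0040 /* Common Clock Configuration */
#define PCI_EXP_LNKCTL_RL  0x0020 /* Retrain Link */
#define PCI_EXP_LNKSTA_LT  0x0800 /* Link Training in progress */

static unsigned short lnkctl = 0x0003;
static unsigned short lnksta = PCI_EXP_LNKSTA_LT; /* pretend training runs */

/* Fake hardware: training completes after a few polls */
static unsigned short read_lnksta(void)
{
	static int polls;

	if (++polls >= 3)
		lnksta &= ~PCI_EXP_LNKSTA_LT;
	return lnksta;
}

int main(void)
{
	int same_clock = 1, tries;

	if (same_clock)
		lnkctl |= PCI_EXP_LNKCTL_CCC;
	else
		lnkctl &= ~PCI_EXP_LNKCTL_CCC;

	lnkctl |= PCI_EXP_LNKCTL_RL;           /* kick off retraining */

	for (tries = 0; tries < 50; tries++) { /* bounded, like the jiffies check */
		if (!(read_lnksta() & PCI_EXP_LNKSTA_LT))
			break;
	}
	printf("training %s after %d polls\n",
	       (lnksta & PCI_EXP_LNKSTA_LT) ? "timed out" : "done", tries);
	return 0;
}
```
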
271 | 258 | ||
272 | /* | 259 | /* Convert L0s latency encoding to ns */ |
273 | * calc_L0S_latency: Convert L0s latency encoding to ns | 260 | static u32 calc_l0s_latency(u32 encoding) |
274 | */ | ||
275 | static unsigned int calc_L0S_latency(unsigned int latency_encoding, int ac) | ||
276 | { | 261 | { |
277 | unsigned int ns = 64; | 262 | if (encoding == 0x7) |
263 | return (5 * 1000); /* > 4us */ | ||
264 | return (64 << encoding); | ||
265 | } | ||
278 | 266 | ||
279 | if (latency_encoding == 0x7) { | 267 | /* Convert L0s acceptable latency encoding to ns */ |
280 | if (ac) | 268 | static u32 calc_l0s_acceptable(u32 encoding) |
281 | ns = -1U; | 269 | { |
282 | else | 270 | if (encoding == 0x7) |
283 | ns = 5*1000; /* > 4us */ | 271 | return -1U; |
284 | } else | 272 | return (64 << encoding); |
285 | ns *= (1 << latency_encoding); | ||
286 | return ns; | ||
287 | } | 273 | } |
288 | 274 | ||
289 | /* | 275 | /* Convert L1 latency encoding to ns */ |
290 | * calc_L1_latency: Convert L1 latency encoding to ns | 276 | static u32 calc_l1_latency(u32 encoding) |
291 | */ | ||
292 | static unsigned int calc_L1_latency(unsigned int latency_encoding, int ac) | ||
293 | { | 277 | { |
294 | unsigned int ns = 1000; | 278 | if (encoding == 0x7) |
279 | return (65 * 1000); /* > 64us */ | ||
280 | return (1000 << encoding); | ||
281 | } | ||
295 | 282 | ||
296 | if (latency_encoding == 0x7) { | 283 | /* Convert L1 acceptable latency encoding to ns */ |
297 | if (ac) | 284 | static u32 calc_l1_acceptable(u32 encoding) |
298 | ns = -1U; | 285 | { |
299 | else | 286 | if (encoding == 0x7) |
300 | ns = 65*1000; /* > 64us */ | 287 | return -1U; |
301 | } else | 288 | return (1000 << encoding); |
302 | ns *= (1 << latency_encoding); | ||
303 | return ns; | ||
304 | } | 289 | } |
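
For reference, the two exit-latency helpers above expand to the table printed by the quick check below; the acceptable-latency variants differ only in returning -1U ("no limit") for encoding 7. This is a standalone re-statement of the arithmetic in the hunk, nothing more.

```c
#include <stdio.h>

/* Same conversion rules as calc_l0s_latency()/calc_l1_latency() above */
static unsigned int l0s_latency(unsigned int enc)
{
	return (enc == 0x7) ? 5 * 1000 : (64u << enc);   /* 7 means "> 4 us" */
}

static unsigned int l1_latency(unsigned int enc)
{
	return (enc == 0x7) ? 65 * 1000 : (1000u << enc); /* 7 means "> 64 us" */
}

int main(void)
{
	unsigned int enc;

	for (enc = 0; enc <= 7; enc++)
		printf("enc %u: L0s %6u ns, L1 %8u ns\n",
		       enc, l0s_latency(enc), l1_latency(enc));
	return 0;
}
```
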
305 | 290 | ||
306 | static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state, | 291 | static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state, |
307 | unsigned int *l0s, unsigned int *l1, unsigned int *enabled) | 292 | u32 *l0s, u32 *l1, u32 *enabled) |
308 | { | 293 | { |
309 | int pos; | 294 | int pos; |
310 | u16 reg16; | 295 | u16 reg16; |
311 | u32 reg32; | 296 | u32 reg32, encoding; |
312 | unsigned int latency; | ||
313 | 297 | ||
298 | *l0s = *l1 = *enabled = 0; | ||
314 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 299 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); |
315 | pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, ®32); | 300 | pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, ®32); |
316 | *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; | 301 | *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; |
317 | if (*state != PCIE_LINK_STATE_L0S && | 302 | if (*state != PCIE_LINK_STATE_L0S && |
318 | *state != (PCIE_LINK_STATE_L1|PCIE_LINK_STATE_L0S)) | 303 | *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S)) |
319 | *state = 0; | 304 | *state = 0; |
320 | if (*state == 0) | 305 | if (*state == 0) |
321 | return; | 306 | return; |
322 | 307 | ||
323 | latency = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; | 308 | encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; |
324 | *l0s = calc_L0S_latency(latency, 0); | 309 | *l0s = calc_l0s_latency(encoding); |
325 | if (*state & PCIE_LINK_STATE_L1) { | 310 | if (*state & PCIE_LINK_STATE_L1) { |
326 | latency = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; | 311 | encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; |
327 | *l1 = calc_L1_latency(latency, 0); | 312 | *l1 = calc_l1_latency(encoding); |
328 | } | 313 | } |
329 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); | 314 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); |
330 | *enabled = reg16 & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1); | 315 | *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); |
331 | } | 316 | } |
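
pcie_aspm_get_cap_device() pulls three fields out of a single Link Capabilities read: supported ASPM states at bits 11:10, the L0s exit latency encoding at bits 14:12 and the L1 exit latency encoding at bits 17:15. A standalone decode using the same masks and shifts as the patch; the sample register value is hypothetical.

```c
#include <stdio.h>

#define PCIE_LINK_STATE_L0S  1
#define PCIE_LINK_STATE_L1   2

#define PCI_EXP_LNKCAP_ASPMS 0x00000c00
#define PCI_EXP_LNKCAP_L0SEL 0x00007000
#define PCI_EXP_LNKCAP_L1EL  0x00038000

int main(void)
{
	unsigned int lnkcap  = 0x0047dc12; /* hypothetical LNKCAP dword */
	unsigned int support = (lnkcap & PCI_EXP_LNKCAP_ASPMS) >> 10;
	unsigned int l0s_enc = (lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
	unsigned int l1_enc  = (lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;

	printf("ASPM support: %s%s\n",
	       (support & PCIE_LINK_STATE_L0S) ? "L0s " : "",
	       (support & PCIE_LINK_STATE_L1)  ? "L1"   : "");
	printf("L0s exit latency encoding: %u\n", l0s_enc);
	printf("L1 exit latency encoding:  %u\n", l1_enc);
	return 0;
}
```
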
332 | 317 | ||
333 | static void pcie_aspm_cap_init(struct pci_dev *pdev) | 318 | static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) |
334 | { | 319 | { |
335 | struct pci_dev *child_dev; | 320 | u32 support, l0s, l1, enabled; |
336 | u32 state, tmp; | 321 | struct pci_dev *child, *parent = link->pdev; |
337 | struct pcie_link_state *link_state = pdev->link_state; | 322 | struct pci_bus *linkbus = parent->subordinate; |
323 | |||
324 | if (blacklist) { | ||
325 | /* Set support state to 0, so we will disable ASPM later */ | ||
326 | link->aspm_support = 0; | ||
327 | link->aspm_default = 0; | ||
328 | link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1; | ||
329 | return; | ||
330 | } | ||
331 | |||
332 | /* Configure common clock before checking latencies */ | ||
333 | pcie_aspm_configure_common_clock(link); | ||
338 | 334 | ||
339 | /* upstream component states */ | 335 | /* upstream component states */ |
340 | pcie_aspm_get_cap_device(pdev, &link_state->support_state, | 336 | pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled); |
341 | &link_state->l0s_upper_latency, | 337 | link->aspm_support = support; |
342 | &link_state->l1_upper_latency, | 338 | link->latency.l0s = l0s; |
343 | &link_state->enabled_state); | 339 | link->latency.l1 = l1; |
340 | link->aspm_enabled = enabled; | ||
341 | |||
344 | /* downstream component states, all functions have the same setting */ | 342 | /* downstream component states, all functions have the same setting */ |
345 | child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev, | 343 | child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); |
346 | bus_list); | 344 | pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled); |
347 | pcie_aspm_get_cap_device(child_dev, &state, | 345 | link->aspm_support &= support; |
348 | &link_state->l0s_down_latency, | 346 | link->latency.l0s = max_t(u32, link->latency.l0s, l0s); |
349 | &link_state->l1_down_latency, | 347 | link->latency.l1 = max_t(u32, link->latency.l1, l1); |
350 | &tmp); | 348 | |
351 | link_state->support_state &= state; | 349 | if (!link->aspm_support) |
352 | if (!link_state->support_state) | ||
353 | return; | 350 | return; |
354 | link_state->enabled_state &= link_state->support_state; | 351 | |
355 | link_state->bios_aspm_state = link_state->enabled_state; | 352 | link->aspm_enabled &= link->aspm_support; |
353 | link->aspm_default = link->aspm_enabled; | ||
356 | 354 | ||
357 | /* ENDPOINT states */ | 355 | /* ENDPOINT states */ |
358 | list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { | 356 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
359 | int pos; | 357 | int pos; |
360 | u32 reg32; | 358 | u32 reg32, encoding; |
361 | unsigned int latency; | 359 | struct aspm_latency *acceptable = |
362 | struct endpoint_state *ep_state = | 360 | &link->acceptable[PCI_FUNC(child->devfn)]; |
363 | &link_state->endpoints[PCI_FUNC(child_dev->devfn)]; | ||
364 | 361 | ||
365 | if (child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT && | 362 | if (child->pcie_type != PCI_EXP_TYPE_ENDPOINT && |
366 | child_dev->pcie_type != PCI_EXP_TYPE_LEG_END) | 363 | child->pcie_type != PCI_EXP_TYPE_LEG_END) |
367 | continue; | 364 | continue; |
368 | 365 | ||
369 | pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); | 366 | pos = pci_find_capability(child, PCI_CAP_ID_EXP); |
370 | pci_read_config_dword(child_dev, pos + PCI_EXP_DEVCAP, ®32); | 367 | pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, ®32); |
371 | latency = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; | 368 | encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; |
372 | latency = calc_L0S_latency(latency, 1); | 369 | acceptable->l0s = calc_l0s_acceptable(encoding); |
373 | ep_state->l0s_acceptable_latency = latency; | 370 | if (link->aspm_support & PCIE_LINK_STATE_L1) { |
374 | if (link_state->support_state & PCIE_LINK_STATE_L1) { | 371 | encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9; |
375 | latency = (reg32 & PCI_EXP_DEVCAP_L1) >> 9; | 372 | acceptable->l1 = calc_l1_acceptable(encoding); |
376 | latency = calc_L1_latency(latency, 1); | ||
377 | ep_state->l1_acceptable_latency = latency; | ||
378 | } | 373 | } |
379 | } | 374 | } |
380 | } | 375 | } |
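
Note how the hunk above combines the two ends of the link: the supported ASPM states are ANDed together, while the exit latencies take the maximum of the two partners, since the slower side determines how long the link needs to leave a low-power state. A tiny sketch of that aggregation with invented numbers:

```c
#include <stdio.h>

#define STATE_L0S 1
#define STATE_L1  2

static unsigned int max_u32(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* Hypothetical capabilities of the two link partners */
	unsigned int up_support = STATE_L0S | STATE_L1, down_support = STATE_L1;
	unsigned int up_l1 = 8000, down_l1 = 32000; /* ns */

	unsigned int support = up_support & down_support;  /* AND of both ends */
	unsigned int l1      = max_u32(up_l1, down_l1);    /* worst exit latency */

	printf("link supports:%s%s, worst L1 exit latency %u ns\n",
	       (support & STATE_L0S) ? " L0s" : "",
	       (support & STATE_L1)  ? " L1"  : "", l1);
	return 0;
}
```
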
381 | 376 | ||
382 | static unsigned int __pcie_aspm_check_state_one(struct pci_dev *pdev, | 377 | /** |
383 | unsigned int state) | 378 | * __pcie_aspm_check_state_one - check latency for endpoint device. |
384 | { | 379 | * @endpoint: pointer to the struct pci_dev of endpoint device |
385 | struct pci_dev *parent_dev, *tmp_dev; | 380 | * |
386 | unsigned int latency, l1_latency = 0; | 381 | * TBD: The latency from the endpoint to root complex varies per switch's |
387 | struct pcie_link_state *link_state; | 382 | * upstream link state above the device. Here we just do a simple check |
388 | struct endpoint_state *ep_state; | 383 | * which assumes all links above the device can be in L1 state, that |
389 | 384 | * is we just consider the worst case. If switch's upstream link can't | |
390 | parent_dev = pdev->bus->self; | 385 | * be put into L0S/L1, then our check is too strict. |
391 | link_state = parent_dev->link_state; | 386 | */ |
392 | state &= link_state->support_state; | 387 | static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state) |
393 | if (state == 0) | 388 | { |
394 | return 0; | 389 | u32 l1_switch_latency = 0; |
395 | ep_state = &link_state->endpoints[PCI_FUNC(pdev->devfn)]; | 390 | struct aspm_latency *acceptable; |
396 | 391 | struct pcie_link_state *link; | |
397 | /* | 392 | |
398 | * Check latency for endpoint device. | 393 | link = endpoint->bus->self->link_state; |
399 | * TBD: The latency from the endpoint to root complex varies per | 394 | state &= link->aspm_support; |
400 | * switch's upstream link state above the device. Here we just do a | 395 | acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)]; |
401 | * simple check which assumes all links above the device can be in L1 | 396 | |
402 | * state, that is we just consider the worst case. If switch's upstream | 397 | while (link && state) { |
403 | * link can't be put into L0S/L1, then our check is too strict. | 398 | if ((state & PCIE_LINK_STATE_L0S) && |
404 | */ | 399 | (link->latency.l0s > acceptable->l0s)) |
405 | tmp_dev = pdev; | 400 | state &= ~PCIE_LINK_STATE_L0S; |
406 | while (state & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) { | 401 | if ((state & PCIE_LINK_STATE_L1) && |
407 | parent_dev = tmp_dev->bus->self; | 402 | (link->latency.l1 + l1_switch_latency > acceptable->l1)) |
408 | link_state = parent_dev->link_state; | 403 | state &= ~PCIE_LINK_STATE_L1; |
409 | if (state & PCIE_LINK_STATE_L0S) { | 404 | link = link->parent; |
410 | latency = max_t(unsigned int, | 405 | /* |
411 | link_state->l0s_upper_latency, | 406 | * Every switch on the path to root complex needs 1 |
412 | link_state->l0s_down_latency); | 407 | * more microsecond for L1. Spec doesn't mention L0s. |
413 | if (latency > ep_state->l0s_acceptable_latency) | 408 | */ |
414 | state &= ~PCIE_LINK_STATE_L0S; | 409 | l1_switch_latency += 1000; |
415 | } | ||
416 | if (state & PCIE_LINK_STATE_L1) { | ||
417 | latency = max_t(unsigned int, | ||
418 | link_state->l1_upper_latency, | ||
419 | link_state->l1_down_latency); | ||
420 | if (latency + l1_latency > | ||
421 | ep_state->l1_acceptable_latency) | ||
422 | state &= ~PCIE_LINK_STATE_L1; | ||
423 | } | ||
424 | if (!parent_dev->bus->self) /* parent_dev is a root port */ | ||
425 | break; | ||
426 | else { | ||
427 | /* | ||
428 | * parent_dev is the downstream port of a switch, make | ||
429 | * tmp_dev the upstream port of the switch | ||
430 | */ | ||
431 | tmp_dev = parent_dev->bus->self; | ||
432 | /* | ||
433 | * every switch on the path to root complex needs 1 more | ||
434 | * microsecond for L1. Spec doesn't mention L0S. | ||
435 | */ | ||
436 | if (state & PCIE_LINK_STATE_L1) | ||
437 | l1_latency += 1000; | ||
438 | } | ||
439 | } | 410 | } |
440 | return state; | 411 | return state; |
441 | } | 412 | } |
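
The rewritten __pcie_aspm_check_state_one() walks from the endpoint's parent link up to the root port: L0s is dropped as soon as one link's exit latency exceeds the endpoint's acceptable L0s latency, and L1 is dropped once a link's L1 exit latency plus 1 us per intermediate switch exceeds the acceptable L1 latency. A userspace model of that walk follows; all latency numbers are invented and the struct is a stand-in for pcie_link_state.

```c
#include <stdio.h>

#define STATE_L0S 1
#define STATE_L1  2

struct link {
	unsigned int l0s, l1; /* worst exit latency on this link, ns */
	struct link *parent;  /* link above, NULL at the root port */
};

static unsigned int check_one(struct link *link, unsigned int state,
			      unsigned int acc_l0s, unsigned int acc_l1)
{
	unsigned int l1_switch_latency = 0;

	while (link && state) {
		if ((state & STATE_L0S) && link->l0s > acc_l0s)
			state &= ~STATE_L0S;
		if ((state & STATE_L1) &&
		    link->l1 + l1_switch_latency > acc_l1)
			state &= ~STATE_L1;
		link = link->parent;
		l1_switch_latency += 1000; /* one more switch on the path */
	}
	return state;
}

int main(void)
{
	struct link root = { 256, 8000, NULL };
	struct link leaf = { 512, 16000, &root };

	/* Endpoint accepts 1 us of L0s latency and 32 us of L1 latency */
	unsigned int ok = check_one(&leaf, STATE_L0S | STATE_L1, 1000, 32000);

	printf("allowed states: %s%s\n",
	       (ok & STATE_L0S) ? "L0s " : "", (ok & STATE_L1) ? "L1" : "");
	return 0;
}
```
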
442 | 413 | ||
443 | static unsigned int pcie_aspm_check_state(struct pci_dev *pdev, | 414 | static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state) |
444 | unsigned int state) | ||
445 | { | 415 | { |
446 | struct pci_dev *child_dev; | 416 | pci_power_t power_state; |
417 | struct pci_dev *child; | ||
418 | struct pci_bus *linkbus = link->pdev->subordinate; | ||
447 | 419 | ||
448 | /* If no child, ignore the link */ | 420 | /* If no child, ignore the link */ |
449 | if (list_empty(&pdev->subordinate->devices)) | 421 | if (list_empty(&linkbus->devices)) |
450 | return state; | 422 | return state; |
451 | list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { | 423 | |
452 | if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { | 424 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
453 | /* | 425 | /* |
454 | * If the downstream component of a link is a PCI bridge, we | 426 | * If the downstream component of a link is a PCI bridge, we |
455 | * disable ASPM for the link for now | 427 | * disable ASPM for the link for now |
456 | * */ | 428 | */ |
457 | state = 0; | 429 | if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) |
458 | break; | 430 | return 0; |
459 | } | 431 | |
460 | if ((child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT && | 432 | if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT && |
461 | child_dev->pcie_type != PCI_EXP_TYPE_LEG_END)) | 433 | child->pcie_type != PCI_EXP_TYPE_LEG_END)) |
462 | continue; | 434 | continue; |
463 | /* A device not in D0 doesn't need a latency check */ | 435 | /* A device not in D0 doesn't need a latency check */ |
464 | if (child_dev->current_state == PCI_D1 || | 436 | power_state = child->current_state; |
465 | child_dev->current_state == PCI_D2 || | 437 | if (power_state == PCI_D1 || power_state == PCI_D2 || |
466 | child_dev->current_state == PCI_D3hot || | 438 | power_state == PCI_D3hot || power_state == PCI_D3cold) |
467 | child_dev->current_state == PCI_D3cold) | ||
468 | continue; | 439 | continue; |
469 | state = __pcie_aspm_check_state_one(child_dev, state); | 440 | state = __pcie_aspm_check_state_one(child, state); |
470 | } | 441 | } |
471 | return state; | 442 | return state; |
472 | } | 443 | } |
@@ -482,90 +453,71 @@ static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state) | |||
482 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); | 453 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); |
483 | } | 454 | } |
484 | 455 | ||
485 | static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state) | 456 | static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state) |
486 | { | 457 | { |
487 | struct pci_dev *child_dev; | 458 | struct pci_dev *child, *parent = link->pdev; |
488 | int valid = 1; | 459 | struct pci_bus *linkbus = parent->subordinate; |
489 | struct pcie_link_state *link_state = pdev->link_state; | ||
490 | 460 | ||
491 | /* If no child, disable the link */ | 461 | /* If no child, disable the link */ |
492 | if (list_empty(&pdev->subordinate->devices)) | 462 | if (list_empty(&linkbus->devices)) |
493 | state = 0; | 463 | state = 0; |
494 | /* | 464 | /* |
495 | * if the downstream component has pci bridge function, don't do ASPM | 465 | * If the downstream component has pci bridge function, don't |
496 | * now | 466 | * do ASPM now. |
497 | */ | 467 | */ |
498 | list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { | 468 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
499 | if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { | 469 | if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) |
500 | valid = 0; | 470 | return; |
501 | break; | ||
502 | } | ||
503 | } | 471 | } |
504 | if (!valid) | ||
505 | return; | ||
506 | |||
507 | /* | 472 | /* |
508 | * spec 2.0 suggests all functions should be configured the same | 473 | * Spec 2.0 suggests all functions should be configured the |
509 | * setting for ASPM. Enabling ASPM L1 should be done in upstream | 474 | * same setting for ASPM. Enabling ASPM L1 should be done in |
510 | * component first and then downstream, and vice versa for disabling | 475 | * upstream component first and then downstream, and vice |
511 | * ASPM L1. Spec doesn't mention L0S. | 476 | * versa for disabling ASPM L1. Spec doesn't mention L0S. |
512 | */ | 477 | */ |
513 | if (state & PCIE_LINK_STATE_L1) | 478 | if (state & PCIE_LINK_STATE_L1) |
514 | __pcie_aspm_config_one_dev(pdev, state); | 479 | __pcie_aspm_config_one_dev(parent, state); |
515 | 480 | ||
516 | list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) | 481 | list_for_each_entry(child, &linkbus->devices, bus_list) |
517 | __pcie_aspm_config_one_dev(child_dev, state); | 482 | __pcie_aspm_config_one_dev(child, state); |
518 | 483 | ||
519 | if (!(state & PCIE_LINK_STATE_L1)) | 484 | if (!(state & PCIE_LINK_STATE_L1)) |
520 | __pcie_aspm_config_one_dev(pdev, state); | 485 | __pcie_aspm_config_one_dev(parent, state); |
521 | 486 | ||
522 | link_state->enabled_state = state; | 487 | link->aspm_enabled = state; |
523 | } | 488 | } |
524 | 489 | ||
525 | static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link) | 490 | /* Check the whole hierarchy, and configure each link in the hierarchy */ |
491 | static void __pcie_aspm_configure_link_state(struct pcie_link_state *link, | ||
492 | u32 state) | ||
526 | { | 493 | { |
527 | struct pcie_link_state *root_port_link = link; | 494 | struct pcie_link_state *leaf, *root = link->root; |
528 | while (root_port_link->parent) | ||
529 | root_port_link = root_port_link->parent; | ||
530 | return root_port_link; | ||
531 | } | ||
532 | 495 | ||
533 | /* check the whole hierarchy, and configure each link in the hierarchy */ | 496 | state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); |
534 | static void __pcie_aspm_configure_link_state(struct pci_dev *pdev, | ||
535 | unsigned int state) | ||
536 | { | ||
537 | struct pcie_link_state *link_state = pdev->link_state; | ||
538 | struct pcie_link_state *root_port_link = get_root_port_link(link_state); | ||
539 | struct pcie_link_state *leaf; | ||
540 | 497 | ||
541 | state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; | 498 | /* Check all links who have specific root port link */ |
542 | 499 | list_for_each_entry(leaf, &link_list, sibling) { | |
543 | /* check all links who have specific root port link */ | 500 | if (!list_empty(&leaf->children) || (leaf->root != root)) |
544 | list_for_each_entry(leaf, &link_list, sibiling) { | ||
545 | if (!list_empty(&leaf->children) || | ||
546 | get_root_port_link(leaf) != root_port_link) | ||
547 | continue; | 501 | continue; |
548 | state = pcie_aspm_check_state(leaf->pdev, state); | 502 | state = pcie_aspm_check_state(leaf, state); |
549 | } | 503 | } |
550 | /* check root port link too in case it has no children */ | 504 | /* Check the root port link too in case it has no children */ |
551 | state = pcie_aspm_check_state(root_port_link->pdev, state); | 505 | state = pcie_aspm_check_state(root, state); |
552 | 506 | if (link->aspm_enabled == state) | |
553 | if (link_state->enabled_state == state) | ||
554 | return; | 507 | return; |
555 | |||
556 | /* | 508 | /* |
557 | * we must change the hierarchy. See comments in | 509 | * We must change the hierarchy. See comments in |
558 | * __pcie_aspm_config_link for the order | 510 | * __pcie_aspm_config_link for the order |
559 | **/ | 511 | **/ |
560 | if (state & PCIE_LINK_STATE_L1) { | 512 | if (state & PCIE_LINK_STATE_L1) { |
561 | list_for_each_entry(leaf, &link_list, sibiling) { | 513 | list_for_each_entry(leaf, &link_list, sibling) { |
562 | if (get_root_port_link(leaf) == root_port_link) | 514 | if (leaf->root == root) |
563 | __pcie_aspm_config_link(leaf->pdev, state); | 515 | __pcie_aspm_config_link(leaf, state); |
564 | } | 516 | } |
565 | } else { | 517 | } else { |
566 | list_for_each_entry_reverse(leaf, &link_list, sibiling) { | 518 | list_for_each_entry_reverse(leaf, &link_list, sibling) { |
567 | if (get_root_port_link(leaf) == root_port_link) | 519 | if (leaf->root == root) |
568 | __pcie_aspm_config_link(leaf->pdev, state); | 520 | __pcie_aspm_config_link(leaf, state); |
569 | } | 521 | } |
570 | } | 522 | } |
571 | } | 523 | } |
@@ -574,45 +526,42 @@ static void __pcie_aspm_configure_link_state(struct pci_dev *pdev, | |||
574 | * pcie_aspm_configure_link_state: enable/disable PCI express link state | 526 | * pcie_aspm_configure_link_state: enable/disable PCI express link state |
575 | * @pdev: the root port or switch downstream port | 527 | * @pdev: the root port or switch downstream port |
576 | */ | 528 | */ |
577 | static void pcie_aspm_configure_link_state(struct pci_dev *pdev, | 529 | static void pcie_aspm_configure_link_state(struct pcie_link_state *link, |
578 | unsigned int state) | 530 | u32 state) |
579 | { | 531 | { |
580 | down_read(&pci_bus_sem); | 532 | down_read(&pci_bus_sem); |
581 | mutex_lock(&aspm_lock); | 533 | mutex_lock(&aspm_lock); |
582 | __pcie_aspm_configure_link_state(pdev, state); | 534 | __pcie_aspm_configure_link_state(link, state); |
583 | mutex_unlock(&aspm_lock); | 535 | mutex_unlock(&aspm_lock); |
584 | up_read(&pci_bus_sem); | 536 | up_read(&pci_bus_sem); |
585 | } | 537 | } |
586 | 538 | ||
587 | static void free_link_state(struct pci_dev *pdev) | 539 | static void free_link_state(struct pcie_link_state *link) |
588 | { | 540 | { |
589 | kfree(pdev->link_state); | 541 | link->pdev->link_state = NULL; |
590 | pdev->link_state = NULL; | 542 | kfree(link); |
591 | } | 543 | } |
592 | 544 | ||
593 | static int pcie_aspm_sanity_check(struct pci_dev *pdev) | 545 | static int pcie_aspm_sanity_check(struct pci_dev *pdev) |
594 | { | 546 | { |
595 | struct pci_dev *child_dev; | 547 | struct pci_dev *child; |
596 | int child_pos; | 548 | int pos; |
597 | u32 reg32; | 549 | u32 reg32; |
598 | |||
599 | /* | 550 | /* |
600 | * Some functions in a slot might not all be PCIE functions, very | 551 | * Some functions in a slot might not all be PCIE functions, |
601 | * strange. Disable ASPM for the whole slot | 552 | * very strange. Disable ASPM for the whole slot |
602 | */ | 553 | */ |
603 | list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { | 554 | list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { |
604 | child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); | 555 | pos = pci_find_capability(child, PCI_CAP_ID_EXP); |
605 | if (!child_pos) | 556 | if (!pos) |
606 | return -EINVAL; | 557 | return -EINVAL; |
607 | |||
608 | /* | 558 | /* |
609 | * Disable ASPM for pre-1.1 PCIe device, we follow MS to use | 559 | * Disable ASPM for pre-1.1 PCIe device, we follow MS to use |
610 | * RBER bit to determine if a function is 1.1 version device | 560 | * RBER bit to determine if a function is 1.1 version device |
611 | */ | 561 | */ |
612 | pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP, | 562 | pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, ®32); |
613 | ®32); | ||
614 | if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { | 563 | if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { |
615 | dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM" | 564 | dev_printk(KERN_INFO, &child->dev, "disabling ASPM" |
616 | " on pre-1.1 PCIe device. You can enable it" | 565 | " on pre-1.1 PCIe device. You can enable it" |
617 | " with 'pcie_aspm=force'\n"); | 566 | " with 'pcie_aspm=force'\n"); |
618 | return -EINVAL; | 567 | return -EINVAL; |
@@ -621,6 +570,47 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) | |||
621 | return 0; | 570 | return 0; |
622 | } | 571 | } |
623 | 572 | ||
573 | static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev) | ||
574 | { | ||
575 | struct pcie_link_state *link; | ||
576 | int blacklist = !!pcie_aspm_sanity_check(pdev); | ||
577 | |||
578 | link = kzalloc(sizeof(*link), GFP_KERNEL); | ||
579 | if (!link) | ||
580 | return NULL; | ||
581 | INIT_LIST_HEAD(&link->sibling); | ||
582 | INIT_LIST_HEAD(&link->children); | ||
583 | INIT_LIST_HEAD(&link->link); | ||
584 | link->pdev = pdev; | ||
585 | if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) { | ||
586 | struct pcie_link_state *parent; | ||
587 | parent = pdev->bus->parent->self->link_state; | ||
588 | if (!parent) { | ||
589 | kfree(link); | ||
590 | return NULL; | ||
591 | } | ||
592 | link->parent = parent; | ||
593 | list_add(&link->link, &parent->children); | ||
594 | } | ||
595 | /* Setup a pointer to the root port link */ | ||
596 | if (!link->parent) | ||
597 | link->root = link; | ||
598 | else | ||
599 | link->root = link->parent->root; | ||
600 | |||
601 | list_add(&link->sibling, &link_list); | ||
602 | |||
603 | pdev->link_state = link; | ||
604 | |||
605 | /* Check ASPM capability */ | ||
606 | pcie_aspm_cap_init(link, blacklist); | ||
607 | |||
608 | /* Check Clock PM capability */ | ||
609 | pcie_clkpm_cap_init(link, blacklist); | ||
610 | |||
611 | return link; | ||
612 | } | ||
613 | |||
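
pcie_aspm_setup_link_state() threads each new link into a small tree: a switch downstream port points at the link state of the port above it, and every link caches a pointer to its root port link so later policy changes can group links by hierarchy without walking the parent chain each time. A minimal sketch of that bookkeeping in plain userspace C; the structure and names are stand-ins, not the kernel's.

```c
#include <stdio.h>
#include <stdlib.h>

struct link_state {
	const char *name;
	struct link_state *parent; /* link above us, NULL at the root port */
	struct link_state *root;   /* cached root port link */
};

static struct link_state *new_link(const char *name, struct link_state *parent)
{
	struct link_state *link = calloc(1, sizeof(*link));

	if (!link)
		return NULL;
	link->name = name;
	link->parent = parent;
	link->root = parent ? parent->root : link; /* propagate the root pointer */
	return link;
}

int main(void)
{
	struct link_state *rp = new_link("root port", NULL);
	struct link_state *sw = rp ? new_link("switch downstream port", rp) : NULL;

	if (!rp || !sw)
		return 1;
	printf("%s: root is \"%s\"\n", sw->name, sw->root->name);
	free(sw);
	free(rp);
	return 0;
}
```
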
624 | /* | 614 | /* |
625 | * pcie_aspm_init_link_state: Initiate PCI express link state. | 615 | * pcie_aspm_init_link_state: Initiate PCI express link state. |
626 | * It is called after the PCIe port and its child devices are scanned. | 616 | * It is called after the PCIe port and its child devices are scanned. |
@@ -628,75 +618,47 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) | |||
628 | */ | 618 | */ |
629 | void pcie_aspm_init_link_state(struct pci_dev *pdev) | 619 | void pcie_aspm_init_link_state(struct pci_dev *pdev) |
630 | { | 620 | { |
631 | unsigned int state; | 621 | u32 state; |
632 | struct pcie_link_state *link_state; | 622 | struct pcie_link_state *link; |
633 | int error = 0; | ||
634 | int blacklist; | ||
635 | 623 | ||
636 | if (aspm_disabled || !pdev->is_pcie || pdev->link_state) | 624 | if (aspm_disabled || !pdev->is_pcie || pdev->link_state) |
637 | return; | 625 | return; |
638 | if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && | 626 | if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && |
639 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) | 627 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) |
628 | return; | ||
629 | |||
630 | /* VIA has a strange chipset, root port is under a bridge */ | ||
631 | if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT && | ||
632 | pdev->bus->self) | ||
640 | return; | 633 | return; |
634 | |||
641 | down_read(&pci_bus_sem); | 635 | down_read(&pci_bus_sem); |
642 | if (list_empty(&pdev->subordinate->devices)) | 636 | if (list_empty(&pdev->subordinate->devices)) |
643 | goto out; | 637 | goto out; |
644 | 638 | ||
645 | blacklist = !!pcie_aspm_sanity_check(pdev); | ||
646 | |||
647 | mutex_lock(&aspm_lock); | 639 | mutex_lock(&aspm_lock); |
648 | 640 | link = pcie_aspm_setup_link_state(pdev); | |
649 | link_state = kzalloc(sizeof(*link_state), GFP_KERNEL); | 641 | if (!link) |
650 | if (!link_state) | 642 | goto unlock; |
651 | goto unlock_out; | 643 | /* |
652 | 644 | * Setup initial ASPM state | |
653 | link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev); | 645 | * |
654 | INIT_LIST_HEAD(&link_state->children); | 646 | * If link has switch, delay the link config. The leaf link |
655 | INIT_LIST_HEAD(&link_state->link); | 647 | * initialization will config the whole hierarchy. But we must |
656 | if (pdev->bus->self) {/* this is a switch */ | 648 | * make sure BIOS doesn't set unsupported link state. |
657 | struct pcie_link_state *parent_link_state; | 649 | */ |
658 | 650 | if (pcie_aspm_downstream_has_switch(link)) { | |
659 | parent_link_state = pdev->bus->parent->self->link_state; | 651 | state = pcie_aspm_check_state(link, link->aspm_default); |
660 | if (!parent_link_state) { | 652 | __pcie_aspm_config_link(link, state); |
661 | kfree(link_state); | ||
662 | goto unlock_out; | ||
663 | } | ||
664 | list_add(&link_state->link, &parent_link_state->children); | ||
665 | link_state->parent = parent_link_state; | ||
666 | } | ||
667 | |||
668 | pdev->link_state = link_state; | ||
669 | |||
670 | if (!blacklist) { | ||
671 | pcie_aspm_configure_common_clock(pdev); | ||
672 | pcie_aspm_cap_init(pdev); | ||
673 | } else { | 653 | } else { |
674 | link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; | 654 | state = policy_to_aspm_state(link); |
675 | link_state->bios_aspm_state = 0; | 655 | __pcie_aspm_configure_link_state(link, state); |
676 | /* Set support state to 0, so we will disable ASPM later */ | ||
677 | link_state->support_state = 0; | ||
678 | } | 656 | } |
679 | 657 | ||
680 | link_state->pdev = pdev; | 658 | /* Setup initial Clock PM state */ |
681 | list_add(&link_state->sibiling, &link_list); | 659 | state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0; |
682 | 660 | pcie_set_clkpm(link, state); | |
683 | if (link_state->downstream_has_switch) { | 661 | unlock: |
684 | /* | ||
685 | * If link has switch, delay the link config. The leaf link | ||
686 | * initialization will config the whole hierarchy. but we must | ||
687 | * make sure BIOS doesn't set unsupported link state | ||
688 | **/ | ||
689 | state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state); | ||
690 | __pcie_aspm_config_link(pdev, state); | ||
691 | } else | ||
692 | __pcie_aspm_configure_link_state(pdev, | ||
693 | policy_to_aspm_state(pdev)); | ||
694 | |||
695 | pcie_check_clock_pm(pdev, blacklist); | ||
696 | |||
697 | unlock_out: | ||
698 | if (error) | ||
699 | free_link_state(pdev); | ||
700 | mutex_unlock(&aspm_lock); | 662 | mutex_unlock(&aspm_lock); |
701 | out: | 663 | out: |
702 | up_read(&pci_bus_sem); | 664 | up_read(&pci_bus_sem); |
@@ -725,11 +687,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) | |||
725 | 687 | ||
726 | /* All functions are removed, so just disable ASPM for the link */ | 688 | /* All functions are removed, so just disable ASPM for the link */ |
727 | __pcie_aspm_config_one_dev(parent, 0); | 689 | __pcie_aspm_config_one_dev(parent, 0); |
728 | list_del(&link_state->sibiling); | 690 | list_del(&link_state->sibling); |
729 | list_del(&link_state->link); | 691 | list_del(&link_state->link); |
730 | /* Clock PM is for endpoint device */ | 692 | /* Clock PM is for endpoint device */ |
731 | 693 | ||
732 | free_link_state(parent); | 694 | free_link_state(link_state); |
733 | out: | 695 | out: |
734 | mutex_unlock(&aspm_lock); | 696 | mutex_unlock(&aspm_lock); |
735 | up_read(&pci_bus_sem); | 697 | up_read(&pci_bus_sem); |
@@ -749,7 +711,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev) | |||
749 | * devices changed PM state, we should recheck if latency meets all | 711 | * devices changed PM state, we should recheck if latency meets all |
750 | * functions' requirement | 712 | * functions' requirement |
751 | */ | 713 | */ |
752 | pcie_aspm_configure_link_state(pdev, link_state->enabled_state); | 714 | pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled); |
753 | } | 715 | } |
754 | 716 | ||
755 | /* | 717 | /* |
@@ -772,14 +734,12 @@ void pci_disable_link_state(struct pci_dev *pdev, int state) | |||
772 | down_read(&pci_bus_sem); | 734 | down_read(&pci_bus_sem); |
773 | mutex_lock(&aspm_lock); | 735 | mutex_lock(&aspm_lock); |
774 | link_state = parent->link_state; | 736 | link_state = parent->link_state; |
775 | link_state->support_state &= | 737 | link_state->aspm_support &= ~state; |
776 | ~(state & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1)); | 738 | __pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled); |
777 | if (state & PCIE_LINK_STATE_CLKPM) | 739 | if (state & PCIE_LINK_STATE_CLKPM) { |
778 | link_state->clk_pm_capable = 0; | 740 | link_state->clkpm_capable = 0; |
779 | 741 | pcie_set_clkpm(link_state, 0); | |
780 | __pcie_aspm_configure_link_state(parent, link_state->enabled_state); | 742 | } |
781 | if (!link_state->clk_pm_capable && link_state->clk_pm_enabled) | ||
782 | pcie_set_clock_pm(parent, 0); | ||
783 | mutex_unlock(&aspm_lock); | 743 | mutex_unlock(&aspm_lock); |
784 | up_read(&pci_bus_sem); | 744 | up_read(&pci_bus_sem); |
785 | } | 745 | } |
@@ -788,7 +748,6 @@ EXPORT_SYMBOL(pci_disable_link_state); | |||
788 | static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) | 748 | static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) |
789 | { | 749 | { |
790 | int i; | 750 | int i; |
791 | struct pci_dev *pdev; | ||
792 | struct pcie_link_state *link_state; | 751 | struct pcie_link_state *link_state; |
793 | 752 | ||
794 | for (i = 0; i < ARRAY_SIZE(policy_str); i++) | 753 | for (i = 0; i < ARRAY_SIZE(policy_str); i++) |
@@ -802,14 +761,10 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) | |||
802 | down_read(&pci_bus_sem); | 761 | down_read(&pci_bus_sem); |
803 | mutex_lock(&aspm_lock); | 762 | mutex_lock(&aspm_lock); |
804 | aspm_policy = i; | 763 | aspm_policy = i; |
805 | list_for_each_entry(link_state, &link_list, sibiling) { | 764 | list_for_each_entry(link_state, &link_list, sibling) { |
806 | pdev = link_state->pdev; | 765 | __pcie_aspm_configure_link_state(link_state, |
807 | __pcie_aspm_configure_link_state(pdev, | 766 | policy_to_aspm_state(link_state)); |
808 | policy_to_aspm_state(pdev)); | 767 | pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state)); |
809 | if (link_state->clk_pm_capable && | ||
810 | link_state->clk_pm_enabled != policy_to_clkpm_state(pdev)) | ||
811 | pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev)); | ||
812 | |||
813 | } | 768 | } |
814 | mutex_unlock(&aspm_lock); | 769 | mutex_unlock(&aspm_lock); |
815 | up_read(&pci_bus_sem); | 770 | up_read(&pci_bus_sem); |
@@ -838,7 +793,7 @@ static ssize_t link_state_show(struct device *dev, | |||
838 | struct pci_dev *pci_device = to_pci_dev(dev); | 793 | struct pci_dev *pci_device = to_pci_dev(dev); |
839 | struct pcie_link_state *link_state = pci_device->link_state; | 794 | struct pcie_link_state *link_state = pci_device->link_state; |
840 | 795 | ||
841 | return sprintf(buf, "%d\n", link_state->enabled_state); | 796 | return sprintf(buf, "%d\n", link_state->aspm_enabled); |
842 | } | 797 | } |
843 | 798 | ||
844 | static ssize_t link_state_store(struct device *dev, | 799 | static ssize_t link_state_store(struct device *dev, |
@@ -846,7 +801,7 @@ static ssize_t link_state_store(struct device *dev, | |||
846 | const char *buf, | 801 | const char *buf, |
847 | size_t n) | 802 | size_t n) |
848 | { | 803 | { |
849 | struct pci_dev *pci_device = to_pci_dev(dev); | 804 | struct pci_dev *pdev = to_pci_dev(dev); |
850 | int state; | 805 | int state; |
851 | 806 | ||
852 | if (n < 1) | 807 | if (n < 1) |
@@ -854,7 +809,7 @@ static ssize_t link_state_store(struct device *dev, | |||
854 | state = buf[0]-'0'; | 809 | state = buf[0]-'0'; |
855 | if (state >= 0 && state <= 3) { | 810 | if (state >= 0 && state <= 3) { |
856 | /* setup link aspm state */ | 811 | /* setup link aspm state */ |
857 | pcie_aspm_configure_link_state(pci_device, state); | 812 | pcie_aspm_configure_link_state(pdev->link_state, state); |
858 | return n; | 813 | return n; |
859 | } | 814 | } |
860 | 815 | ||
@@ -868,7 +823,7 @@ static ssize_t clk_ctl_show(struct device *dev, | |||
868 | struct pci_dev *pci_device = to_pci_dev(dev); | 823 | struct pci_dev *pci_device = to_pci_dev(dev); |
869 | struct pcie_link_state *link_state = pci_device->link_state; | 824 | struct pcie_link_state *link_state = pci_device->link_state; |
870 | 825 | ||
871 | return sprintf(buf, "%d\n", link_state->clk_pm_enabled); | 826 | return sprintf(buf, "%d\n", link_state->clkpm_enabled); |
872 | } | 827 | } |
873 | 828 | ||
874 | static ssize_t clk_ctl_store(struct device *dev, | 829 | static ssize_t clk_ctl_store(struct device *dev, |
@@ -876,7 +831,7 @@ static ssize_t clk_ctl_store(struct device *dev, | |||
876 | const char *buf, | 831 | const char *buf, |
877 | size_t n) | 832 | size_t n) |
878 | { | 833 | { |
879 | struct pci_dev *pci_device = to_pci_dev(dev); | 834 | struct pci_dev *pdev = to_pci_dev(dev); |
880 | int state; | 835 | int state; |
881 | 836 | ||
882 | if (n < 1) | 837 | if (n < 1) |
@@ -885,7 +840,7 @@ static ssize_t clk_ctl_store(struct device *dev, | |||
885 | 840 | ||
886 | down_read(&pci_bus_sem); | 841 | down_read(&pci_bus_sem); |
887 | mutex_lock(&aspm_lock); | 842 | mutex_lock(&aspm_lock); |
888 | pcie_set_clock_pm(pci_device, !!state); | 843 | pcie_set_clkpm_nocheck(pdev->link_state, !!state); |
889 | mutex_unlock(&aspm_lock); | 844 | mutex_unlock(&aspm_lock); |
890 | up_read(&pci_bus_sem); | 845 | up_read(&pci_bus_sem); |
891 | 846 | ||
@@ -904,10 +859,10 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) | |||
904 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) | 859 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) |
905 | return; | 860 | return; |
906 | 861 | ||
907 | if (link_state->support_state) | 862 | if (link_state->aspm_support) |
908 | sysfs_add_file_to_group(&pdev->dev.kobj, | 863 | sysfs_add_file_to_group(&pdev->dev.kobj, |
909 | &dev_attr_link_state.attr, power_group); | 864 | &dev_attr_link_state.attr, power_group); |
910 | if (link_state->clk_pm_capable) | 865 | if (link_state->clkpm_capable) |
911 | sysfs_add_file_to_group(&pdev->dev.kobj, | 866 | sysfs_add_file_to_group(&pdev->dev.kobj, |
912 | &dev_attr_clk_ctl.attr, power_group); | 867 | &dev_attr_clk_ctl.attr, power_group); |
913 | } | 868 | } |
@@ -920,10 +875,10 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) | |||
920 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) | 875 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) |
921 | return; | 876 | return; |
922 | 877 | ||
923 | if (link_state->support_state) | 878 | if (link_state->aspm_support) |
924 | sysfs_remove_file_from_group(&pdev->dev.kobj, | 879 | sysfs_remove_file_from_group(&pdev->dev.kobj, |
925 | &dev_attr_link_state.attr, power_group); | 880 | &dev_attr_link_state.attr, power_group); |
926 | if (link_state->clk_pm_capable) | 881 | if (link_state->clkpm_capable) |
927 | sysfs_remove_file_from_group(&pdev->dev.kobj, | 882 | sysfs_remove_file_from_group(&pdev->dev.kobj, |
928 | &dev_attr_clk_ctl.attr, power_group); | 883 | &dev_attr_clk_ctl.attr, power_group); |
929 | } | 884 | } |
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index e39982503863..13ffdc35ea0e 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
@@ -275,7 +275,7 @@ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, | |||
275 | memset(device, 0, sizeof(struct device)); | 275 | memset(device, 0, sizeof(struct device)); |
276 | device->bus = &pcie_port_bus_type; | 276 | device->bus = &pcie_port_bus_type; |
277 | device->driver = NULL; | 277 | device->driver = NULL; |
278 | device->driver_data = NULL; | 278 | dev_set_drvdata(device, NULL); |
279 | device->release = release_pcie_device; /* callback to free pcie dev */ | 279 | device->release = release_pcie_device; /* callback to free pcie dev */ |
280 | dev_set_name(device, "%s:pcie%02x", | 280 | dev_set_name(device, "%s:pcie%02x", |
281 | pci_name(parent), get_descriptor_id(port_type, service_type)); | 281 | pci_name(parent), get_descriptor_id(port_type, service_type)); |