-rw-r--r--  drivers/pci/host/pcie-rcar.c        | 156
-rw-r--r--  drivers/pci/hotplug/cpqphp_sysfs.c  |   3
-rw-r--r--  drivers/pci/hotplug/pciehp.h        |   3
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c   |   7
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c    | 101
-rw-r--r--  drivers/pci/msi.c                   |  59
-rw-r--r--  drivers/pci/pci-label.c             |  18
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c      |   4
-rw-r--r--  drivers/pci/quirks.c                |   2
-rw-r--r--  include/linux/msi.h                 |   3
10 files changed, 163 insertions, 193 deletions
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index f7d3de32c9a0..f033972da9b9 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -105,7 +105,7 @@
105 | #define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19) | 105 | #define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19) |
106 | #define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16) | 106 | #define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16) |
107 | 107 | ||
108 | #define PCI_MAX_RESOURCES 4 | 108 | #define RCAR_PCI_MAX_RESOURCES 4 |
109 | #define MAX_NR_INBOUND_MAPS 6 | 109 | #define MAX_NR_INBOUND_MAPS 6 |
110 | 110 | ||
111 | struct rcar_msi { | 111 | struct rcar_msi { |
@@ -127,7 +127,7 @@ static inline struct rcar_msi *to_rcar_msi(struct msi_chip *chip) | |||
127 | struct rcar_pcie { | 127 | struct rcar_pcie { |
128 | struct device *dev; | 128 | struct device *dev; |
129 | void __iomem *base; | 129 | void __iomem *base; |
130 | struct resource res[PCI_MAX_RESOURCES]; | 130 | struct resource res[RCAR_PCI_MAX_RESOURCES]; |
131 | struct resource busn; | 131 | struct resource busn; |
132 | int root_bus_nr; | 132 | int root_bus_nr; |
133 | struct clk *clk; | 133 | struct clk *clk; |
@@ -140,36 +140,37 @@ static inline struct rcar_pcie *sys_to_pcie(struct pci_sys_data *sys) | |||
140 | return sys->private_data; | 140 | return sys->private_data; |
141 | } | 141 | } |
142 | 142 | ||
143 | static void pci_write_reg(struct rcar_pcie *pcie, unsigned long val, | 143 | static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val, |
144 | unsigned long reg) | 144 | unsigned long reg) |
145 | { | 145 | { |
146 | writel(val, pcie->base + reg); | 146 | writel(val, pcie->base + reg); |
147 | } | 147 | } |
148 | 148 | ||
149 | static unsigned long pci_read_reg(struct rcar_pcie *pcie, unsigned long reg) | 149 | static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie, |
150 | unsigned long reg) | ||
150 | { | 151 | { |
151 | return readl(pcie->base + reg); | 152 | return readl(pcie->base + reg); |
152 | } | 153 | } |
153 | 154 | ||
154 | enum { | 155 | enum { |
155 | PCI_ACCESS_READ, | 156 | RCAR_PCI_ACCESS_READ, |
156 | PCI_ACCESS_WRITE, | 157 | RCAR_PCI_ACCESS_WRITE, |
157 | }; | 158 | }; |
158 | 159 | ||
159 | static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) | 160 | static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) |
160 | { | 161 | { |
161 | int shift = 8 * (where & 3); | 162 | int shift = 8 * (where & 3); |
162 | u32 val = pci_read_reg(pcie, where & ~3); | 163 | u32 val = rcar_pci_read_reg(pcie, where & ~3); |
163 | 164 | ||
164 | val &= ~(mask << shift); | 165 | val &= ~(mask << shift); |
165 | val |= data << shift; | 166 | val |= data << shift; |
166 | pci_write_reg(pcie, val, where & ~3); | 167 | rcar_pci_write_reg(pcie, val, where & ~3); |
167 | } | 168 | } |
168 | 169 | ||
169 | static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) | 170 | static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) |
170 | { | 171 | { |
171 | int shift = 8 * (where & 3); | 172 | int shift = 8 * (where & 3); |
172 | u32 val = pci_read_reg(pcie, where & ~3); | 173 | u32 val = rcar_pci_read_reg(pcie, where & ~3); |
173 | 174 | ||
174 | return val >> shift; | 175 | return val >> shift; |
175 | } | 176 | } |
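Not part of the patch, but useful context for the renames in this hunk: rcar_rmw32() is what gives the driver sub-word access to the root port's config registers, shifting both mask and data into the byte lane selected by the low two bits of "where". A minimal user-space sketch of that arithmetic (the register value and offset below are invented for illustration):

    /* Illustrative only: mimic rcar_rmw32()'s byte-lane arithmetic in plain C. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rmw32(uint32_t regval, int where, uint32_t mask, uint32_t data)
    {
        int shift = 8 * (where & 3);    /* byte lane inside the 32-bit register */

        regval &= ~(mask << shift);     /* clear only the addressed field */
        regval |= data << shift;        /* then merge in the new value    */
        return regval;
    }

    int main(void)
    {
        /* Update the 0x7f-wide field at config offset 0x0e (byte lane 2). */
        uint32_t reg = 0xdeadbeef;

        printf("0x%08x\n", (unsigned)rmw32(reg, 0x0e, 0x7f, 0x01));  /* 0xde81beef */
        return 0;
    }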
@@ -205,14 +206,14 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie, | |||
205 | if (dev != 0) | 206 | if (dev != 0) |
206 | return PCIBIOS_DEVICE_NOT_FOUND; | 207 | return PCIBIOS_DEVICE_NOT_FOUND; |
207 | 208 | ||
208 | if (access_type == PCI_ACCESS_READ) { | 209 | if (access_type == RCAR_PCI_ACCESS_READ) { |
209 | *data = pci_read_reg(pcie, PCICONF(index)); | 210 | *data = rcar_pci_read_reg(pcie, PCICONF(index)); |
210 | } else { | 211 | } else { |
211 | /* Keep an eye out for changes to the root bus number */ | 212 | /* Keep an eye out for changes to the root bus number */ |
212 | if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS)) | 213 | if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS)) |
213 | pcie->root_bus_nr = *data & 0xff; | 214 | pcie->root_bus_nr = *data & 0xff; |
214 | 215 | ||
215 | pci_write_reg(pcie, *data, PCICONF(index)); | 216 | rcar_pci_write_reg(pcie, *data, PCICONF(index)); |
216 | } | 217 | } |
217 | 218 | ||
218 | return PCIBIOS_SUCCESSFUL; | 219 | return PCIBIOS_SUCCESSFUL; |
@@ -222,20 +223,20 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie, | |||
222 | return PCIBIOS_DEVICE_NOT_FOUND; | 223 | return PCIBIOS_DEVICE_NOT_FOUND; |
223 | 224 | ||
224 | /* Clear errors */ | 225 | /* Clear errors */ |
225 | pci_write_reg(pcie, pci_read_reg(pcie, PCIEERRFR), PCIEERRFR); | 226 | rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR); |
226 | 227 | ||
227 | /* Set the PIO address */ | 228 | /* Set the PIO address */ |
228 | pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | PCIE_CONF_DEV(dev) | | 229 | rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | |
229 | PCIE_CONF_FUNC(func) | reg, PCIECAR); | 230 | PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR); |
230 | 231 | ||
231 | /* Enable the configuration access */ | 232 | /* Enable the configuration access */ |
232 | if (bus->parent->number == pcie->root_bus_nr) | 233 | if (bus->parent->number == pcie->root_bus_nr) |
233 | pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR); | 234 | rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR); |
234 | else | 235 | else |
235 | pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR); | 236 | rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR); |
236 | 237 | ||
237 | /* Check for errors */ | 238 | /* Check for errors */ |
238 | if (pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST) | 239 | if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST) |
239 | return PCIBIOS_DEVICE_NOT_FOUND; | 240 | return PCIBIOS_DEVICE_NOT_FOUND; |
240 | 241 | ||
241 | /* Check for master and target aborts */ | 242 | /* Check for master and target aborts */ |
@@ -243,13 +244,13 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie, | |||
243 | (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT)) | 244 | (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT)) |
244 | return PCIBIOS_DEVICE_NOT_FOUND; | 245 | return PCIBIOS_DEVICE_NOT_FOUND; |
245 | 246 | ||
246 | if (access_type == PCI_ACCESS_READ) | 247 | if (access_type == RCAR_PCI_ACCESS_READ) |
247 | *data = pci_read_reg(pcie, PCIECDR); | 248 | *data = rcar_pci_read_reg(pcie, PCIECDR); |
248 | else | 249 | else |
249 | pci_write_reg(pcie, *data, PCIECDR); | 250 | rcar_pci_write_reg(pcie, *data, PCIECDR); |
250 | 251 | ||
251 | /* Disable the configuration access */ | 252 | /* Disable the configuration access */ |
252 | pci_write_reg(pcie, 0, PCIECCTLR); | 253 | rcar_pci_write_reg(pcie, 0, PCIECCTLR); |
253 | 254 | ||
254 | return PCIBIOS_SUCCESSFUL; | 255 | return PCIBIOS_SUCCESSFUL; |
255 | } | 256 | } |
@@ -260,12 +261,7 @@ static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, | |||
260 | struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata); | 261 | struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata); |
261 | int ret; | 262 | int ret; |
262 | 263 | ||
263 | if ((size == 2) && (where & 1)) | 264 | ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, |
264 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
265 | else if ((size == 4) && (where & 3)) | ||
266 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
267 | |||
268 | ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ, | ||
269 | bus, devfn, where, val); | 265 | bus, devfn, where, val); |
270 | if (ret != PCIBIOS_SUCCESSFUL) { | 266 | if (ret != PCIBIOS_SUCCESSFUL) { |
271 | *val = 0xffffffff; | 267 | *val = 0xffffffff; |
@@ -291,12 +287,7 @@ static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, | |||
291 | int shift, ret; | 287 | int shift, ret; |
292 | u32 data; | 288 | u32 data; |
293 | 289 | ||
294 | if ((size == 2) && (where & 1)) | 290 | ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, |
295 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
296 | else if ((size == 4) && (where & 3)) | ||
297 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
298 | |||
299 | ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ, | ||
300 | bus, devfn, where, &data); | 291 | bus, devfn, where, &data); |
301 | if (ret != PCIBIOS_SUCCESSFUL) | 292 | if (ret != PCIBIOS_SUCCESSFUL) |
302 | return ret; | 293 | return ret; |
@@ -315,7 +306,7 @@ static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, | |||
315 | } else | 306 | } else |
316 | data = val; | 307 | data = val; |
317 | 308 | ||
318 | ret = rcar_pcie_config_access(pcie, PCI_ACCESS_WRITE, | 309 | ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE, |
319 | bus, devfn, where, &data); | 310 | bus, devfn, where, &data); |
320 | 311 | ||
321 | return ret; | 312 | return ret; |
@@ -326,14 +317,15 @@ static struct pci_ops rcar_pcie_ops = { | |||
326 | .write = rcar_pcie_write_conf, | 317 | .write = rcar_pcie_write_conf, |
327 | }; | 318 | }; |
328 | 319 | ||
329 | static void rcar_pcie_setup_window(int win, struct resource *res, | 320 | static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie) |
330 | struct rcar_pcie *pcie) | ||
331 | { | 321 | { |
322 | struct resource *res = &pcie->res[win]; | ||
323 | |||
332 | /* Setup PCIe address space mappings for each resource */ | 324 | /* Setup PCIe address space mappings for each resource */ |
333 | resource_size_t size; | 325 | resource_size_t size; |
334 | u32 mask; | 326 | u32 mask; |
335 | 327 | ||
336 | pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); | 328 | rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); |
337 | 329 | ||
338 | /* | 330 | /* |
339 | * The PAMR mask is calculated in units of 128Bytes, which | 331 | * The PAMR mask is calculated in units of 128Bytes, which |
@@ -341,17 +333,17 @@ static void rcar_pcie_setup_window(int win, struct resource *res, | |||
341 | */ | 333 | */ |
342 | size = resource_size(res); | 334 | size = resource_size(res); |
343 | mask = (roundup_pow_of_two(size) / SZ_128) - 1; | 335 | mask = (roundup_pow_of_two(size) / SZ_128) - 1; |
344 | pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); | 336 | rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); |
345 | 337 | ||
346 | pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win)); | 338 | rcar_pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win)); |
347 | pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win)); | 339 | rcar_pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win)); |
348 | 340 | ||
349 | /* First resource is for IO */ | 341 | /* First resource is for IO */ |
350 | mask = PAR_ENABLE; | 342 | mask = PAR_ENABLE; |
351 | if (res->flags & IORESOURCE_IO) | 343 | if (res->flags & IORESOURCE_IO) |
352 | mask |= IO_SPACE; | 344 | mask |= IO_SPACE; |
353 | 345 | ||
354 | pci_write_reg(pcie, mask, PCIEPTCTLR(win)); | 346 | rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win)); |
355 | } | 347 | } |
356 | 348 | ||
357 | static int rcar_pcie_setup(int nr, struct pci_sys_data *sys) | 349 | static int rcar_pcie_setup(int nr, struct pci_sys_data *sys) |
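As the comment in this hunk notes, the PAMR mask is expressed in 128-byte units: the window size is rounded up to a power of two, divided by 128, decremented, and shifted into bits 31:7. A rough stand-alone sketch of that calculation, not part of the patch (only SZ_128 = 0x80 is taken from the driver):

    /* Illustrative only: how a 1.5 MiB outbound window becomes a PAMR value. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t size = 3 * 512 * 1024;          /* 1.5 MiB resource */
        uint64_t rounded = 1;

        while (rounded < size)                   /* roundup_pow_of_two() */
            rounded <<= 1;                       /* -> 2 MiB */

        uint32_t mask = rounded / 0x80 - 1;      /* 128-byte units: 0x3fff */
        uint32_t pamr = mask << 7;               /* PCIEPAMR layout: 0x001fff80 */

        printf("PAMR = 0x%08x\n", (unsigned)pamr);
        return 0;
    }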
@@ -363,13 +355,13 @@ static int rcar_pcie_setup(int nr, struct pci_sys_data *sys) | |||
363 | pcie->root_bus_nr = -1; | 355 | pcie->root_bus_nr = -1; |
364 | 356 | ||
365 | /* Setup PCI resources */ | 357 | /* Setup PCI resources */ |
366 | for (i = 0; i < PCI_MAX_RESOURCES; i++) { | 358 | for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) { |
367 | 359 | ||
368 | res = &pcie->res[i]; | 360 | res = &pcie->res[i]; |
369 | if (!res->flags) | 361 | if (!res->flags) |
370 | continue; | 362 | continue; |
371 | 363 | ||
372 | rcar_pcie_setup_window(i, res, pcie); | 364 | rcar_pcie_setup_window(i, pcie); |
373 | 365 | ||
374 | if (res->flags & IORESOURCE_IO) | 366 | if (res->flags & IORESOURCE_IO) |
375 | pci_ioremap_io(nr * SZ_64K, res->start); | 367 | pci_ioremap_io(nr * SZ_64K, res->start); |
@@ -415,7 +407,7 @@ static int phy_wait_for_ack(struct rcar_pcie *pcie) | |||
415 | unsigned int timeout = 100; | 407 | unsigned int timeout = 100; |
416 | 408 | ||
417 | while (timeout--) { | 409 | while (timeout--) { |
418 | if (pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK) | 410 | if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK) |
419 | return 0; | 411 | return 0; |
420 | 412 | ||
421 | udelay(100); | 413 | udelay(100); |
@@ -438,15 +430,15 @@ static void phy_write_reg(struct rcar_pcie *pcie, | |||
438 | ((addr & 0xff) << ADR_POS); | 430 | ((addr & 0xff) << ADR_POS); |
439 | 431 | ||
440 | /* Set write data */ | 432 | /* Set write data */ |
441 | pci_write_reg(pcie, data, H1_PCIEPHYDOUTR); | 433 | rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR); |
442 | pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR); | 434 | rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR); |
443 | 435 | ||
444 | /* Ignore errors as they will be dealt with if the data link is down */ | 436 | /* Ignore errors as they will be dealt with if the data link is down */ |
445 | phy_wait_for_ack(pcie); | 437 | phy_wait_for_ack(pcie); |
446 | 438 | ||
447 | /* Clear command */ | 439 | /* Clear command */ |
448 | pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR); | 440 | rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR); |
449 | pci_write_reg(pcie, 0, H1_PCIEPHYADRR); | 441 | rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR); |
450 | 442 | ||
451 | /* Ignore errors as they will be dealt with if the data link is down */ | 443 | /* Ignore errors as they will be dealt with if the data link is down */ |
452 | phy_wait_for_ack(pcie); | 444 | phy_wait_for_ack(pcie); |
@@ -457,7 +449,7 @@ static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie) | |||
457 | unsigned int timeout = 10; | 449 | unsigned int timeout = 10; |
458 | 450 | ||
459 | while (timeout--) { | 451 | while (timeout--) { |
460 | if ((pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE)) | 452 | if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE)) |
461 | return 0; | 453 | return 0; |
462 | 454 | ||
463 | msleep(5); | 455 | msleep(5); |
@@ -471,17 +463,17 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie) | |||
471 | int err; | 463 | int err; |
472 | 464 | ||
473 | /* Begin initialization */ | 465 | /* Begin initialization */ |
474 | pci_write_reg(pcie, 0, PCIETCTLR); | 466 | rcar_pci_write_reg(pcie, 0, PCIETCTLR); |
475 | 467 | ||
476 | /* Set mode */ | 468 | /* Set mode */ |
477 | pci_write_reg(pcie, 1, PCIEMSR); | 469 | rcar_pci_write_reg(pcie, 1, PCIEMSR); |
478 | 470 | ||
479 | /* | 471 | /* |
480 | * Initial header for port config space is type 1, set the device | 472 | * Initial header for port config space is type 1, set the device |
481 | * class to match. Hardware takes care of propagating the IDSETR | 473 | * class to match. Hardware takes care of propagating the IDSETR |
482 | * settings, so there is no need to bother with a quirk. | 474 | * settings, so there is no need to bother with a quirk. |
483 | */ | 475 | */ |
484 | pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1); | 476 | rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1); |
485 | 477 | ||
486 | /* | 478 | /* |
487 | * Setup Secondary Bus Number & Subordinate Bus Number, even though | 479 | * Setup Secondary Bus Number & Subordinate Bus Number, even though |
@@ -491,33 +483,31 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie) | |||
491 | rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1); | 483 | rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1); |
492 | 484 | ||
493 | /* Initialize default capabilities. */ | 485 | /* Initialize default capabilities. */ |
494 | rcar_rmw32(pcie, REXPCAP(0), 0, PCI_CAP_ID_EXP); | 486 | rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP); |
495 | rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS), | 487 | rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS), |
496 | PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4); | 488 | PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4); |
497 | rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f, | 489 | rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f, |
498 | PCI_HEADER_TYPE_BRIDGE); | 490 | PCI_HEADER_TYPE_BRIDGE); |
499 | 491 | ||
500 | /* Enable data link layer active state reporting */ | 492 | /* Enable data link layer active state reporting */ |
501 | rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), 0, PCI_EXP_LNKCAP_DLLLARC); | 493 | rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC, |
494 | PCI_EXP_LNKCAP_DLLLARC); | ||
502 | 495 | ||
503 | /* Write out the physical slot number = 0 */ | 496 | /* Write out the physical slot number = 0 */ |
504 | rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0); | 497 | rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0); |
505 | 498 | ||
506 | /* Set the completion timer timeout to the maximum 50ms. */ | 499 | /* Set the completion timer timeout to the maximum 50ms. */ |
507 | rcar_rmw32(pcie, TLCTLR+1, 0x3f, 50); | 500 | rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50); |
508 | 501 | ||
509 | /* Terminate list of capabilities (Next Capability Offset=0) */ | 502 | /* Terminate list of capabilities (Next Capability Offset=0) */ |
510 | rcar_rmw32(pcie, RVCCAP(0), 0xfff0, 0); | 503 | rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0); |
511 | |||
512 | /* Enable MAC data scrambling. */ | ||
513 | rcar_rmw32(pcie, MACCTLR, SCRAMBLE_DISABLE, 0); | ||
514 | 504 | ||
515 | /* Enable MSI */ | 505 | /* Enable MSI */ |
516 | if (IS_ENABLED(CONFIG_PCI_MSI)) | 506 | if (IS_ENABLED(CONFIG_PCI_MSI)) |
517 | pci_write_reg(pcie, 0x101f0000, PCIEMSITXR); | 507 | rcar_pci_write_reg(pcie, 0x101f0000, PCIEMSITXR); |
518 | 508 | ||
519 | /* Finish initialization - establish a PCI Express link */ | 509 | /* Finish initialization - establish a PCI Express link */ |
520 | pci_write_reg(pcie, CFINIT, PCIETCTLR); | 510 | rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); |
521 | 511 | ||
522 | /* This will timeout if we don't have a link. */ | 512 | /* This will timeout if we don't have a link. */ |
523 | err = rcar_pcie_wait_for_dl(pcie); | 513 | err = rcar_pcie_wait_for_dl(pcie); |
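The rcar_rmw32() calls fixed in the hunk above previously passed a mask of 0, so the helper never cleared the target field before ORing in the new value; passing the full field width makes it a genuine field update. An illustrative comparison (not part of the patch; the stale register contents are hypothetical):

    /* Illustrative only: why the capability-init calls now pass a real mask. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t data)
    {
        reg &= ~mask;    /* with mask == 0 this clears nothing...        */
        reg |= data;     /* ...so stale bits are merely ORed over        */
        return reg;
    }

    int main(void)
    {
        uint32_t stale = 0x23;    /* hypothetical leftover capability ID */

        printf("mask 0x00: 0x%02x\n", rmw(stale, 0x00, 0x10));  /* 0x33 - stale bits kept */
        printf("mask 0xff: 0x%02x\n", rmw(stale, 0xff, 0x10));  /* 0x10 - PCI_CAP_ID_EXP  */
        return 0;
    }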
@@ -527,11 +517,6 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie) | |||
527 | /* Enable INTx interrupts */ | 517 | /* Enable INTx interrupts */ |
528 | rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8); | 518 | rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8); |
529 | 519 | ||
530 | /* Enable slave Bus Mastering */ | ||
531 | rcar_rmw32(pcie, RCONF(PCI_STATUS), PCI_STATUS_DEVSEL_MASK, | ||
532 | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | | ||
533 | PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST); | ||
534 | |||
535 | wmb(); | 520 | wmb(); |
536 | 521 | ||
537 | return 0; | 522 | return 0; |
@@ -560,7 +545,7 @@ static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie) | |||
560 | phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000); | 545 | phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000); |
561 | 546 | ||
562 | while (timeout--) { | 547 | while (timeout--) { |
563 | if (pci_read_reg(pcie, H1_PCIEPHYSR)) | 548 | if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR)) |
564 | return rcar_pcie_hw_init(pcie); | 549 | return rcar_pcie_hw_init(pcie); |
565 | 550 | ||
566 | msleep(5); | 551 | msleep(5); |
@@ -599,7 +584,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) | |||
599 | struct rcar_msi *msi = &pcie->msi; | 584 | struct rcar_msi *msi = &pcie->msi; |
600 | unsigned long reg; | 585 | unsigned long reg; |
601 | 586 | ||
602 | reg = pci_read_reg(pcie, PCIEMSIFR); | 587 | reg = rcar_pci_read_reg(pcie, PCIEMSIFR); |
603 | 588 | ||
604 | /* MSI & INTx share an interrupt - we only handle MSI here */ | 589 | /* MSI & INTx share an interrupt - we only handle MSI here */ |
605 | if (!reg) | 590 | if (!reg) |
@@ -610,7 +595,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) | |||
610 | unsigned int irq; | 595 | unsigned int irq; |
611 | 596 | ||
612 | /* clear the interrupt */ | 597 | /* clear the interrupt */ |
613 | pci_write_reg(pcie, 1 << index, PCIEMSIFR); | 598 | rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR); |
614 | 599 | ||
615 | irq = irq_find_mapping(msi->domain, index); | 600 | irq = irq_find_mapping(msi->domain, index); |
616 | if (irq) { | 601 | if (irq) { |
@@ -624,7 +609,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) | |||
624 | } | 609 | } |
625 | 610 | ||
626 | /* see if there's any more pending in this vector */ | 611 | /* see if there's any more pending in this vector */ |
627 | reg = pci_read_reg(pcie, PCIEMSIFR); | 612 | reg = rcar_pci_read_reg(pcie, PCIEMSIFR); |
628 | } | 613 | } |
629 | 614 | ||
630 | return IRQ_HANDLED; | 615 | return IRQ_HANDLED; |
@@ -651,8 +636,8 @@ static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, | |||
651 | 636 | ||
652 | irq_set_msi_desc(irq, desc); | 637 | irq_set_msi_desc(irq, desc); |
653 | 638 | ||
654 | msg.address_lo = pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; | 639 | msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; |
655 | msg.address_hi = pci_read_reg(pcie, PCIEMSIAUR); | 640 | msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); |
656 | msg.data = hwirq; | 641 | msg.data = hwirq; |
657 | 642 | ||
658 | write_msi_msg(irq, &msg); | 643 | write_msi_msg(irq, &msg); |
@@ -729,11 +714,11 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) | |||
729 | msi->pages = __get_free_pages(GFP_KERNEL, 0); | 714 | msi->pages = __get_free_pages(GFP_KERNEL, 0); |
730 | base = virt_to_phys((void *)msi->pages); | 715 | base = virt_to_phys((void *)msi->pages); |
731 | 716 | ||
732 | pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); | 717 | rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); |
733 | pci_write_reg(pcie, 0, PCIEMSIAUR); | 718 | rcar_pci_write_reg(pcie, 0, PCIEMSIAUR); |
734 | 719 | ||
735 | /* enable all MSI interrupts */ | 720 | /* enable all MSI interrupts */ |
736 | pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); | 721 | rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); |
737 | 722 | ||
738 | return 0; | 723 | return 0; |
739 | 724 | ||
@@ -826,6 +811,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, | |||
826 | if (cpu_addr > 0) { | 811 | if (cpu_addr > 0) { |
827 | unsigned long nr_zeros = __ffs64(cpu_addr); | 812 | unsigned long nr_zeros = __ffs64(cpu_addr); |
828 | u64 alignment = 1ULL << nr_zeros; | 813 | u64 alignment = 1ULL << nr_zeros; |
814 | |||
829 | size = min(range->size, alignment); | 815 | size = min(range->size, alignment); |
830 | } else { | 816 | } else { |
831 | size = range->size; | 817 | size = range->size; |
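Context for the hunk above: each inbound window base must be naturally aligned, so the loop maps at most the largest power of two that divides the current CPU address on every pass. A simplified model of that chunking (addresses are invented, and the PCIEPRAR/PCIELAR/PCIELAMR programming is omitted):

    /* Illustrative only: chunking an inbound range by the alignment of its base. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t cpu_addr  = 0x48000000;    /* hypothetical DRAM base      */
        uint64_t remaining = 0x18000000;    /* 384 MiB left to map inbound */

        while (remaining) {
            /* largest power of two dividing cpu_addr, i.e. 1ULL << __ffs64() */
            uint64_t alignment = cpu_addr & -cpu_addr;
            uint64_t size = remaining < alignment ? remaining : alignment;

            printf("window: cpu 0x%010llx size 0x%09llx\n",
                   (unsigned long long)cpu_addr, (unsigned long long)size);

            cpu_addr  += size;
            remaining -= size;
        }
        return 0;
    }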
@@ -841,13 +827,13 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, | |||
841 | * Set up 64-bit inbound regions as the range parser doesn't | 827 | * Set up 64-bit inbound regions as the range parser doesn't |
842 | * distinguish between 32 and 64-bit types. | 828 | * distinguish between 32 and 64-bit types. |
843 | */ | 829 | */ |
844 | pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx)); | 830 | rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx)); |
845 | pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx)); | 831 | rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx)); |
846 | pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx)); | 832 | rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx)); |
847 | 833 | ||
848 | pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1)); | 834 | rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1)); |
849 | pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1)); | 835 | rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1)); |
850 | pci_write_reg(pcie, 0, PCIELAMR(idx+1)); | 836 | rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1)); |
851 | 837 | ||
852 | pci_addr += size; | 838 | pci_addr += size; |
853 | cpu_addr += size; | 839 | cpu_addr += size; |
@@ -952,7 +938,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) | |||
952 | of_pci_range_to_resource(&range, pdev->dev.of_node, | 938 | of_pci_range_to_resource(&range, pdev->dev.of_node, |
953 | &pcie->res[win++]); | 939 | &pcie->res[win++]); |
954 | 940 | ||
955 | if (win > PCI_MAX_RESOURCES) | 941 | if (win > RCAR_PCI_MAX_RESOURCES) |
956 | break; | 942 | break; |
957 | } | 943 | } |
958 | 944 | ||
@@ -982,7 +968,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) | |||
982 | return 0; | 968 | return 0; |
983 | } | 969 | } |
984 | 970 | ||
985 | data = pci_read_reg(pcie, MACSR); | 971 | data = rcar_pci_read_reg(pcie, MACSR); |
986 | dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); | 972 | dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); |
987 | 973 | ||
988 | rcar_pcie_enable(pcie); | 974 | rcar_pcie_enable(pcie); |
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index 4a392c44e3d3..d81648f71425 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -216,8 +216,7 @@ void cpqhp_create_debugfs_files(struct controller *ctrl)
216 | 216 | ||
217 | void cpqhp_remove_debugfs_files(struct controller *ctrl) | 217 | void cpqhp_remove_debugfs_files(struct controller *ctrl) |
218 | { | 218 | { |
219 | if (ctrl->dentry) | 219 | debugfs_remove(ctrl->dentry); |
220 | debugfs_remove(ctrl->dentry); | ||
221 | ctrl->dentry = NULL; | 220 | ctrl->dentry = NULL; |
222 | } | 221 | } |
223 | 222 | ||
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 8e9012dca450..9e5a9fbb93d7 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -92,9 +92,10 @@ struct controller {
92 | struct slot *slot; | 92 | struct slot *slot; |
93 | wait_queue_head_t queue; /* sleep & wake process */ | 93 | wait_queue_head_t queue; /* sleep & wake process */ |
94 | u32 slot_cap; | 94 | u32 slot_cap; |
95 | u32 slot_ctrl; | ||
95 | struct timer_list poll_timer; | 96 | struct timer_list poll_timer; |
97 | unsigned long cmd_started; /* jiffies */ | ||
96 | unsigned int cmd_busy:1; | 98 | unsigned int cmd_busy:1; |
97 | unsigned int no_cmd_complete:1; | ||
98 | unsigned int link_active_reporting:1; | 99 | unsigned int link_active_reporting:1; |
99 | unsigned int notification_enabled:1; | 100 | unsigned int notification_enabled:1; |
100 | unsigned int power_fault_detected; | 101 | unsigned int power_fault_detected; |
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index a2297db80813..07aa722bb12c 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -255,6 +255,13 @@ static int pciehp_probe(struct pcie_device *dev)
255 | else if (pciehp_acpi_slot_detection_check(dev->port)) | 255 | else if (pciehp_acpi_slot_detection_check(dev->port)) |
256 | goto err_out_none; | 256 | goto err_out_none; |
257 | 257 | ||
258 | if (!dev->port->subordinate) { | ||
259 | /* Can happen if we run out of bus numbers during probe */ | ||
260 | dev_err(&dev->device, | ||
261 | "Hotplug bridge without secondary bus, ignoring\n"); | ||
262 | goto err_out_none; | ||
263 | } | ||
264 | |||
258 | ctrl = pcie_init(dev); | 265 | ctrl = pcie_init(dev); |
259 | if (!ctrl) { | 266 | if (!ctrl) { |
260 | dev_err(&dev->device, "Controller initialization failed\n"); | 267 | dev_err(&dev->device, "Controller initialization failed\n"); |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 42914e04d110..9da84b8b27d8 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -104,11 +104,10 @@ static inline void pciehp_free_irq(struct controller *ctrl)
104 | free_irq(ctrl->pcie->irq, ctrl); | 104 | free_irq(ctrl->pcie->irq, ctrl); |
105 | } | 105 | } |
106 | 106 | ||
107 | static int pcie_poll_cmd(struct controller *ctrl) | 107 | static int pcie_poll_cmd(struct controller *ctrl, int timeout) |
108 | { | 108 | { |
109 | struct pci_dev *pdev = ctrl_dev(ctrl); | 109 | struct pci_dev *pdev = ctrl_dev(ctrl); |
110 | u16 slot_status; | 110 | u16 slot_status; |
111 | int timeout = 1000; | ||
112 | 111 | ||
113 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); | 112 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); |
114 | if (slot_status & PCI_EXP_SLTSTA_CC) { | 113 | if (slot_status & PCI_EXP_SLTSTA_CC) { |
@@ -129,18 +128,52 @@ static int pcie_poll_cmd(struct controller *ctrl) | |||
129 | return 0; /* timeout */ | 128 | return 0; /* timeout */ |
130 | } | 129 | } |
131 | 130 | ||
132 | static void pcie_wait_cmd(struct controller *ctrl, int poll) | 131 | static void pcie_wait_cmd(struct controller *ctrl) |
133 | { | 132 | { |
134 | unsigned int msecs = pciehp_poll_mode ? 2500 : 1000; | 133 | unsigned int msecs = pciehp_poll_mode ? 2500 : 1000; |
135 | unsigned long timeout = msecs_to_jiffies(msecs); | 134 | unsigned long duration = msecs_to_jiffies(msecs); |
135 | unsigned long cmd_timeout = ctrl->cmd_started + duration; | ||
136 | unsigned long now, timeout; | ||
136 | int rc; | 137 | int rc; |
137 | 138 | ||
138 | if (poll) | 139 | /* |
139 | rc = pcie_poll_cmd(ctrl); | 140 | * If the controller does not generate notifications for command |
141 | * completions, we never need to wait between writes. | ||
142 | */ | ||
143 | if (NO_CMD_CMPL(ctrl)) | ||
144 | return; | ||
145 | |||
146 | if (!ctrl->cmd_busy) | ||
147 | return; | ||
148 | |||
149 | /* | ||
150 | * Even if the command has already timed out, we want to call | ||
151 | * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC. | ||
152 | */ | ||
153 | now = jiffies; | ||
154 | if (time_before_eq(cmd_timeout, now)) | ||
155 | timeout = 1; | ||
140 | else | 156 | else |
157 | timeout = cmd_timeout - now; | ||
158 | |||
159 | if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE && | ||
160 | ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE) | ||
141 | rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); | 161 | rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); |
162 | else | ||
163 | rc = pcie_poll_cmd(ctrl, timeout); | ||
164 | |||
165 | /* | ||
166 | * Controllers with errata like Intel CF118 don't generate | ||
167 | * completion notifications unless the power/indicator/interlock | ||
168 | * control bits are changed. On such controllers, we'll emit this | ||
169 | * timeout message when we wait for completion of commands that | ||
170 | * don't change those bits, e.g., commands that merely enable | ||
171 | * interrupts. | ||
172 | */ | ||
142 | if (!rc) | 173 | if (!rc) |
143 | ctrl_dbg(ctrl, "Command not completed in 1000 msec\n"); | 174 | ctrl_info(ctrl, "Timeout on hotplug command %#010x (issued %u msec ago)\n", |
175 | ctrl->slot_ctrl, | ||
176 | jiffies_to_msecs(now - ctrl->cmd_started)); | ||
144 | } | 177 | } |
145 | 178 | ||
146 | /** | 179 | /** |
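The rewritten pcie_wait_cmd() above no longer sleeps a fixed interval for every command: it remembers when the previous command was issued (ctrl->cmd_started) and waits only for whatever remains of the allowance, still polling once when the deadline has already passed so PCI_EXP_SLTSTA_CC gets cleared. A simplified model of that arithmetic, not part of the patch (the kernel uses jiffies and time_before_eq() to stay wrap-safe, which this sketch ignores):

    /* Illustrative only: remaining wait for a previously issued hotplug command. */
    #include <stdio.h>

    /* Stand-in for jiffies; one "tick" per millisecond here. */
    static unsigned long now_ticks;

    static unsigned long remaining_wait(unsigned long cmd_started, unsigned long allowance)
    {
        unsigned long deadline = cmd_started + allowance;

        /* Already expired: still wait one tick so the status bit can be polled. */
        if (deadline <= now_ticks)
            return 1;
        return deadline - now_ticks;
    }

    int main(void)
    {
        now_ticks = 1500;
        printf("%lu\n", remaining_wait(1000, 1000));  /* issued 500 ms ago -> 500 left  */
        printf("%lu\n", remaining_wait(100, 1000));   /* long expired -> minimal poll   */
        return 0;
    }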
@@ -152,34 +185,12 @@ static void pcie_wait_cmd(struct controller *ctrl, int poll) | |||
152 | static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) | 185 | static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) |
153 | { | 186 | { |
154 | struct pci_dev *pdev = ctrl_dev(ctrl); | 187 | struct pci_dev *pdev = ctrl_dev(ctrl); |
155 | u16 slot_status; | ||
156 | u16 slot_ctrl; | 188 | u16 slot_ctrl; |
157 | 189 | ||
158 | mutex_lock(&ctrl->ctrl_lock); | 190 | mutex_lock(&ctrl->ctrl_lock); |
159 | 191 | ||
160 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); | 192 | /* Wait for any previous command that might still be in progress */ |
161 | if (slot_status & PCI_EXP_SLTSTA_CC) { | 193 | pcie_wait_cmd(ctrl); |
162 | pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, | ||
163 | PCI_EXP_SLTSTA_CC); | ||
164 | if (!ctrl->no_cmd_complete) { | ||
165 | /* | ||
166 | * After 1 sec and CMD_COMPLETED still not set, just | ||
167 | * proceed forward to issue the next command according | ||
168 | * to spec. Just print out the error message. | ||
169 | */ | ||
170 | ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n"); | ||
171 | } else if (!NO_CMD_CMPL(ctrl)) { | ||
172 | /* | ||
173 | * This controller seems to notify of command completed | ||
174 | * event even though it supports none of power | ||
175 | * controller, attention led, power led and EMI. | ||
176 | */ | ||
177 | ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Need to wait for command completed event\n"); | ||
178 | ctrl->no_cmd_complete = 0; | ||
179 | } else { | ||
180 | ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Maybe the controller is broken\n"); | ||
181 | } | ||
182 | } | ||
183 | 194 | ||
184 | pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl); | 195 | pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl); |
185 | slot_ctrl &= ~mask; | 196 | slot_ctrl &= ~mask; |
@@ -187,22 +198,9 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) | |||
187 | ctrl->cmd_busy = 1; | 198 | ctrl->cmd_busy = 1; |
188 | smp_mb(); | 199 | smp_mb(); |
189 | pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl); | 200 | pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl); |
201 | ctrl->cmd_started = jiffies; | ||
202 | ctrl->slot_ctrl = slot_ctrl; | ||
190 | 203 | ||
191 | /* | ||
192 | * Wait for command completion. | ||
193 | */ | ||
194 | if (!ctrl->no_cmd_complete) { | ||
195 | int poll = 0; | ||
196 | /* | ||
197 | * if hotplug interrupt is not enabled or command | ||
198 | * completed interrupt is not enabled, we need to poll | ||
199 | * command completed event. | ||
200 | */ | ||
201 | if (!(slot_ctrl & PCI_EXP_SLTCTL_HPIE) || | ||
202 | !(slot_ctrl & PCI_EXP_SLTCTL_CCIE)) | ||
203 | poll = 1; | ||
204 | pcie_wait_cmd(ctrl, poll); | ||
205 | } | ||
206 | mutex_unlock(&ctrl->ctrl_lock); | 204 | mutex_unlock(&ctrl->ctrl_lock); |
207 | } | 205 | } |
208 | 206 | ||
@@ -773,15 +771,6 @@ struct controller *pcie_init(struct pcie_device *dev) | |||
773 | mutex_init(&ctrl->ctrl_lock); | 771 | mutex_init(&ctrl->ctrl_lock); |
774 | init_waitqueue_head(&ctrl->queue); | 772 | init_waitqueue_head(&ctrl->queue); |
775 | dbg_ctrl(ctrl); | 773 | dbg_ctrl(ctrl); |
776 | /* | ||
777 | * Controller doesn't notify of command completion if the "No | ||
778 | * Command Completed Support" bit is set in Slot Capability | ||
779 | * register or the controller supports none of power | ||
780 | * controller, attention led, power led and EMI. | ||
781 | */ | ||
782 | if (NO_CMD_CMPL(ctrl) || | ||
783 | !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl))) | ||
784 | ctrl->no_cmd_complete = 1; | ||
785 | 774 | ||
786 | /* Check if Data Link Layer Link Active Reporting is implemented */ | 775 | /* Check if Data Link Layer Link Active Reporting is implemented */ |
787 | pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap); | 776 | pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap); |
@@ -794,7 +783,7 @@ struct controller *pcie_init(struct pcie_device *dev) | |||
794 | pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, | 783 | pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, |
795 | PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | | 784 | PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | |
796 | PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | | 785 | PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | |
797 | PCI_EXP_SLTSTA_CC); | 786 | PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC); |
798 | 787 | ||
799 | /* Disable software notification */ | 788 | /* Disable software notification */ |
800 | pcie_disable_notification(ctrl); | 789 | pcie_disable_notification(ctrl); |
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 13f3d3037272..50a7e4e96da7 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -149,15 +149,14 @@ static void msi_set_enable(struct pci_dev *dev, int enable)
149 | pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); | 149 | pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); |
150 | } | 150 | } |
151 | 151 | ||
152 | static void msix_set_enable(struct pci_dev *dev, int enable) | 152 | static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set) |
153 | { | 153 | { |
154 | u16 control; | 154 | u16 ctrl; |
155 | 155 | ||
156 | pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); | 156 | pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); |
157 | control &= ~PCI_MSIX_FLAGS_ENABLE; | 157 | ctrl &= ~clear; |
158 | if (enable) | 158 | ctrl |= set; |
159 | control |= PCI_MSIX_FLAGS_ENABLE; | 159 | pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); |
160 | pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control); | ||
161 | } | 160 | } |
162 | 161 | ||
163 | static inline __attribute_const__ u32 msi_mask(unsigned x) | 162 | static inline __attribute_const__ u32 msi_mask(unsigned x) |
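The new msix_clear_and_set_ctrl() helper introduced above replaces several open-coded read/modify/write sequences on the MSI-X Message Control word; callers just name the bits to clear and the bits to set. A stand-alone sketch of the pattern it factors out (not part of the patch; the fake register below stands in for config space):

    /* Illustrative only: the clear-then-set pattern msix_clear_and_set_ctrl() wraps. */
    #include <stdint.h>
    #include <stdio.h>

    #define MSIX_ENABLE  0x8000    /* PCI_MSIX_FLAGS_ENABLE  */
    #define MSIX_MASKALL 0x4000    /* PCI_MSIX_FLAGS_MASKALL */

    static uint16_t msix_ctrl = 0x0012;    /* pretend device register */

    static void clear_and_set(uint16_t clear, uint16_t set)
    {
        uint16_t ctrl = msix_ctrl;    /* read                 */

        ctrl &= ~clear;               /* clear requested bits */
        ctrl |= set;                  /* set requested bits   */
        msix_ctrl = ctrl;             /* write back           */
    }

    int main(void)
    {
        clear_and_set(0, MSIX_ENABLE | MSIX_MASKALL);   /* enable with all vectors masked */
        printf("0x%04x\n", msix_ctrl);                  /* 0xc012 */
        clear_and_set(MSIX_MASKALL, 0);                 /* unmask once tables are set up  */
        printf("0x%04x\n", msix_ctrl);                  /* 0x8012 */
        return 0;
    }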
@@ -168,16 +167,6 @@ static inline __attribute_const__ u32 msi_mask(unsigned x) | |||
168 | return (1 << (1 << x)) - 1; | 167 | return (1 << (1 << x)) - 1; |
169 | } | 168 | } |
170 | 169 | ||
171 | static inline __attribute_const__ u32 msi_capable_mask(u16 control) | ||
172 | { | ||
173 | return msi_mask((control >> 1) & 7); | ||
174 | } | ||
175 | |||
176 | static inline __attribute_const__ u32 msi_enabled_mask(u16 control) | ||
177 | { | ||
178 | return msi_mask((control >> 4) & 7); | ||
179 | } | ||
180 | |||
181 | /* | 170 | /* |
182 | * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to | 171 | * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to |
183 | * mask all MSI interrupts by clearing the MSI enable bit does not work | 172 | * mask all MSI interrupts by clearing the MSI enable bit does not work |
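For reference on the surviving msi_mask() helper: the MSI "multiple message capable" field is a log2 vector count, so (1 << (1 << x)) - 1 yields one mask bit per supported vector, and the full function separately guards larger values so the shift cannot overflow a 32-bit type. The removed msi_capable_mask()/msi_enabled_mask() wrappers only extracted that field from Message Control; the patch caches it in msi_attrib.multi_cap instead. A quick illustration (not part of the patch):

    /* Illustrative only: msi_mask() output for small Multiple Message Capable values. */
    #include <stdio.h>

    int main(void)
    {
        for (unsigned x = 0; x <= 4; x++) {
            unsigned vectors = 1u << x;              /* field stores log2(nvec) */
            unsigned mask = (1u << vectors) - 1;     /* one bit per vector      */

            printf("multi_cap=%u -> %2u vectors, mask 0x%04x\n", x, vectors, mask);
        }
        return 0;
    }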
@@ -460,7 +449,8 @@ static void __pci_restore_msi_state(struct pci_dev *dev) | |||
460 | arch_restore_msi_irqs(dev); | 449 | arch_restore_msi_irqs(dev); |
461 | 450 | ||
462 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); | 451 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); |
463 | msi_mask_irq(entry, msi_capable_mask(control), entry->masked); | 452 | msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap), |
453 | entry->masked); | ||
464 | control &= ~PCI_MSI_FLAGS_QSIZE; | 454 | control &= ~PCI_MSI_FLAGS_QSIZE; |
465 | control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE; | 455 | control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE; |
466 | pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); | 456 | pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); |
@@ -469,26 +459,23 @@ static void __pci_restore_msi_state(struct pci_dev *dev) | |||
469 | static void __pci_restore_msix_state(struct pci_dev *dev) | 459 | static void __pci_restore_msix_state(struct pci_dev *dev) |
470 | { | 460 | { |
471 | struct msi_desc *entry; | 461 | struct msi_desc *entry; |
472 | u16 control; | ||
473 | 462 | ||
474 | if (!dev->msix_enabled) | 463 | if (!dev->msix_enabled) |
475 | return; | 464 | return; |
476 | BUG_ON(list_empty(&dev->msi_list)); | 465 | BUG_ON(list_empty(&dev->msi_list)); |
477 | entry = list_first_entry(&dev->msi_list, struct msi_desc, list); | 466 | entry = list_first_entry(&dev->msi_list, struct msi_desc, list); |
478 | pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); | ||
479 | 467 | ||
480 | /* route the table */ | 468 | /* route the table */ |
481 | pci_intx_for_msi(dev, 0); | 469 | pci_intx_for_msi(dev, 0); |
482 | control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL; | 470 | msix_clear_and_set_ctrl(dev, 0, |
483 | pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control); | 471 | PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); |
484 | 472 | ||
485 | arch_restore_msi_irqs(dev); | 473 | arch_restore_msi_irqs(dev); |
486 | list_for_each_entry(entry, &dev->msi_list, list) { | 474 | list_for_each_entry(entry, &dev->msi_list, list) { |
487 | msix_mask_irq(entry, entry->masked); | 475 | msix_mask_irq(entry, entry->masked); |
488 | } | 476 | } |
489 | 477 | ||
490 | control &= ~PCI_MSIX_FLAGS_MASKALL; | 478 | msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); |
491 | pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control); | ||
492 | } | 479 | } |
493 | 480 | ||
494 | void pci_restore_msi_state(struct pci_dev *dev) | 481 | void pci_restore_msi_state(struct pci_dev *dev) |
@@ -626,6 +613,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) | |||
626 | entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); | 613 | entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); |
627 | entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ | 614 | entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ |
628 | entry->msi_attrib.pos = dev->msi_cap; | 615 | entry->msi_attrib.pos = dev->msi_cap; |
616 | entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; | ||
629 | 617 | ||
630 | if (control & PCI_MSI_FLAGS_64BIT) | 618 | if (control & PCI_MSI_FLAGS_64BIT) |
631 | entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; | 619 | entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; |
@@ -634,7 +622,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) | |||
634 | /* All MSIs are unmasked by default, Mask them all */ | 622 | /* All MSIs are unmasked by default, Mask them all */ |
635 | if (entry->msi_attrib.maskbit) | 623 | if (entry->msi_attrib.maskbit) |
636 | pci_read_config_dword(dev, entry->mask_pos, &entry->masked); | 624 | pci_read_config_dword(dev, entry->mask_pos, &entry->masked); |
637 | mask = msi_capable_mask(control); | 625 | mask = msi_mask(entry->msi_attrib.multi_cap); |
638 | msi_mask_irq(entry, mask, mask); | 626 | msi_mask_irq(entry, mask, mask); |
639 | 627 | ||
640 | list_add_tail(&entry->list, &dev->msi_list); | 628 | list_add_tail(&entry->list, &dev->msi_list); |
@@ -743,12 +731,10 @@ static int msix_capability_init(struct pci_dev *dev, | |||
743 | u16 control; | 731 | u16 control; |
744 | void __iomem *base; | 732 | void __iomem *base; |
745 | 733 | ||
746 | pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); | ||
747 | |||
748 | /* Ensure MSI-X is disabled while it is set up */ | 734 | /* Ensure MSI-X is disabled while it is set up */ |
749 | control &= ~PCI_MSIX_FLAGS_ENABLE; | 735 | msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); |
750 | pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control); | ||
751 | 736 | ||
737 | pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); | ||
752 | /* Request & Map MSI-X table region */ | 738 | /* Request & Map MSI-X table region */ |
753 | base = msix_map_region(dev, msix_table_size(control)); | 739 | base = msix_map_region(dev, msix_table_size(control)); |
754 | if (!base) | 740 | if (!base) |
@@ -767,8 +753,8 @@ static int msix_capability_init(struct pci_dev *dev, | |||
767 | * MSI-X registers. We need to mask all the vectors to prevent | 753 | * MSI-X registers. We need to mask all the vectors to prevent |
768 | * interrupts coming in before they're fully set up. | 754 | * interrupts coming in before they're fully set up. |
769 | */ | 755 | */ |
770 | control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE; | 756 | msix_clear_and_set_ctrl(dev, 0, |
771 | pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control); | 757 | PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); |
772 | 758 | ||
773 | msix_program_entries(dev, entries); | 759 | msix_program_entries(dev, entries); |
774 | 760 | ||
@@ -780,8 +766,7 @@ static int msix_capability_init(struct pci_dev *dev, | |||
780 | pci_intx_for_msi(dev, 0); | 766 | pci_intx_for_msi(dev, 0); |
781 | dev->msix_enabled = 1; | 767 | dev->msix_enabled = 1; |
782 | 768 | ||
783 | control &= ~PCI_MSIX_FLAGS_MASKALL; | 769 | msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); |
784 | pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control); | ||
785 | 770 | ||
786 | return 0; | 771 | return 0; |
787 | 772 | ||
@@ -882,7 +867,6 @@ void pci_msi_shutdown(struct pci_dev *dev) | |||
882 | { | 867 | { |
883 | struct msi_desc *desc; | 868 | struct msi_desc *desc; |
884 | u32 mask; | 869 | u32 mask; |
885 | u16 ctrl; | ||
886 | 870 | ||
887 | if (!pci_msi_enable || !dev || !dev->msi_enabled) | 871 | if (!pci_msi_enable || !dev || !dev->msi_enabled) |
888 | return; | 872 | return; |
@@ -895,8 +879,7 @@ void pci_msi_shutdown(struct pci_dev *dev) | |||
895 | dev->msi_enabled = 0; | 879 | dev->msi_enabled = 0; |
896 | 880 | ||
897 | /* Return the device with MSI unmasked as initial states */ | 881 | /* Return the device with MSI unmasked as initial states */ |
898 | pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl); | 882 | mask = msi_mask(desc->msi_attrib.multi_cap); |
899 | mask = msi_capable_mask(ctrl); | ||
900 | /* Keep cached state to be restored */ | 883 | /* Keep cached state to be restored */ |
901 | arch_msi_mask_irq(desc, mask, ~mask); | 884 | arch_msi_mask_irq(desc, mask, ~mask); |
902 | 885 | ||
@@ -1001,7 +984,7 @@ void pci_msix_shutdown(struct pci_dev *dev) | |||
1001 | arch_msix_mask_irq(entry, 1); | 984 | arch_msix_mask_irq(entry, 1); |
1002 | } | 985 | } |
1003 | 986 | ||
1004 | msix_set_enable(dev, 0); | 987 | msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); |
1005 | pci_intx_for_msi(dev, 1); | 988 | pci_intx_for_msi(dev, 1); |
1006 | dev->msix_enabled = 0; | 989 | dev->msix_enabled = 0; |
1007 | } | 990 | } |
@@ -1065,7 +1048,7 @@ void pci_msi_init_pci_dev(struct pci_dev *dev) | |||
1065 | 1048 | ||
1066 | dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 1049 | dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); |
1067 | if (dev->msix_cap) | 1050 | if (dev->msix_cap) |
1068 | msix_set_enable(dev, 0); | 1051 | msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); |
1069 | } | 1052 | } |
1070 | 1053 | ||
1071 | /** | 1054 | /** |
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index a3fbe2012ea3..2ab1b47c7651 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -161,8 +161,8 @@ enum acpi_attr_enum {
161 | static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf) | 161 | static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf) |
162 | { | 162 | { |
163 | int len; | 163 | int len; |
164 | len = utf16s_to_utf8s((const wchar_t *)obj->string.pointer, | 164 | len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer, |
165 | obj->string.length, | 165 | obj->buffer.length, |
166 | UTF16_LITTLE_ENDIAN, | 166 | UTF16_LITTLE_ENDIAN, |
167 | buf, PAGE_SIZE); | 167 | buf, PAGE_SIZE); |
168 | buf[len] = '\n'; | 168 | buf[len] = '\n'; |
@@ -187,16 +187,22 @@ static int dsm_get_label(struct device *dev, char *buf, | |||
187 | tmp = obj->package.elements; | 187 | tmp = obj->package.elements; |
188 | if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2 && | 188 | if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2 && |
189 | tmp[0].type == ACPI_TYPE_INTEGER && | 189 | tmp[0].type == ACPI_TYPE_INTEGER && |
190 | tmp[1].type == ACPI_TYPE_STRING) { | 190 | (tmp[1].type == ACPI_TYPE_STRING || |
191 | tmp[1].type == ACPI_TYPE_BUFFER)) { | ||
191 | /* | 192 | /* |
192 | * The second string element is optional even when | 193 | * The second string element is optional even when |
193 | * this _DSM is implemented; when not implemented, | 194 | * this _DSM is implemented; when not implemented, |
194 | * this entry must return a null string. | 195 | * this entry must return a null string. |
195 | */ | 196 | */ |
196 | if (attr == ACPI_ATTR_INDEX_SHOW) | 197 | if (attr == ACPI_ATTR_INDEX_SHOW) { |
197 | scnprintf(buf, PAGE_SIZE, "%llu\n", tmp->integer.value); | 198 | scnprintf(buf, PAGE_SIZE, "%llu\n", tmp->integer.value); |
198 | else if (attr == ACPI_ATTR_LABEL_SHOW) | 199 | } else if (attr == ACPI_ATTR_LABEL_SHOW) { |
199 | dsm_label_utf16s_to_utf8s(tmp + 1, buf); | 200 | if (tmp[1].type == ACPI_TYPE_STRING) |
201 | scnprintf(buf, PAGE_SIZE, "%s\n", | ||
202 | tmp[1].string.pointer); | ||
203 | else if (tmp[1].type == ACPI_TYPE_BUFFER) | ||
204 | dsm_label_utf16s_to_utf8s(tmp + 1, buf); | ||
205 | } | ||
200 | len = strlen(buf) > 0 ? strlen(buf) : -1; | 206 | len = strlen(buf) > 0 ? strlen(buf) : -1; |
201 | } | 207 | } |
202 | 208 | ||
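The pci-label change above teaches dsm_get_label() to accept the label either as an ACPI string (printed as-is) or as an ACPI buffer holding UTF-16 (converted by dsm_label_utf16s_to_utf8s()), since firmware returns both forms. A toy dispatch on the same idea, not part of the patch and not the ACPI API (types and the conversion are deliberately crude):

    /* Illustrative only: handling a label delivered as ASCII or as UTF-16LE bytes. */
    #include <stdio.h>

    enum label_type { LABEL_STRING, LABEL_BUFFER };

    struct label_obj {
        enum label_type type;
        const char *ascii;             /* valid when type == LABEL_STRING */
        const unsigned char *utf16;    /* valid when type == LABEL_BUFFER */
        int utf16_len;                 /* buffer length in bytes          */
    };

    static void show_label(const struct label_obj *obj)
    {
        if (obj->type == LABEL_STRING) {
            printf("%s\n", obj->ascii);    /* plain string: print directly */
        } else {
            /* crude UTF-16LE -> ASCII; the kernel uses utf16s_to_utf8s() */
            for (int i = 0; i + 1 < obj->utf16_len; i += 2)
                putchar(obj->utf16[i]);
            putchar('\n');
        }
    }

    int main(void)
    {
        unsigned char raw[] = { 'S', 0, 'l', 0, 'o', 0, 't', 0, ' ', 0, '3', 0 };
        struct label_obj s = { LABEL_STRING, "Slot 3", NULL, 0 };
        struct label_obj b = { LABEL_BUFFER, NULL, raw, sizeof(raw) };

        show_label(&s);
        show_label(&b);
        return 0;
    }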
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 80887eaa0668..2ccc9b926ea7 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -203,10 +203,6 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
203 | (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM))) | 203 | (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM))) |
204 | return -ENODEV; | 204 | return -ENODEV; |
205 | 205 | ||
206 | if (!dev->irq && dev->pin) { | ||
207 | dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; check vendor BIOS\n", | ||
208 | dev->vendor, dev->device); | ||
209 | } | ||
210 | status = pcie_port_device_register(dev); | 206 | status = pcie_port_device_register(dev); |
211 | if (status) | 207 | if (status) |
212 | return status; | 208 | return status; |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index d0f69269eb6c..ad566827b547 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3405,6 +3405,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
3405 | DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias); | 3405 | DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias); |
3406 | /* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */ | 3406 | /* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */ |
3407 | DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias); | 3407 | DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias); |
3408 | /* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */ | ||
3409 | DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias); | ||
3408 | 3410 | ||
3409 | static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev) | 3411 | static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev) |
3410 | { | 3412 | { |
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 92a2f991262a..8103f32f6d87 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -25,7 +25,8 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg);
25 | struct msi_desc { | 25 | struct msi_desc { |
26 | struct { | 26 | struct { |
27 | __u8 is_msix : 1; | 27 | __u8 is_msix : 1; |
28 | __u8 multiple: 3; /* log2 number of messages */ | 28 | __u8 multiple: 3; /* log2 num of messages allocated */ |
29 | __u8 multi_cap : 3; /* log2 num of messages supported */ | ||
29 | __u8 maskbit : 1; /* mask-pending bit supported ? */ | 30 | __u8 maskbit : 1; /* mask-pending bit supported ? */ |
30 | __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ | 31 | __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ |
31 | __u8 pos; /* Location of the msi capability */ | 32 | __u8 pos; /* Location of the msi capability */ |
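The new multi_cap bitfield added here mirrors the existing multiple field: both are log2 encodings, one recording how many vectors the device advertises and one how many the driver was actually granted. A tiny illustration with hypothetical numbers (not part of the patch):

    /* Illustrative only: log2 encoding of the 'multi_cap' and 'multiple' bitfields. */
    #include <stdio.h>

    int main(void)
    {
        unsigned multi_cap = 3;    /* device advertises 2^3 = 8 vectors  */
        unsigned multiple  = 2;    /* driver was granted 2^2 = 4 vectors */

        printf("capable of %u vectors (mask 0x%02x), allocated %u (mask 0x%02x)\n",
               1u << multi_cap, (1u << (1u << multi_cap)) - 1,
               1u << multiple,  (1u << (1u << multiple)) - 1);
        return 0;
    }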