Diffstat (limited to 'drivers/pci/controller/dwc/pcie-designware-ep.c')
-rw-r--r--	drivers/pci/controller/dwc/pcie-designware-ep.c	| 210
1 file changed, 185 insertions(+), 25 deletions(-)
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 8650416f6f9e..1e7b02221eac 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -40,6 +40,39 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
 	__dw_pcie_ep_reset_bar(pci, bar, 0);
 }
 
+static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
+				     u8 cap)
+{
+	u8 cap_id, next_cap_ptr;
+	u16 reg;
+
+	reg = dw_pcie_readw_dbi(pci, cap_ptr);
+	next_cap_ptr = (reg & 0xff00) >> 8;
+	cap_id = (reg & 0x00ff);
+
+	if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
+		return 0;
+
+	if (cap_id == cap)
+		return cap_ptr;
+
+	return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
+}
+
+static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
+{
+	u8 next_cap_ptr;
+	u16 reg;
+
+	reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
+	next_cap_ptr = (reg & 0x00ff);
+
+	if (!next_cap_ptr)
+		return 0;
+
+	return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
+}
+
 static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
 				   struct pci_epf_header *hdr)
 {
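The two helpers added above are a standard config-space capability-list walk done through the DBI: PCI_CAPABILITY_LIST (0x34) holds the offset of the first capability, and each capability header keeps its ID in the low byte and the next-capability pointer in the high byte, which is what the 0x00ff and 0xff00 masks pull apart. Purely as a reviewing aid (not part of the patch), the recursion is equivalent to this iterative sketch:

/* Illustrative only: iterative equivalent of the recursive walk above. */
static u8 dw_pcie_ep_find_capability_iter(struct dw_pcie *pci, u8 cap)
{
	u8 cap_id, next_cap_ptr;
	u16 reg;

	reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
	next_cap_ptr = reg & 0x00ff;

	while (next_cap_ptr) {
		reg = dw_pcie_readw_dbi(pci, next_cap_ptr);
		cap_id = reg & 0x00ff;
		if (cap_id > PCI_CAP_ID_MAX)
			break;
		if (cap_id == cap)
			return next_cap_ptr;
		next_cap_ptr = (reg & 0xff00) >> 8;
	}

	return 0;
}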
@@ -213,36 +246,84 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
 
 static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
 {
-	int val;
 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
+
+	if (!ep->msi_cap)
+		return -EINVAL;
+
+	reg = ep->msi_cap + PCI_MSI_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	if (!(val & PCI_MSI_FLAGS_ENABLE))
+		return -EINVAL;
+
+	val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
+
+	return val;
+}
+
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
+
+	if (!ep->msi_cap)
+		return -EINVAL;
+
+	reg = ep->msi_cap + PCI_MSI_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	val &= ~PCI_MSI_FLAGS_QMASK;
+	val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
+	dw_pcie_dbi_ro_wr_en(pci);
+	dw_pcie_writew_dbi(pci, reg, val);
+	dw_pcie_dbi_ro_wr_dis(pci);
+
+	return 0;
+}
+
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
 
-	val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
-	if (!(val & MSI_CAP_MSI_EN_MASK))
+	if (!ep->msix_cap)
 		return -EINVAL;
 
-	val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
+	reg = ep->msix_cap + PCI_MSIX_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	if (!(val & PCI_MSIX_FLAGS_ENABLE))
+		return -EINVAL;
+
+	val &= PCI_MSIX_FLAGS_QSIZE;
+
 	return val;
 }
 
-static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int)
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
 {
-	int val;
 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
+
+	if (!ep->msix_cap)
+		return -EINVAL;
 
-	val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
-	val &= ~MSI_CAP_MMC_MASK;
-	val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
+	reg = ep->msix_cap + PCI_MSIX_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	val &= ~PCI_MSIX_FLAGS_QSIZE;
+	val |= interrupts;
 	dw_pcie_dbi_ro_wr_en(pci);
-	dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
+	dw_pcie_writew_dbi(pci, reg, val);
 	dw_pcie_dbi_ro_wr_dis(pci);
 
 	return 0;
 }
 
 static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
-				enum pci_epc_irq_type type, u8 interrupt_num)
+				enum pci_epc_irq_type type, u16 interrupt_num)
 {
 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 
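For context, the new get_msix/set_msix ops are not called directly by function drivers; they are reached through the matching pci_epc_get_msix()/pci_epc_set_msix() wrappers that the same series adds to the EPC core. A rough usage sketch under that assumption (epf_example_bind() is a made-up function-driver hook, and the exact count encoding handled by the wrappers should be checked against the EPC core patch):

/* Hypothetical function-driver hook; illustrates the EPC core wrappers only. */
static int epf_example_bind(struct pci_epf *epf)
{
	struct pci_epc *epc = epf->epc;
	int nr_vecs;

	/* Advertise up to 32 MSI-X vectors in the endpoint's capability. */
	if (pci_epc_set_msix(epc, epf->func_no, 32))
		return -EINVAL;

	/*
	 * Once the host has enabled MSI-X, query how many vectors were
	 * configured; a non-positive return means MSI-X is not enabled yet.
	 */
	nr_vecs = pci_epc_get_msix(epc, epf->func_no);
	if (nr_vecs <= 0)
		return -EAGAIN;

	return 0;
}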
@@ -282,32 +363,52 @@ static const struct pci_epc_ops epc_ops = {
 	.unmap_addr = dw_pcie_ep_unmap_addr,
 	.set_msi = dw_pcie_ep_set_msi,
 	.get_msi = dw_pcie_ep_get_msi,
+	.set_msix = dw_pcie_ep_set_msix,
+	.get_msix = dw_pcie_ep_get_msix,
 	.raise_irq = dw_pcie_ep_raise_irq,
 	.start = dw_pcie_ep_start,
 	.stop = dw_pcie_ep_stop,
 };
 
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct device *dev = pci->dev;
+
+	dev_err(dev, "EP cannot trigger legacy IRQs\n");
+
+	return -EINVAL;
+}
+
 int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
 			     u8 interrupt_num)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 	struct pci_epc *epc = ep->epc;
 	u16 msg_ctrl, msg_data;
-	u32 msg_addr_lower, msg_addr_upper;
+	u32 msg_addr_lower, msg_addr_upper, reg;
 	u64 msg_addr;
 	bool has_upper;
 	int ret;
 
+	if (!ep->msi_cap)
+		return -EINVAL;
+
 	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
-	msg_ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
+	reg = ep->msi_cap + PCI_MSI_FLAGS;
+	msg_ctrl = dw_pcie_readw_dbi(pci, reg);
 	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
-	msg_addr_lower = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
+	reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
+	msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
 	if (has_upper) {
-		msg_addr_upper = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
-		msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_64);
+		reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
+		msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
+		reg = ep->msi_cap + PCI_MSI_DATA_64;
+		msg_data = dw_pcie_readw_dbi(pci, reg);
 	} else {
 		msg_addr_upper = 0;
-		msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_32);
+		reg = ep->msi_cap + PCI_MSI_DATA_32;
+		msg_data = dw_pcie_readw_dbi(pci, reg);
 	}
 	msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
 	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
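The MSI raise path above now derives every register address from the discovered capability offset instead of the old driver-private MSI_MESSAGE_* defines, using the standard pci_regs.h layout: PCI_MSI_FLAGS is the Message Control word at +0x2, PCI_MSI_ADDRESS_LO sits at +0x4, and the message data lives at +0x8 for a 32-bit capability (PCI_MSI_DATA_32) but at +0xc for a 64-bit one (PCI_MSI_DATA_64, with PCI_MSI_ADDRESS_HI occupying +0x8). That is why the data read has to follow the PCI_MSI_FLAGS_64BIT branch; as a sketch only (msi_data_offset() is a hypothetical helper, not something the patch adds), the branch boils down to:

/* Hypothetical helper: where the MSI data word sits for this capability. */
static u32 msi_data_offset(struct dw_pcie_ep *ep, bool has_upper)
{
	/* A 64-bit address capability pushes the data register out by 4. */
	return ep->msi_cap + (has_upper ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32);
}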
@@ -322,6 +423,64 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
 	return 0;
 }
 
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+			      u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct pci_epc *epc = ep->epc;
+	u16 tbl_offset, bir;
+	u32 bar_addr_upper, bar_addr_lower;
+	u32 msg_addr_upper, msg_addr_lower;
+	u32 reg, msg_data, vec_ctrl;
+	u64 tbl_addr, msg_addr, reg_u64;
+	void __iomem *msix_tbl;
+	int ret;
+
+	reg = ep->msix_cap + PCI_MSIX_TABLE;
+	tbl_offset = dw_pcie_readl_dbi(pci, reg);
+	bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
+	tbl_offset &= PCI_MSIX_TABLE_OFFSET;
+	tbl_offset >>= 3;
+
+	reg = PCI_BASE_ADDRESS_0 + (4 * bir);
+	bar_addr_upper = 0;
+	bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
+	reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
+	if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
+		bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
+
+	tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
+	tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
+	tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
+
+	msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
+				   PCI_MSIX_ENTRY_SIZE);
+	if (!msix_tbl)
+		return -EINVAL;
+
+	msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
+	msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
+	msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
+	msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
+	vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
+	iounmap(msix_tbl);
+
+	if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)
+		return -EPERM;
+
+	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+				  epc->mem->page_size);
+	if (ret)
+		return ret;
+
+	writel(msg_data, ep->msi_mem);
+
+	dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+
+	return 0;
+}
+
 void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
 {
 	struct pci_epc *epc = ep->epc;
@@ -386,15 +545,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 		return -ENOMEM;
 	ep->outbound_addr = addr;
 
-	if (ep->ops->ep_init)
-		ep->ops->ep_init(ep);
-
 	epc = devm_pci_epc_create(dev, &epc_ops);
 	if (IS_ERR(epc)) {
 		dev_err(dev, "Failed to create epc device\n");
 		return PTR_ERR(epc);
 	}
 
+	ep->epc = epc;
+	epc_set_drvdata(epc, ep);
+
+	if (ep->ops->ep_init)
+		ep->ops->ep_init(ep);
+
 	ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
 	if (ret < 0)
 		epc->max_functions = 1;
@@ -409,15 +571,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 	ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
 					     epc->mem->page_size);
 	if (!ep->msi_mem) {
-		dev_err(dev, "Failed to reserve memory for MSI\n");
+		dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
 		return -ENOMEM;
 	}
+	ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI);
 
-	epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER;
-	EPC_FEATURE_SET_BAR(epc->features, BAR_0);
+	ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX);
 
-	ep->epc = epc;
-	epc_set_drvdata(epc, ep);
 	dw_pcie_setup(pci);
 
 	return 0;
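The exported dw_pcie_ep_raise_msix_irq() (and the new dw_pcie_ep_raise_legacy_irq() stub) are meant to be called from a glue driver's ep_ops->raise_irq hook, which the EPC core reaches via pci_epc_raise_irq(..., PCI_EPC_IRQ_MSIX, vector). A sketch of such a hook, written from memory of the pcie-designware-plat.c callback updated elsewhere in this series rather than copied from it:

static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				     enum pci_epc_irq_type type,
				     u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return dw_pcie_ep_raise_legacy_irq(ep, func_no);
	case PCI_EPC_IRQ_MSI:
		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
	case PCI_EPC_IRQ_MSIX:
		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}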