// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe Endpoint controller driver
 *
 * Copyright (C) 2017 Texas Instruments
 */
#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
19 * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
20 * the endpoint function
22 * @func_no: Function number of the endpoint device
24 * Return: struct dw_pcie_ep_func if success, NULL otherwise.
26 struct dw_pcie_ep_func *
27 dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
29 struct dw_pcie_ep_func *ep_func;
31 list_for_each_entry(ep_func, &ep->func_list, list) {
32 if (ep_func->func_no == func_no)
39 static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
40 enum pci_barno bar, int flags)
42 struct dw_pcie_ep *ep = &pci->ep;
45 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
46 dw_pcie_dbi_ro_wr_en(pci);
47 dw_pcie_ep_writel_dbi2(ep, func_no, reg, 0x0);
48 dw_pcie_ep_writel_dbi(ep, func_no, reg, 0x0);
49 if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
50 dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0x0);
51 dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0x0);
53 dw_pcie_dbi_ro_wr_dis(pci);
57 * dw_pcie_ep_reset_bar - Reset endpoint BAR
58 * @pci: DWC PCI device
59 * @bar: BAR number of the endpoint
61 void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
65 funcs = pci->ep.epc->max_functions;
67 for (func_no = 0; func_no < funcs; func_no++)
68 __dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
70 EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
72 static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
75 u8 cap_id, next_cap_ptr;
81 reg = dw_pcie_ep_readw_dbi(ep, func_no, cap_ptr);
82 cap_id = (reg & 0x00ff);
84 if (cap_id > PCI_CAP_ID_MAX)
90 next_cap_ptr = (reg & 0xff00) >> 8;
91 return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
94 static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
99 reg = dw_pcie_ep_readw_dbi(ep, func_no, PCI_CAPABILITY_LIST);
100 next_cap_ptr = (reg & 0x00ff);
102 return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
105 static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
106 struct pci_epf_header *hdr)
108 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
109 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
111 dw_pcie_dbi_ro_wr_en(pci);
112 dw_pcie_ep_writew_dbi(ep, func_no, PCI_VENDOR_ID, hdr->vendorid);
113 dw_pcie_ep_writew_dbi(ep, func_no, PCI_DEVICE_ID, hdr->deviceid);
114 dw_pcie_ep_writeb_dbi(ep, func_no, PCI_REVISION_ID, hdr->revid);
115 dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CLASS_PROG, hdr->progif_code);
116 dw_pcie_ep_writew_dbi(ep, func_no, PCI_CLASS_DEVICE,
117 hdr->subclass_code | hdr->baseclass_code << 8);
118 dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CACHE_LINE_SIZE,
119 hdr->cache_line_size);
120 dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_VENDOR_ID,
121 hdr->subsys_vendor_id);
122 dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_ID, hdr->subsys_id);
123 dw_pcie_ep_writeb_dbi(ep, func_no, PCI_INTERRUPT_PIN,
125 dw_pcie_dbi_ro_wr_dis(pci);
130 static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
131 dma_addr_t cpu_addr, enum pci_barno bar)
135 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
137 if (!ep->bar_to_atu[bar])
138 free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
140 free_win = ep->bar_to_atu[bar] - 1;
142 if (free_win >= pci->num_ib_windows) {
143 dev_err(pci->dev, "No free inbound window\n");
147 ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
150 dev_err(pci->dev, "Failed to program IB window\n");
155 * Always increment free_win before assignment, since value 0 is used to identify
156 * unallocated mapping.
158 ep->bar_to_atu[bar] = free_win + 1;
159 set_bit(free_win, ep->ib_window_map);
164 static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
165 struct dw_pcie_ob_atu_cfg *atu)
167 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
171 free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
172 if (free_win >= pci->num_ob_windows) {
173 dev_err(pci->dev, "No free outbound window\n");
177 atu->index = free_win;
178 ret = dw_pcie_prog_outbound_atu(pci, atu);
182 set_bit(free_win, ep->ob_window_map);
183 ep->outbound_addr[free_win] = atu->cpu_addr;
188 static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
189 struct pci_epf_bar *epf_bar)
191 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
192 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
193 enum pci_barno bar = epf_bar->barno;
194 u32 atu_index = ep->bar_to_atu[bar] - 1;
196 if (!ep->bar_to_atu[bar])
199 __dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
201 dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
202 clear_bit(atu_index, ep->ib_window_map);
203 ep->epf_bar[bar] = NULL;
204 ep->bar_to_atu[bar] = 0;
207 static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
208 struct pci_epf_bar *epf_bar)
210 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
211 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
212 enum pci_barno bar = epf_bar->barno;
213 size_t size = epf_bar->size;
214 int flags = epf_bar->flags;
219 * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
220 * 1 and 2 to form a 64-bit BAR.
222 if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
225 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
227 if (!(flags & PCI_BASE_ADDRESS_SPACE))
228 type = PCIE_ATU_TYPE_MEM;
230 type = PCIE_ATU_TYPE_IO;
232 ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
236 if (ep->epf_bar[bar])
239 dw_pcie_dbi_ro_wr_en(pci);
241 dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
242 dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
244 if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
245 dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
246 dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
249 ep->epf_bar[bar] = epf_bar;
250 dw_pcie_dbi_ro_wr_dis(pci);
255 static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
259 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
261 for (index = 0; index < pci->num_ob_windows; index++) {
262 if (ep->outbound_addr[index] != addr)
271 static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
276 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
277 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
279 ret = dw_pcie_find_index(ep, addr, &atu_index);
283 dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
284 clear_bit(atu_index, ep->ob_window_map);
287 static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
288 phys_addr_t addr, u64 pci_addr, size_t size)
291 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
292 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
293 struct dw_pcie_ob_atu_cfg atu = { 0 };
295 atu.func_no = func_no;
296 atu.type = PCIE_ATU_TYPE_MEM;
298 atu.pci_addr = pci_addr;
300 ret = dw_pcie_ep_outbound_atu(ep, &atu);
302 dev_err(pci->dev, "Failed to enable address\n");
309 static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
311 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
312 struct dw_pcie_ep_func *ep_func;
315 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
316 if (!ep_func || !ep_func->msi_cap)
319 reg = ep_func->msi_cap + PCI_MSI_FLAGS;
320 val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
321 if (!(val & PCI_MSI_FLAGS_ENABLE))
324 val = FIELD_GET(PCI_MSI_FLAGS_QSIZE, val);
329 static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
332 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
333 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
334 struct dw_pcie_ep_func *ep_func;
337 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
338 if (!ep_func || !ep_func->msi_cap)
341 reg = ep_func->msi_cap + PCI_MSI_FLAGS;
342 val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
343 val &= ~PCI_MSI_FLAGS_QMASK;
344 val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, interrupts);
345 dw_pcie_dbi_ro_wr_en(pci);
346 dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
347 dw_pcie_dbi_ro_wr_dis(pci);
352 static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
354 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
355 struct dw_pcie_ep_func *ep_func;
358 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
359 if (!ep_func || !ep_func->msix_cap)
362 reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
363 val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
364 if (!(val & PCI_MSIX_FLAGS_ENABLE))
367 val &= PCI_MSIX_FLAGS_QSIZE;
372 static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
373 u16 interrupts, enum pci_barno bir, u32 offset)
375 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
376 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
377 struct dw_pcie_ep_func *ep_func;
380 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
381 if (!ep_func || !ep_func->msix_cap)
384 dw_pcie_dbi_ro_wr_en(pci);
386 reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
387 val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
388 val &= ~PCI_MSIX_FLAGS_QSIZE;
390 dw_pcie_writew_dbi(pci, reg, val);
392 reg = ep_func->msix_cap + PCI_MSIX_TABLE;
394 dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
396 reg = ep_func->msix_cap + PCI_MSIX_PBA;
397 val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
398 dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
400 dw_pcie_dbi_ro_wr_dis(pci);
405 static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
406 unsigned int type, u16 interrupt_num)
408 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
410 if (!ep->ops->raise_irq)
413 return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
/* pci_epc_ops::stop - Stop the PCIe link. */
static void dw_pcie_ep_stop(struct pci_epc *epc)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_stop_link(pci);
}
/* pci_epc_ops::start - Start the PCIe link. */
static int dw_pcie_ep_start(struct pci_epc *epc)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	return dw_pcie_start_link(pci);
}
432 static const struct pci_epc_features*
433 dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
435 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
437 if (!ep->ops->get_features)
440 return ep->ops->get_features(ep);
443 static const struct pci_epc_ops epc_ops = {
444 .write_header = dw_pcie_ep_write_header,
445 .set_bar = dw_pcie_ep_set_bar,
446 .clear_bar = dw_pcie_ep_clear_bar,
447 .map_addr = dw_pcie_ep_map_addr,
448 .unmap_addr = dw_pcie_ep_unmap_addr,
449 .set_msi = dw_pcie_ep_set_msi,
450 .get_msi = dw_pcie_ep_get_msi,
451 .set_msix = dw_pcie_ep_set_msix,
452 .get_msix = dw_pcie_ep_get_msix,
453 .raise_irq = dw_pcie_ep_raise_irq,
454 .start = dw_pcie_ep_start,
455 .stop = dw_pcie_ep_stop,
456 .get_features = dw_pcie_ep_get_features,
460 * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host
462 * @func_no: Function number of the endpoint
464 * Return: 0 if success, errono otherwise.
466 int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
468 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
469 struct device *dev = pci->dev;
471 dev_err(dev, "EP cannot raise INTX IRQs\n");
475 EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);
478 * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host
480 * @func_no: Function number of the endpoint
481 * @interrupt_num: Interrupt number to be raised
483 * Return: 0 if success, errono otherwise.
485 int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
488 u32 msg_addr_lower, msg_addr_upper, reg;
489 struct dw_pcie_ep_func *ep_func;
490 struct pci_epc *epc = ep->epc;
491 unsigned int aligned_offset;
492 u16 msg_ctrl, msg_data;
497 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
498 if (!ep_func || !ep_func->msi_cap)
501 /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
502 reg = ep_func->msi_cap + PCI_MSI_FLAGS;
503 msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, reg);
504 has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
505 reg = ep_func->msi_cap + PCI_MSI_ADDRESS_LO;
506 msg_addr_lower = dw_pcie_ep_readl_dbi(ep, func_no, reg);
508 reg = ep_func->msi_cap + PCI_MSI_ADDRESS_HI;
509 msg_addr_upper = dw_pcie_ep_readl_dbi(ep, func_no, reg);
510 reg = ep_func->msi_cap + PCI_MSI_DATA_64;
511 msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
514 reg = ep_func->msi_cap + PCI_MSI_DATA_32;
515 msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
517 msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;
519 aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
520 msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
521 ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
522 epc->mem->window.page_size);
526 writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
528 dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
532 EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);
535 * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using Doorbell
538 * @func_no: Function number of the endpoint device
539 * @interrupt_num: Interrupt number to be raised
541 * Return: 0 if success, errno otherwise.
543 int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
546 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
547 struct dw_pcie_ep_func *ep_func;
550 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
551 if (!ep_func || !ep_func->msix_cap)
554 msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) |
557 dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);
563 * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host
565 * @func_no: Function number of the endpoint device
566 * @interrupt_num: Interrupt number to be raised
568 * Return: 0 if success, errno otherwise.
570 int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
573 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
574 struct pci_epf_msix_tbl *msix_tbl;
575 struct dw_pcie_ep_func *ep_func;
576 struct pci_epc *epc = ep->epc;
577 u32 reg, msg_data, vec_ctrl;
578 unsigned int aligned_offset;
584 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
585 if (!ep_func || !ep_func->msix_cap)
588 reg = ep_func->msix_cap + PCI_MSIX_TABLE;
589 tbl_offset = dw_pcie_ep_readl_dbi(ep, func_no, reg);
590 bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
591 tbl_offset &= PCI_MSIX_TABLE_OFFSET;
593 msix_tbl = ep->epf_bar[bir]->addr + tbl_offset;
594 msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
595 msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
596 vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;
598 if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
599 dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
603 aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
604 msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
605 ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
606 epc->mem->window.page_size);
610 writel(msg_data, ep->msi_mem + aligned_offset);
612 dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
/**
 * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset
 * @ep: DWC EP device
 *
 * Cleans up the DWC EP specific resources like eDMA etc... after fundamental
 * reset like PERST#. Note that this API is only applicable for drivers
 * supporting PERST# or any other methods of fundamental reset.
 */
void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_edma_remove(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
634 * dw_pcie_ep_deinit - Deinitialize the endpoint device
637 * Deinitialize the endpoint device. EPC device is not destroyed since that will
638 * be taken care by Devres.
640 void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
642 struct pci_epc *epc = ep->epc;
644 dw_pcie_ep_cleanup(ep);
646 pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
647 epc->mem->window.page_size);
649 pci_epc_mem_exit(epc);
651 EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);
653 static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
656 int pos = PCI_CFG_SPACE_SIZE;
659 header = dw_pcie_readl_dbi(pci, pos);
660 if (PCI_EXT_CAP_ID(header) == cap)
663 pos = PCI_EXT_CAP_NEXT(header);
671 static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
677 offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
679 dw_pcie_dbi_ro_wr_en(pci);
682 reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
683 nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
684 PCI_REBAR_CTRL_NBAR_SHIFT;
687 * PCIe r6.0, sec 7.8.6.2 require us to support at least one
688 * size in the range from 1 MB to 512 GB. Advertise support
689 * for 1 MB BAR size only.
691 for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
692 dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
696 dw_pcie_dbi_ro_wr_dis(pci);
700 * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
703 * Initialize the registers (CSRs) specific to DWC EP. This API should be called
704 * only when the endpoint receives an active refclk (either from host or
705 * generated locally).
707 int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
709 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
710 struct dw_pcie_ep_func *ep_func;
711 struct device *dev = pci->dev;
712 struct pci_epc *epc = ep->epc;
713 u32 ptm_cap_base, reg;
719 hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
720 PCI_HEADER_TYPE_MASK;
721 if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
723 "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
728 dw_pcie_version_detect(pci);
730 dw_pcie_iatu_detect(pci);
732 ret = dw_pcie_edma_detect(pci);
736 if (!ep->ib_window_map) {
737 ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
739 if (!ep->ib_window_map)
740 goto err_remove_edma;
743 if (!ep->ob_window_map) {
744 ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
746 if (!ep->ob_window_map)
747 goto err_remove_edma;
750 if (!ep->outbound_addr) {
751 addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
754 goto err_remove_edma;
755 ep->outbound_addr = addr;
758 for (func_no = 0; func_no < epc->max_functions; func_no++) {
760 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
764 ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
766 goto err_remove_edma;
768 ep_func->func_no = func_no;
769 ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
771 ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
774 list_add_tail(&ep_func->list, &ep->func_list);
780 ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
783 * PTM responder capability can be disabled only after disabling
784 * PTM root capability.
787 dw_pcie_dbi_ro_wr_en(pci);
788 reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
789 reg &= ~PCI_PTM_CAP_ROOT;
790 dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
792 reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
793 reg &= ~(PCI_PTM_CAP_RES | PCI_PTM_GRANULARITY_MASK);
794 dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
795 dw_pcie_dbi_ro_wr_dis(pci);
798 dw_pcie_ep_init_non_sticky_registers(pci);
803 dw_pcie_edma_remove(pci);
807 EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);
810 * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
813 void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
815 struct pci_epc *epc = ep->epc;
819 EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
822 * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
825 * Non-sticky registers are also initialized before sending the notification to
826 * the EPF drivers. This is needed since the registers need to be initialized
827 * before the link comes back again.
829 void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
831 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
832 struct pci_epc *epc = ep->epc;
835 * Initialize the non-sticky DWC registers as they would've reset post
836 * Link Down. This is specifically needed for drivers not supporting
837 * PERST# as they have no way to reinitialize the registers before the
838 * link comes back again.
840 dw_pcie_ep_init_non_sticky_registers(pci);
842 pci_epc_linkdown(epc);
844 EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
847 * dw_pcie_ep_init - Initialize the endpoint device
850 * Initialize the endpoint device. Allocate resources and create the EPC
851 * device with the endpoint framework.
853 * Return: 0 if success, errno otherwise.
855 int dw_pcie_ep_init(struct dw_pcie_ep *ep)
858 struct resource *res;
860 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
861 struct device *dev = pci->dev;
862 struct platform_device *pdev = to_platform_device(dev);
863 struct device_node *np = dev->of_node;
865 INIT_LIST_HEAD(&ep->func_list);
867 ret = dw_pcie_get_resources(pci);
871 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
875 ep->phys_base = res->start;
876 ep->addr_size = resource_size(res);
878 if (ep->ops->pre_init)
879 ep->ops->pre_init(ep);
881 epc = devm_pci_epc_create(dev, &epc_ops);
883 dev_err(dev, "Failed to create epc device\n");
888 epc_set_drvdata(epc, ep);
890 ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
892 epc->max_functions = 1;
894 ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
897 dev_err(dev, "Failed to initialize address space\n");
901 ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
902 epc->mem->window.page_size);
905 dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
906 goto err_exit_epc_mem;
912 pci_epc_mem_exit(epc);
916 EXPORT_SYMBOL_GPL(dw_pcie_ep_init);