Diffstat (limited to 'drivers')
44 files changed, 2719 insertions, 498 deletions
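The recurring change in the driver hunks below (fm10k, mwifiex, nvme) is the replacement of the single pci_error_handlers reset_notify(pdev, bool prepare) callback with a reset_prepare()/reset_done() pair. A minimal sketch of the new pattern for a hypothetical driver follows; the foo_* names, struct foo_dev, and the quiesce/reinit helpers are illustrative only and are not part of this patch:

	#include <linux/pci.h>

	struct foo_dev {
		struct pci_dev *pdev;	/* hypothetical per-device state */
	};

	/* Illustrative stubs standing in for real teardown/bring-up. */
	static void foo_quiesce(struct foo_dev *foo) { /* stop DMA, IRQs */ }
	static int foo_reinit(struct foo_dev *foo) { return 0; /* restart HW */ }

	/* Called before the function reset (e.g. FLR) is issued. */
	static void foo_reset_prepare(struct pci_dev *pdev)
	{
		struct foo_dev *foo = pci_get_drvdata(pdev);

		foo_quiesce(foo);
	}

	/* Called after the reset; the PCI core has restored config space. */
	static void foo_reset_done(struct pci_dev *pdev)
	{
		struct foo_dev *foo = pci_get_drvdata(pdev);

		if (foo_reinit(foo))
			dev_warn(&pdev->dev, "reinit after reset failed\n");
	}

	static const struct pci_error_handlers foo_err_handler = {
		.reset_prepare	= foo_reset_prepare,
		.reset_done	= foo_reset_done,
	};

Splitting the callback removes the prepare flag and its if/else from every driver and lets the PCI core keep state across the reset itself, which is exactly the mechanical conversion applied per driver below.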
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 43ca16b6eee2..bbac5d5d1fcf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1152,16 +1152,12 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero return; if (state == VGA_SWITCHEROO_ON) { - unsigned d3_delay = dev->pdev->d3_delay; - pr_info("amdgpu: switched on\n"); /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; amdgpu_device_resume(dev, true, true); - dev->pdev->d3_delay = d3_delay; - dev->switch_power_state = DRM_SWITCH_POWER_ON; drm_kms_helper_poll_enable(dev); } else { diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 6ecf42783d4b..aecaafbc8417 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -113,7 +113,6 @@ static inline bool radeon_is_atpx_hybrid(void) { return false; } #endif #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) -#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) struct radeon_px_quirk { u32 chip_vendor; @@ -136,8 +135,6 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { * https://bugzilla.kernel.org/show_bug.cgi?id=51381 */ { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, - /* macbook pro 8.2 */ - { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, { 0, 0, 0, 0, 0 }, }; @@ -1241,25 +1238,17 @@ static void radeon_check_arguments(struct radeon_device *rdev) static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); - struct radeon_device *rdev = dev->dev_private; if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF) return; if (state == VGA_SWITCHEROO_ON) { - unsigned d3_delay = dev->pdev->d3_delay; - pr_info("radeon: switched on\n"); /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP)) - dev->pdev->d3_delay = 20; - radeon_resume_kms(dev, true, true); - dev->pdev->d3_delay = d3_delay; - dev->switch_power_state = DRM_SWITCH_POWER_ON; drm_kms_helper_poll_enable(dev); } else { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 3e26d27ad213..63784576ae8b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2348,30 +2348,19 @@ static void fm10k_io_resume(struct pci_dev *pdev) netif_device_attach(netdev); } -/** - * fm10k_io_reset_notify - called when PCI function is reset - * @pdev: Pointer to PCI device - * - * This callback is called when the PCI function is reset such as from - * /sys/class/net/<enpX>/device/reset or similar. When prepare is true, it - * means we should prepare for a function reset. If prepare is false, it means - * the function reset just occurred. 
- */ -static void fm10k_io_reset_notify(struct pci_dev *pdev, bool prepare) +static void fm10k_io_reset_prepare(struct pci_dev *pdev) { - struct fm10k_intfc *interface = pci_get_drvdata(pdev); - int err = 0; - - if (prepare) { - /* warn incase we have any active VF devices */ - if (pci_num_vf(pdev)) - dev_warn(&pdev->dev, - "PCIe FLR may cause issues for any active VF devices\n"); + /* warn incase we have any active VF devices */ + if (pci_num_vf(pdev)) + dev_warn(&pdev->dev, + "PCIe FLR may cause issues for any active VF devices\n"); + fm10k_prepare_suspend(pci_get_drvdata(pdev)); +} - fm10k_prepare_suspend(interface); - } else { - err = fm10k_handle_resume(interface); - } +static void fm10k_io_reset_done(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + int err = fm10k_handle_resume(interface); if (err) { dev_warn(&pdev->dev, @@ -2384,7 +2373,8 @@ static const struct pci_error_handlers fm10k_err_handler = { .error_detected = fm10k_io_error_detected, .slot_reset = fm10k_io_slot_reset, .resume = fm10k_io_resume, - .reset_notify = fm10k_io_reset_notify, + .reset_prepare = fm10k_io_reset_prepare, + .reset_done = fm10k_io_reset_done, }; static struct pci_driver fm10k_driver = { diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index ac62bce50e96..279adf124fc9 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -346,11 +346,13 @@ static const struct pci_device_id mwifiex_ids[] = { MODULE_DEVICE_TABLE(pci, mwifiex_ids); -static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare) +/* + * Cleanup all software without cleaning anything related to PCIe and HW. + */ +static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev) { struct pcie_service_card *card = pci_get_drvdata(pdev); struct mwifiex_adapter *adapter = card->adapter; - int ret; if (!adapter) { dev_err(&pdev->dev, "%s: adapter structure is not valid\n", @@ -359,37 +361,48 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare) } mwifiex_dbg(adapter, INFO, - "%s: vendor=0x%4.04x device=0x%4.04x rev=%d %s\n", - __func__, pdev->vendor, pdev->device, - pdev->revision, - prepare ? "Pre-FLR" : "Post-FLR"); - - if (prepare) { - /* Kernel would be performing FLR after this notification. - * Cleanup all software without cleaning anything related to - * PCIe and HW. - */ - mwifiex_shutdown_sw(adapter); - adapter->surprise_removed = true; - clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); - clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); - } else { - /* Kernel stores and restores PCIe function context before and - * after performing FLR respectively. 
Reconfigure the software - * and firmware including firmware redownload - */ - adapter->surprise_removed = false; - ret = mwifiex_reinit_sw(adapter); - if (ret) { - dev_err(&pdev->dev, "reinit failed: %d\n", ret); - return; - } - } + "%s: vendor=0x%4.04x device=0x%4.04x rev=%d Pre-FLR\n", + __func__, pdev->vendor, pdev->device, pdev->revision); + + mwifiex_shutdown_sw(adapter); + adapter->surprise_removed = true; + clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); + clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); } -static const struct pci_error_handlers mwifiex_pcie_err_handler[] = { - { .reset_notify = mwifiex_pcie_reset_notify, }, +/* + * Kernel stores and restores PCIe function context before and after performing + * FLR respectively. Reconfigure the software and firmware including firmware + * redownload. + */ +static void mwifiex_pcie_reset_done(struct pci_dev *pdev) +{ + struct pcie_service_card *card = pci_get_drvdata(pdev); + struct mwifiex_adapter *adapter = card->adapter; + int ret; + + if (!adapter) { + dev_err(&pdev->dev, "%s: adapter structure is not valid\n", + __func__); + return; + } + + mwifiex_dbg(adapter, INFO, + "%s: vendor=0x%4.04x device=0x%4.04x rev=%d Post-FLR\n", + __func__, pdev->vendor, pdev->device, pdev->revision); + + adapter->surprise_removed = false; + ret = mwifiex_reinit_sw(adapter); + if (ret) + dev_err(&pdev->dev, "reinit failed: %d\n", ret); + else + mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); +} + +static const struct pci_error_handlers mwifiex_pcie_err_handler = { + .reset_prepare = mwifiex_pcie_reset_prepare, + .reset_done = mwifiex_pcie_reset_done, }; #ifdef CONFIG_PM_SLEEP @@ -410,7 +423,7 @@ static struct pci_driver __refdata mwifiex_pcie = { }, #endif .shutdown = mwifiex_pcie_shutdown, - .err_handler = mwifiex_pcie_err_handler, + .err_handler = &mwifiex_pcie_err_handler, }; /* diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index fed803232edc..9a3d69b8df98 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2145,14 +2145,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) return result; } -static void nvme_reset_notify(struct pci_dev *pdev, bool prepare) +static void nvme_reset_prepare(struct pci_dev *pdev) { - struct nvme_dev *dev = pci_get_drvdata(pdev); + nvme_dev_disable(pci_get_drvdata(pdev), false); +} - if (prepare) - nvme_dev_disable(dev, false); - else - nvme_reset(dev); +static void nvme_reset_done(struct pci_dev *pdev) +{ + nvme_reset(pci_get_drvdata(pdev)); } static void nvme_shutdown(struct pci_dev *pdev) @@ -2275,7 +2275,8 @@ static const struct pci_error_handlers nvme_err_handler = { .error_detected = nvme_error_detected, .slot_reset = nvme_slot_reset, .resume = nvme_error_resume, - .reset_notify = nvme_reset_notify, + .reset_prepare = nvme_reset_prepare, + .reset_done = nvme_reset_done, }; static const struct pci_device_id nvme_id_table[] = { diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index eeb9fb2b47aa..ad8ddbbbf245 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -153,23 +153,27 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs) u32 max_requests; int pos; + if (WARN_ON(pdev->pri_enabled)) + return -EBUSY; + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); if (!pos) return -EINVAL; - pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status); - if ((control & PCI_PRI_CTRL_ENABLE) || - 
!(status & PCI_PRI_STATUS_STOPPED)) + if (!(status & PCI_PRI_STATUS_STOPPED)) return -EBUSY; pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests); reqs = min(max_requests, reqs); + pdev->pri_reqs_alloc = reqs; pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs); - control |= PCI_PRI_CTRL_ENABLE; + control = PCI_PRI_CTRL_ENABLE; pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); + pdev->pri_enabled = 1; + return 0; } EXPORT_SYMBOL_GPL(pci_enable_pri); @@ -185,6 +189,9 @@ void pci_disable_pri(struct pci_dev *pdev) u16 control; int pos; + if (WARN_ON(!pdev->pri_enabled)) + return; + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); if (!pos) return; @@ -192,10 +199,34 @@ void pci_disable_pri(struct pci_dev *pdev) pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); control &= ~PCI_PRI_CTRL_ENABLE; pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); + + pdev->pri_enabled = 0; } EXPORT_SYMBOL_GPL(pci_disable_pri); /** + * pci_restore_pri_state - Restore PRI + * @pdev: PCI device structure + */ +void pci_restore_pri_state(struct pci_dev *pdev) +{ + u16 control = PCI_PRI_CTRL_ENABLE; + u32 reqs = pdev->pri_reqs_alloc; + int pos; + + if (!pdev->pri_enabled) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); + if (!pos) + return; + + pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs); + pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); +} +EXPORT_SYMBOL_GPL(pci_restore_pri_state); + +/** * pci_reset_pri - Resets device's PRI state * @pdev: PCI device structure * @@ -207,16 +238,14 @@ int pci_reset_pri(struct pci_dev *pdev) u16 control; int pos; + if (WARN_ON(pdev->pri_enabled)) + return -EBUSY; + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); if (!pos) return -EINVAL; - pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); - if (control & PCI_PRI_CTRL_ENABLE) - return -EBUSY; - - control |= PCI_PRI_CTRL_RESET; - + control = PCI_PRI_CTRL_RESET; pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); return 0; @@ -239,16 +268,14 @@ int pci_enable_pasid(struct pci_dev *pdev, int features) u16 control, supported; int pos; + if (WARN_ON(pdev->pasid_enabled)) + return -EBUSY; + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); if (!pos) return -EINVAL; - pci_read_config_word(pdev, pos + PCI_PASID_CTRL, &control); pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported); - - if (control & PCI_PASID_CTRL_ENABLE) - return -EINVAL; - supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV; /* User wants to enable anything unsupported? 
*/ @@ -256,9 +283,12 @@ int pci_enable_pasid(struct pci_dev *pdev, int features) return -EINVAL; control = PCI_PASID_CTRL_ENABLE | features; + pdev->pasid_features = features; pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control); + pdev->pasid_enabled = 1; + return 0; } EXPORT_SYMBOL_GPL(pci_enable_pasid); @@ -266,22 +296,47 @@ EXPORT_SYMBOL_GPL(pci_enable_pasid); /** * pci_disable_pasid - Disable the PASID capability * @pdev: PCI device structure - * */ void pci_disable_pasid(struct pci_dev *pdev) { u16 control = 0; int pos; + if (WARN_ON(!pdev->pasid_enabled)) + return; + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); if (!pos) return; pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control); + + pdev->pasid_enabled = 0; } EXPORT_SYMBOL_GPL(pci_disable_pasid); /** + * pci_restore_pasid_state - Restore PASID capabilities + * @pdev: PCI device structure + */ +void pci_restore_pasid_state(struct pci_dev *pdev) +{ + u16 control; + int pos; + + if (!pdev->pasid_enabled) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); + if (!pos) + return; + + control = PCI_PASID_CTRL_ENABLE | pdev->pasid_features; + pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control); +} +EXPORT_SYMBOL_GPL(pci_restore_pasid_state); + +/** * pci_pasid_features - Check which PASID features are supported * @pdev: PCI device structure * diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig index b7e15526d676..d275aadc47ee 100644 --- a/drivers/pci/dwc/Kconfig +++ b/drivers/pci/dwc/Kconfig @@ -16,6 +16,7 @@ config PCIE_DW_EP config PCI_DRA7XX bool "TI DRA7xx PCIe controller" + depends on SOC_DRA7XX || COMPILE_TEST depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT depends on OF && HAS_IOMEM && TI_PIPE3 help @@ -158,4 +159,14 @@ config PCIE_ARTPEC6 Say Y here to enable PCIe controller support on Axis ARTPEC-6 SoCs. This PCIe controller uses the DesignWare core. +config PCIE_KIRIN + depends on OF && ARM64 + bool "HiSilicon Kirin series SoCs PCIe controllers" + depends on PCI + select PCIEPORTBUS + select PCIE_DW_HOST + help + Say Y here if you want PCIe controller support + on HiSilicon Kirin series SoCs. + endmenu diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile index f31a8596442a..c61be9738cce 100644 --- a/drivers/pci/dwc/Makefile +++ b/drivers/pci/dwc/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o +obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o # The following drivers are for devices that use the generic ACPI # pci_root.c driver but don't support standard ECAM config access. 
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c index 8decf46cf525..f2fc5f47064e 100644 --- a/drivers/pci/dwc/pci-dra7xx.c +++ b/drivers/pci/dwc/pci-dra7xx.c @@ -174,7 +174,7 @@ static int dra7xx_pcie_establish_link(struct dw_pcie *pci) static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) { dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, - ~LEG_EP_INTERRUPTS & ~MSI); + LEG_EP_INTERRUPTS | MSI); dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, @@ -184,7 +184,7 @@ static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) { dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, - ~INTERRUPTS); + INTERRUPTS); dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS); } @@ -208,7 +208,7 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp) dra7xx_pcie_enable_interrupts(dra7xx); } -static struct dw_pcie_host_ops dra7xx_pcie_host_ops = { +static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { .host_init = dra7xx_pcie_host_init, }; diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c index 546082ad5a3f..c78c06552590 100644 --- a/drivers/pci/dwc/pci-exynos.c +++ b/drivers/pci/dwc/pci-exynos.c @@ -590,7 +590,7 @@ static void exynos_pcie_host_init(struct pcie_port *pp) exynos_pcie_enable_interrupts(ep); } -static struct dw_pcie_host_ops exynos_pcie_host_ops = { +static const struct dw_pcie_host_ops exynos_pcie_host_ops = { .rd_own_conf = exynos_pcie_rd_own_conf, .wr_own_conf = exynos_pcie_wr_own_conf, .host_init = exynos_pcie_host_init, diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c index a98cba55c7f0..bf5c3616e344 100644 --- a/drivers/pci/dwc/pci-imx6.c +++ b/drivers/pci/dwc/pci-imx6.c @@ -24,6 +24,7 @@ #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/regmap.h> +#include <linux/regulator/consumer.h> #include <linux/resource.h> #include <linux/signal.h> #include <linux/types.h> @@ -59,6 +60,7 @@ struct imx6_pcie { u32 tx_swing_full; u32 tx_swing_low; int link_gen; + struct regulator *vpcie; }; /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ @@ -252,11 +254,40 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) static int imx6q_pcie_abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { - return 0; + unsigned long pc = instruction_pointer(regs); + unsigned long instr = *(unsigned long *)pc; + int reg = (instr >> 12) & 15; + + /* + * If the instruction being executed was a read, + * make it look like it read all-ones. 
+ */ + if ((instr & 0x0c100000) == 0x04100000) { + unsigned long val; + + if (instr & 0x00400000) + val = 255; + else + val = -1; + + regs->uregs[reg] = val; + regs->ARM_pc += 4; + return 0; + } + + if ((instr & 0x0e100090) == 0x00100090) { + regs->uregs[reg] = -1; + regs->ARM_pc += 4; + return 0; + } + + return 1; } static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) { + struct device *dev = imx6_pcie->pci->dev; + switch (imx6_pcie->variant) { case IMX7D: reset_control_assert(imx6_pcie->pciephy_reset); @@ -283,6 +314,14 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); break; } + + if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { + int ret = regulator_disable(imx6_pcie->vpcie); + + if (ret) + dev_err(dev, "failed to disable vpcie regulator: %d\n", + ret); + } } static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) @@ -349,10 +388,19 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) struct device *dev = pci->dev; int ret; + if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) { + ret = regulator_enable(imx6_pcie->vpcie); + if (ret) { + dev_err(dev, "failed to enable vpcie regulator: %d\n", + ret); + return; + } + } + ret = clk_prepare_enable(imx6_pcie->pcie_phy); if (ret) { dev_err(dev, "unable to enable pcie_phy clock\n"); - return; + goto err_pcie_phy; } ret = clk_prepare_enable(imx6_pcie->pcie_bus); @@ -412,6 +460,13 @@ err_pcie: clk_disable_unprepare(imx6_pcie->pcie_bus); err_pcie_bus: clk_disable_unprepare(imx6_pcie->pcie_phy); +err_pcie_phy: + if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { + ret = regulator_disable(imx6_pcie->vpcie); + if (ret) + dev_err(dev, "failed to disable vpcie regulator: %d\n", + ret); + } } static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) @@ -602,7 +657,7 @@ static int imx6_pcie_link_up(struct dw_pcie *pci) PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; } -static struct dw_pcie_host_ops imx6_pcie_host_ops = { +static const struct dw_pcie_host_ops imx6_pcie_host_ops = { .host_init = imx6_pcie_host_init, }; @@ -775,6 +830,13 @@ static int imx6_pcie_probe(struct platform_device *pdev) if (ret) imx6_pcie->link_gen = 1; + imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); + if (IS_ERR(imx6_pcie->vpcie)) { + if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER) + return -EPROBE_DEFER; + imx6_pcie->vpcie = NULL; + } + platform_set_drvdata(pdev, imx6_pcie); ret = imx6_add_pcie_port(imx6_pcie, pdev); @@ -819,8 +881,8 @@ static int __init imx6_pcie_init(void) * we can install the handler here without risking it * accessing some uninitialized driver state. 
*/ - hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0, - "imprecise external abort"); + hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0, + "external abort on non-linefetch"); return platform_driver_register(&imx6_pcie_driver); } diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c index fcc9723bad6e..4783cec1f78d 100644 --- a/drivers/pci/dwc/pci-keystone.c +++ b/drivers/pci/dwc/pci-keystone.c @@ -291,7 +291,7 @@ static void __init ks_pcie_host_init(struct pcie_port *pp) "Asynchronous external abort"); } -static struct dw_pcie_host_ops keystone_pcie_host_ops = { +static const struct dw_pcie_host_ops keystone_pcie_host_ops = { .rd_other_conf = ks_dw_pcie_rd_other_conf, .wr_other_conf = ks_dw_pcie_wr_other_conf, .host_init = ks_pcie_host_init, diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c index 27d638c4e134..fd861289ad8b 100644 --- a/drivers/pci/dwc/pci-layerscape.c +++ b/drivers/pci/dwc/pci-layerscape.c @@ -39,7 +39,7 @@ struct ls_pcie_drvdata { u32 lut_offset; u32 ltssm_shift; u32 lut_dbg; - struct dw_pcie_host_ops *ops; + const struct dw_pcie_host_ops *ops; const struct dw_pcie_ops *dw_pcie_ops; }; @@ -185,12 +185,12 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp, return 0; } -static struct dw_pcie_host_ops ls1021_pcie_host_ops = { +static const struct dw_pcie_host_ops ls1021_pcie_host_ops = { .host_init = ls1021_pcie_host_init, .msi_host_init = ls_pcie_msi_host_init, }; -static struct dw_pcie_host_ops ls_pcie_host_ops = { +static const struct dw_pcie_host_ops ls_pcie_host_ops = { .host_init = ls_pcie_host_init, .msi_host_init = ls_pcie_msi_host_init, }; diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c index 495b023042b3..ea8f34af6a85 100644 --- a/drivers/pci/dwc/pcie-armada8k.c +++ b/drivers/pci/dwc/pcie-armada8k.c @@ -160,7 +160,7 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -static struct dw_pcie_host_ops armada8k_pcie_host_ops = { +static const struct dw_pcie_host_ops armada8k_pcie_host_ops = { .host_init = armada8k_pcie_host_init, }; diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c index 82a04acc42fd..01c6f7823672 100644 --- a/drivers/pci/dwc/pcie-artpec6.c +++ b/drivers/pci/dwc/pcie-artpec6.c @@ -184,7 +184,7 @@ static void artpec6_pcie_host_init(struct pcie_port *pp) artpec6_pcie_enable_interrupts(artpec6_pcie); } -static struct dw_pcie_host_ops artpec6_pcie_host_ops = { +static const struct dw_pcie_host_ops artpec6_pcie_host_ops = { .host_init = artpec6_pcie_host_init, }; diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c index 32091b32f6e1..091b4e7ad059 100644 --- a/drivers/pci/dwc/pcie-designware-plat.c +++ b/drivers/pci/dwc/pcie-designware-plat.c @@ -46,7 +46,7 @@ static void dw_plat_pcie_host_init(struct pcie_port *pp) dw_pcie_msi_init(pp); } -static struct dw_pcie_host_ops dw_plat_pcie_host_ops = { +static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = { .host_init = dw_plat_pcie_host_init, }; @@ -67,7 +67,8 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp, ret = devm_request_irq(dev, pp->msi_irq, dw_plat_pcie_msi_irq_handler, - IRQF_SHARED, "dw-plat-pcie-msi", pp); + IRQF_SHARED | IRQF_NO_THREAD, + "dw-plat-pcie-msi", pp); if (ret) { dev_err(dev, "failed to request MSI IRQ\n"); return ret; diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h index c6a840575796..b4d2a89f8e58 100644 --- 
a/drivers/pci/dwc/pcie-designware.h +++ b/drivers/pci/dwc/pcie-designware.h @@ -162,7 +162,7 @@ struct pcie_port { struct resource *mem; struct resource *busn; int irq; - struct dw_pcie_host_ops *ops; + const struct dw_pcie_host_ops *ops; int msi_irq; struct irq_domain *irq_domain; unsigned long msi_data; diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c new file mode 100644 index 000000000000..33fddb9f6739 --- /dev/null +++ b/drivers/pci/dwc/pcie-kirin.c @@ -0,0 +1,517 @@ +/* + * PCIe host controller driver for Kirin Phone SoCs + * + * Copyright (C) 2017 Hilisicon Electronics Co., Ltd. + * http://www.huawei.com + * + * Author: Xiaowei Song <songxiaowei@huawei.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <asm/compiler.h> +#include <linux/compiler.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/mfd/syscon.h> +#include <linux/of_address.h> +#include <linux/of_gpio.h> +#include <linux/of_pci.h> +#include <linux/pci.h> +#include <linux/pci_regs.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/resource.h> +#include <linux/types.h> +#include "pcie-designware.h" + +#define to_kirin_pcie(x) dev_get_drvdata((x)->dev) + +#define REF_CLK_FREQ 100000000 + +/* PCIe ELBI registers */ +#define SOC_PCIECTRL_CTRL0_ADDR 0x000 +#define SOC_PCIECTRL_CTRL1_ADDR 0x004 +#define SOC_PCIEPHY_CTRL2_ADDR 0x008 +#define SOC_PCIEPHY_CTRL3_ADDR 0x00c +#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21) + +/* info located in APB */ +#define PCIE_APP_LTSSM_ENABLE 0x01c +#define PCIE_APB_PHY_CTRL0 0x0 +#define PCIE_APB_PHY_CTRL1 0x4 +#define PCIE_APB_PHY_STATUS0 0x400 +#define PCIE_LINKUP_ENABLE (0x8020) +#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11) +#define PIPE_CLK_STABLE (0x1 << 19) +#define PHY_REF_PAD_BIT (0x1 << 8) +#define PHY_PWR_DOWN_BIT (0x1 << 22) +#define PHY_RST_ACK_BIT (0x1 << 16) + +/* info located in sysctrl */ +#define SCTRL_PCIE_CMOS_OFFSET 0x60 +#define SCTRL_PCIE_CMOS_BIT 0x10 +#define SCTRL_PCIE_ISO_OFFSET 0x44 +#define SCTRL_PCIE_ISO_BIT 0x30 +#define SCTRL_PCIE_HPCLK_OFFSET 0x190 +#define SCTRL_PCIE_HPCLK_BIT 0x184000 +#define SCTRL_PCIE_OE_OFFSET 0x14a +#define PCIE_DEBOUNCE_PARAM 0xF0F400 +#define PCIE_OE_BYPASS (0x3 << 28) + +/* peri_crg ctrl */ +#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88 +#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000 + +/* Time for delay */ +#define REF_2_PERST_MIN 20000 +#define REF_2_PERST_MAX 25000 +#define PERST_2_ACCESS_MIN 10000 +#define PERST_2_ACCESS_MAX 12000 +#define LINK_WAIT_MIN 900 +#define LINK_WAIT_MAX 1000 +#define PIPE_CLK_WAIT_MIN 550 +#define PIPE_CLK_WAIT_MAX 600 +#define TIME_CMOS_MIN 100 +#define TIME_CMOS_MAX 105 +#define TIME_PHY_PD_MIN 10 +#define TIME_PHY_PD_MAX 11 + +struct kirin_pcie { + struct dw_pcie *pci; + void __iomem *apb_base; + void __iomem *phy_base; + struct regmap *crgctrl; + struct regmap *sysctrl; + struct clk *apb_sys_clk; + struct clk *apb_phy_clk; + struct clk *phy_ref_clk; + struct clk *pcie_aclk; + struct clk *pcie_aux_clk; + int gpio_id_reset; +}; + +/* Registers in PCIeCTRL */ +static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie, + u32 val, u32 reg) +{ + writel(val, kirin_pcie->apb_base + reg); +} + +static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg) +{ + return 
readl(kirin_pcie->apb_base + reg); +} + +/* Registers in PCIePHY */ +static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie, + u32 val, u32 reg) +{ + writel(val, kirin_pcie->phy_base + reg); +} + +static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg) +{ + return readl(kirin_pcie->phy_base + reg); +} + +static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref"); + if (IS_ERR(kirin_pcie->phy_ref_clk)) + return PTR_ERR(kirin_pcie->phy_ref_clk); + + kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux"); + if (IS_ERR(kirin_pcie->pcie_aux_clk)) + return PTR_ERR(kirin_pcie->pcie_aux_clk); + + kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy"); + if (IS_ERR(kirin_pcie->apb_phy_clk)) + return PTR_ERR(kirin_pcie->apb_phy_clk); + + kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys"); + if (IS_ERR(kirin_pcie->apb_sys_clk)) + return PTR_ERR(kirin_pcie->apb_sys_clk); + + kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk"); + if (IS_ERR(kirin_pcie->pcie_aclk)) + return PTR_ERR(kirin_pcie->pcie_aclk); + + return 0; +} + +static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *apb; + struct resource *phy; + struct resource *dbi; + + apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb"); + kirin_pcie->apb_base = devm_ioremap_resource(dev, apb); + if (IS_ERR(kirin_pcie->apb_base)) + return PTR_ERR(kirin_pcie->apb_base); + + phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); + kirin_pcie->phy_base = devm_ioremap_resource(dev, phy); + if (IS_ERR(kirin_pcie->phy_base)) + return PTR_ERR(kirin_pcie->phy_base); + + dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi); + if (IS_ERR(kirin_pcie->pci->dbi_base)) + return PTR_ERR(kirin_pcie->pci->dbi_base); + + kirin_pcie->crgctrl = + syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl"); + if (IS_ERR(kirin_pcie->crgctrl)) + return PTR_ERR(kirin_pcie->crgctrl); + + kirin_pcie->sysctrl = + syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl"); + if (IS_ERR(kirin_pcie->sysctrl)) + return PTR_ERR(kirin_pcie->sysctrl); + + return 0; +} + +static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie) +{ + struct device *dev = kirin_pcie->pci->dev; + u32 reg_val; + + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1); + reg_val &= ~PHY_REF_PAD_BIT; + kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1); + + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0); + reg_val &= ~PHY_PWR_DOWN_BIT; + kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0); + usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX); + + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1); + reg_val &= ~PHY_RST_ACK_BIT; + kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1); + + usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX); + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0); + if (reg_val & PIPE_CLK_STABLE) { + dev_err(dev, "PIPE clk is not stable\n"); + return -EINVAL; + } + + return 0; +} + +static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie) +{ + u32 val; + + regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val); + val |= PCIE_DEBOUNCE_PARAM; + val &= ~PCIE_OE_BYPASS; + regmap_write(kirin_pcie->sysctrl, 
SCTRL_PCIE_OE_OFFSET, val); +} + +static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable) +{ + int ret = 0; + + if (!enable) + goto close_clk; + + ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ); + if (ret) + return ret; + + ret = clk_prepare_enable(kirin_pcie->phy_ref_clk); + if (ret) + return ret; + + ret = clk_prepare_enable(kirin_pcie->apb_sys_clk); + if (ret) + goto apb_sys_fail; + + ret = clk_prepare_enable(kirin_pcie->apb_phy_clk); + if (ret) + goto apb_phy_fail; + + ret = clk_prepare_enable(kirin_pcie->pcie_aclk); + if (ret) + goto aclk_fail; + + ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk); + if (ret) + goto aux_clk_fail; + + return 0; + +close_clk: + clk_disable_unprepare(kirin_pcie->pcie_aux_clk); +aux_clk_fail: + clk_disable_unprepare(kirin_pcie->pcie_aclk); +aclk_fail: + clk_disable_unprepare(kirin_pcie->apb_phy_clk); +apb_phy_fail: + clk_disable_unprepare(kirin_pcie->apb_sys_clk); +apb_sys_fail: + clk_disable_unprepare(kirin_pcie->phy_ref_clk); + + return ret; +} + +static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie) +{ + int ret; + + /* Power supply for Host */ + regmap_write(kirin_pcie->sysctrl, + SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT); + usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX); + kirin_pcie_oe_enable(kirin_pcie); + + ret = kirin_pcie_clk_ctrl(kirin_pcie, true); + if (ret) + return ret; + + /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */ + regmap_write(kirin_pcie->sysctrl, + SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT); + regmap_write(kirin_pcie->crgctrl, + CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT); + regmap_write(kirin_pcie->sysctrl, + SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT); + + ret = kirin_pcie_phy_init(kirin_pcie); + if (ret) + goto close_clk; + + /* perst assert Endpoint */ + if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) { + usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX); + ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1); + if (ret) + goto close_clk; + usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX); + + return 0; + } + +close_clk: + kirin_pcie_clk_ctrl(kirin_pcie, false); + return ret; +} + +static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie, + bool on) +{ + u32 val; + + val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR); + if (on) + val = val | PCIE_ELBI_SLV_DBI_ENABLE; + else + val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; + + kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR); +} + +static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie, + bool on) +{ + u32 val; + + val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR); + if (on) + val = val | PCIE_ELBI_SLV_DBI_ENABLE; + else + val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; + + kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR); +} + +static int kirin_pcie_rd_own_conf(struct pcie_port *pp, + int where, int size, u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + int ret; + + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); + ret = dw_pcie_read(pci->dbi_base + where, size, val); + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); + + return ret; +} + +static int kirin_pcie_wr_own_conf(struct pcie_port *pp, + int where, int size, u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + int ret; + + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); + ret = dw_pcie_write(pci->dbi_base + where, size, val); + 
kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); + + return ret; +} + +static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size) +{ + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + u32 ret; + + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); + dw_pcie_read(base + reg, size, &ret); + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); + + return ret; +} + +static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size, u32 val) +{ + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); + dw_pcie_write(base + reg, size, val); + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); +} + +static int kirin_pcie_link_up(struct dw_pcie *pci) +{ + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0); + + if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE) + return 1; + + return 0; +} + +static int kirin_pcie_establish_link(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + struct device *dev = kirin_pcie->pci->dev; + int count = 0; + + if (kirin_pcie_link_up(pci)) + return 0; + + dw_pcie_setup_rc(pp); + + /* assert LTSSM enable */ + kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT, + PCIE_APP_LTSSM_ENABLE); + + /* check if the link is up or not */ + while (!kirin_pcie_link_up(pci)) { + usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); + count++; + if (count == 1000) { + dev_err(dev, "Link Fail\n"); + return -EINVAL; + } + } + + return 0; +} + +static void kirin_pcie_host_init(struct pcie_port *pp) +{ + kirin_pcie_establish_link(pp); +} + +static struct dw_pcie_ops kirin_dw_pcie_ops = { + .read_dbi = kirin_pcie_read_dbi, + .write_dbi = kirin_pcie_write_dbi, + .link_up = kirin_pcie_link_up, +}; + +static struct dw_pcie_host_ops kirin_pcie_host_ops = { + .rd_own_conf = kirin_pcie_rd_own_conf, + .wr_own_conf = kirin_pcie_wr_own_conf, + .host_init = kirin_pcie_host_init, +}; + +static int __init kirin_add_pcie_port(struct dw_pcie *pci, + struct platform_device *pdev) +{ + pci->pp.ops = &kirin_pcie_host_ops; + + return dw_pcie_host_init(&pci->pp); +} + +static int kirin_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct kirin_pcie *kirin_pcie; + struct dw_pcie *pci; + int ret; + + if (!dev->of_node) { + dev_err(dev, "NULL node\n"); + return -EINVAL; + } + + kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL); + if (!kirin_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &kirin_dw_pcie_ops; + kirin_pcie->pci = pci; + + ret = kirin_pcie_get_clk(kirin_pcie, pdev); + if (ret) + return ret; + + ret = kirin_pcie_get_resource(kirin_pcie, pdev); + if (ret) + return ret; + + kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, + "reset-gpio", 0); + if (kirin_pcie->gpio_id_reset < 0) + return -ENODEV; + + ret = kirin_pcie_power_on(kirin_pcie); + if (ret) + return ret; + + platform_set_drvdata(pdev, kirin_pcie); + + return kirin_add_pcie_port(pci, pdev); +} + +static const struct of_device_id kirin_pcie_match[] = { + { .compatible = "hisilicon,kirin960-pcie" }, + {}, +}; + +struct platform_driver kirin_pcie_driver = { + .probe = kirin_pcie_probe, + .driver = { + .name = "kirin-pcie", + .of_match_table = kirin_pcie_match, + .suppress_bind_attrs = true, + }, +}; 
+builtin_platform_driver(kirin_pcie_driver); diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c index 5bf23d432fdb..68c5f2ab5bc8 100644 --- a/drivers/pci/dwc/pcie-qcom.c +++ b/drivers/pci/dwc/pcie-qcom.c @@ -51,6 +51,12 @@ #define PCIE20_ELBI_SYS_CTRL 0x04 #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) +#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818 +#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4 +#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5 +#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c +#define CFG_BRIDGE_SB_INIT BIT(0) + #define PCIE20_CAP 0x70 #define PERST_DELAY_US 1000 @@ -86,10 +92,29 @@ struct qcom_pcie_resources_v2 { struct clk *pipe_clk; }; +struct qcom_pcie_resources_v3 { + struct clk *aux_clk; + struct clk *master_clk; + struct clk *slave_clk; + struct reset_control *axi_m_reset; + struct reset_control *axi_s_reset; + struct reset_control *pipe_reset; + struct reset_control *axi_m_vmid_reset; + struct reset_control *axi_s_xpu_reset; + struct reset_control *parf_reset; + struct reset_control *phy_reset; + struct reset_control *axi_m_sticky_reset; + struct reset_control *pipe_sticky_reset; + struct reset_control *pwr_reset; + struct reset_control *ahb_reset; + struct reset_control *phy_ahb_reset; +}; + union qcom_pcie_resources { struct qcom_pcie_resources_v0 v0; struct qcom_pcie_resources_v1 v1; struct qcom_pcie_resources_v2 v2; + struct qcom_pcie_resources_v3 v3; }; struct qcom_pcie; @@ -133,26 +158,6 @@ static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg) return dw_handle_msi_irq(pp); } -static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie) -{ - u32 val; - - /* enable link training */ - val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); - val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; - writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); -} - -static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie) -{ - u32 val; - - /* enable link training */ - val = readl(pcie->parf + PCIE20_PARF_LTSSM); - val |= BIT(8); - writel(val, pcie->parf + PCIE20_PARF_LTSSM); -} - static int qcom_pcie_establish_link(struct qcom_pcie *pcie) { struct dw_pcie *pci = pcie->pci; @@ -167,6 +172,16 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie) return dw_pcie_wait_for_link(pci); } +static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie) +{ + u32 val; + + /* enable link training */ + val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); + val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; + writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); +} + static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_v0 *res = &pcie->res.v0; @@ -217,36 +232,6 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) return PTR_ERR_OR_ZERO(res->phy_reset); } -static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_v1 *res = &pcie->res.v1; - struct dw_pcie *pci = pcie->pci; - struct device *dev = pci->dev; - - res->vdda = devm_regulator_get(dev, "vdda"); - if (IS_ERR(res->vdda)) - return PTR_ERR(res->vdda); - - res->iface = devm_clk_get(dev, "iface"); - if (IS_ERR(res->iface)) - return PTR_ERR(res->iface); - - res->aux = devm_clk_get(dev, "aux"); - if (IS_ERR(res->aux)) - return PTR_ERR(res->aux); - - res->master_bus = devm_clk_get(dev, "master_bus"); - if (IS_ERR(res->master_bus)) - return PTR_ERR(res->master_bus); - - res->slave_bus = devm_clk_get(dev, "slave_bus"); - if (IS_ERR(res->slave_bus)) - return PTR_ERR(res->slave_bus); - - res->core = devm_reset_control_get(dev, "core"); - return 
PTR_ERR_OR_ZERO(res->core); -} - static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_v0 *res = &pcie->res.v0; @@ -357,6 +342,13 @@ static int qcom_pcie_init_v0(struct qcom_pcie *pcie) /* wait for clock acquisition */ usleep_range(1000, 1500); + + /* Set the Max TLP size to 2K, instead of using default of 4K */ + writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, + pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); + writel(CFG_BRIDGE_SB_INIT, + pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); + return 0; err_deassert_ahb: @@ -375,6 +367,36 @@ err_refclk: return ret; } +static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v1 *res = &pcie->res.v1; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + + res->vdda = devm_regulator_get(dev, "vdda"); + if (IS_ERR(res->vdda)) + return PTR_ERR(res->vdda); + + res->iface = devm_clk_get(dev, "iface"); + if (IS_ERR(res->iface)) + return PTR_ERR(res->iface); + + res->aux = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux)) + return PTR_ERR(res->aux); + + res->master_bus = devm_clk_get(dev, "master_bus"); + if (IS_ERR(res->master_bus)) + return PTR_ERR(res->master_bus); + + res->slave_bus = devm_clk_get(dev, "slave_bus"); + if (IS_ERR(res->slave_bus)) + return PTR_ERR(res->slave_bus); + + res->core = devm_reset_control_get(dev, "core"); + return PTR_ERR_OR_ZERO(res->core); +} + static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie) { struct qcom_pcie_resources_v1 *res = &pcie->res.v1; @@ -455,6 +477,16 @@ err_res: return ret; } +static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie) +{ + u32 val; + + /* enable link training */ + val = readl(pcie->parf + PCIE20_PARF_LTSSM); + val |= BIT(8); + writel(val, pcie->parf + PCIE20_PARF_LTSSM); +} + static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) { struct qcom_pcie_resources_v2 *res = &pcie->res.v2; @@ -481,6 +513,17 @@ static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) return PTR_ERR_OR_ZERO(res->pipe_clk); } +static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v2 *res = &pcie->res.v2; + + clk_disable_unprepare(res->pipe_clk); + clk_disable_unprepare(res->slave_clk); + clk_disable_unprepare(res->master_clk); + clk_disable_unprepare(res->cfg_clk); + clk_disable_unprepare(res->aux_clk); +} + static int qcom_pcie_init_v2(struct qcom_pcie *pcie) { struct qcom_pcie_resources_v2 *res = &pcie->res.v2; @@ -562,22 +605,290 @@ static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie) return 0; } -static int qcom_pcie_link_up(struct dw_pcie *pci) +static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie) { - u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); + struct qcom_pcie_resources_v3 *res = &pcie->res.v3; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; - return !!(val & PCI_EXP_LNKSTA_DLLLA); + res->aux_clk = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux_clk)) + return PTR_ERR(res->aux_clk); + + res->master_clk = devm_clk_get(dev, "master_bus"); + if (IS_ERR(res->master_clk)) + return PTR_ERR(res->master_clk); + + res->slave_clk = devm_clk_get(dev, "slave_bus"); + if (IS_ERR(res->slave_clk)) + return PTR_ERR(res->slave_clk); + + res->axi_m_reset = devm_reset_control_get(dev, "axi_m"); + if (IS_ERR(res->axi_m_reset)) + return PTR_ERR(res->axi_m_reset); + + res->axi_s_reset = devm_reset_control_get(dev, "axi_s"); + if (IS_ERR(res->axi_s_reset)) + return PTR_ERR(res->axi_s_reset); + + res->pipe_reset = devm_reset_control_get(dev, 
"pipe"); + if (IS_ERR(res->pipe_reset)) + return PTR_ERR(res->pipe_reset); + + res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid"); + if (IS_ERR(res->axi_m_vmid_reset)) + return PTR_ERR(res->axi_m_vmid_reset); + + res->axi_s_xpu_reset = devm_reset_control_get(dev, "axi_s_xpu"); + if (IS_ERR(res->axi_s_xpu_reset)) + return PTR_ERR(res->axi_s_xpu_reset); + + res->parf_reset = devm_reset_control_get(dev, "parf"); + if (IS_ERR(res->parf_reset)) + return PTR_ERR(res->parf_reset); + + res->phy_reset = devm_reset_control_get(dev, "phy"); + if (IS_ERR(res->phy_reset)) + return PTR_ERR(res->phy_reset); + + res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky"); + if (IS_ERR(res->axi_m_sticky_reset)) + return PTR_ERR(res->axi_m_sticky_reset); + + res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky"); + if (IS_ERR(res->pipe_sticky_reset)) + return PTR_ERR(res->pipe_sticky_reset); + + res->pwr_reset = devm_reset_control_get(dev, "pwr"); + if (IS_ERR(res->pwr_reset)) + return PTR_ERR(res->pwr_reset); + + res->ahb_reset = devm_reset_control_get(dev, "ahb"); + if (IS_ERR(res->ahb_reset)) + return PTR_ERR(res->ahb_reset); + + res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb"); + if (IS_ERR(res->phy_ahb_reset)) + return PTR_ERR(res->phy_ahb_reset); + + return 0; } -static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie) +static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v2 *res = &pcie->res.v2; - - clk_disable_unprepare(res->pipe_clk); + struct qcom_pcie_resources_v3 *res = &pcie->res.v3; + + reset_control_assert(res->axi_m_reset); + reset_control_assert(res->axi_s_reset); + reset_control_assert(res->pipe_reset); + reset_control_assert(res->pipe_sticky_reset); + reset_control_assert(res->phy_reset); + reset_control_assert(res->phy_ahb_reset); + reset_control_assert(res->axi_m_sticky_reset); + reset_control_assert(res->pwr_reset); + reset_control_assert(res->ahb_reset); + clk_disable_unprepare(res->aux_clk); + clk_disable_unprepare(res->master_clk); clk_disable_unprepare(res->slave_clk); +} + +static int qcom_pcie_init_v3(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v3 *res = &pcie->res.v3; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + u32 val; + int ret; + + ret = reset_control_assert(res->axi_m_reset); + if (ret) { + dev_err(dev, "cannot assert axi master reset\n"); + return ret; + } + + ret = reset_control_assert(res->axi_s_reset); + if (ret) { + dev_err(dev, "cannot assert axi slave reset\n"); + return ret; + } + + usleep_range(10000, 12000); + + ret = reset_control_assert(res->pipe_reset); + if (ret) { + dev_err(dev, "cannot assert pipe reset\n"); + return ret; + } + + ret = reset_control_assert(res->pipe_sticky_reset); + if (ret) { + dev_err(dev, "cannot assert pipe sticky reset\n"); + return ret; + } + + ret = reset_control_assert(res->phy_reset); + if (ret) { + dev_err(dev, "cannot assert phy reset\n"); + return ret; + } + + ret = reset_control_assert(res->phy_ahb_reset); + if (ret) { + dev_err(dev, "cannot assert phy ahb reset\n"); + return ret; + } + + usleep_range(10000, 12000); + + ret = reset_control_assert(res->axi_m_sticky_reset); + if (ret) { + dev_err(dev, "cannot assert axi master sticky reset\n"); + return ret; + } + + ret = reset_control_assert(res->pwr_reset); + if (ret) { + dev_err(dev, "cannot assert power reset\n"); + return ret; + } + + ret = reset_control_assert(res->ahb_reset); + if (ret) { + dev_err(dev, "cannot assert ahb reset\n"); + return ret; + } + + 
usleep_range(10000, 12000); + + ret = reset_control_deassert(res->phy_ahb_reset); + if (ret) { + dev_err(dev, "cannot deassert phy ahb reset\n"); + return ret; + } + + ret = reset_control_deassert(res->phy_reset); + if (ret) { + dev_err(dev, "cannot deassert phy reset\n"); + goto err_rst_phy; + } + + ret = reset_control_deassert(res->pipe_reset); + if (ret) { + dev_err(dev, "cannot deassert pipe reset\n"); + goto err_rst_pipe; + } + + ret = reset_control_deassert(res->pipe_sticky_reset); + if (ret) { + dev_err(dev, "cannot deassert pipe sticky reset\n"); + goto err_rst_pipe_sticky; + } + + usleep_range(10000, 12000); + + ret = reset_control_deassert(res->axi_m_reset); + if (ret) { + dev_err(dev, "cannot deassert axi master reset\n"); + goto err_rst_axi_m; + } + + ret = reset_control_deassert(res->axi_m_sticky_reset); + if (ret) { + dev_err(dev, "cannot deassert axi master sticky reset\n"); + goto err_rst_axi_m_sticky; + } + + ret = reset_control_deassert(res->axi_s_reset); + if (ret) { + dev_err(dev, "cannot deassert axi slave reset\n"); + goto err_rst_axi_s; + } + + ret = reset_control_deassert(res->pwr_reset); + if (ret) { + dev_err(dev, "cannot deassert power reset\n"); + goto err_rst_pwr; + } + + ret = reset_control_deassert(res->ahb_reset); + if (ret) { + dev_err(dev, "cannot deassert ahb reset\n"); + goto err_rst_ahb; + } + + usleep_range(10000, 12000); + + ret = clk_prepare_enable(res->aux_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable iface clock\n"); + goto err_clk_aux; + } + + ret = clk_prepare_enable(res->master_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable core clock\n"); + goto err_clk_axi_m; + } + + ret = clk_prepare_enable(res->slave_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable phy clock\n"); + goto err_clk_axi_s; + } + + /* enable PCIe clocks and resets */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~BIT(0); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + + /* change DBI base address */ + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + /* MAC PHY_POWERDOWN MUX DISABLE */ + val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); + val &= ~BIT(29); + writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); + + val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); + val |= BIT(4); + writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); + + val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); + val |= BIT(31); + writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); + + return 0; + +err_clk_axi_s: clk_disable_unprepare(res->master_clk); -err_clk_axi_m: clk_disable_unprepare(res->aux_clk); +err_clk_aux: + reset_control_assert(res->ahb_reset); +err_rst_ahb: + reset_control_assert(res->pwr_reset); +err_rst_pwr: + reset_control_assert(res->axi_s_reset); +err_rst_axi_s: + reset_control_assert(res->axi_m_sticky_reset); +err_rst_axi_m_sticky: + reset_control_assert(res->axi_m_reset); +err_rst_axi_m: + reset_control_assert(res->pipe_sticky_reset); +err_rst_pipe_sticky: + reset_control_assert(res->pipe_reset); +err_rst_pipe: + reset_control_assert(res->phy_reset); +err_rst_phy: + reset_control_assert(res->phy_ahb_reset); + return ret; +} + +static int qcom_pcie_link_up(struct dw_pcie *pci) +{ + u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); + + return !!(val & PCI_EXP_LNKSTA_DLLLA); } static void qcom_pcie_host_init(struct pcie_port *pp) @@ -634,7 +945,7 @@ static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, return 
dw_pcie_read(pci->dbi_base + where, size, val); } -static struct dw_pcie_host_ops qcom_pcie_dw_ops = { +static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { .host_init = qcom_pcie_host_init, .rd_own_conf = qcom_pcie_rd_own_conf, }; @@ -665,6 +976,13 @@ static const struct dw_pcie_ops dw_pcie_ops = { .link_up = qcom_pcie_link_up, }; +static const struct qcom_pcie_ops ops_v3 = { + .get_resources = qcom_pcie_get_resources_v3, + .init = qcom_pcie_init_v3, + .deinit = qcom_pcie_deinit_v3, + .ltssm_enable = qcom_pcie_v2_ltssm_enable, +}; + static int qcom_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -727,7 +1045,8 @@ static int qcom_pcie_probe(struct platform_device *pdev) ret = devm_request_irq(dev, pp->msi_irq, qcom_pcie_msi_irq_handler, - IRQF_SHARED, "qcom-pcie-msi", pp); + IRQF_SHARED | IRQF_NO_THREAD, + "qcom-pcie-msi", pp); if (ret) { dev_err(dev, "cannot request msi irq\n"); return ret; @@ -754,6 +1073,7 @@ static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 }, { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 }, { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 }, + { .compatible = "qcom,pcie-ipq4019", .data = &ops_v3 }, { } }; diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c index 8ff36b3dbbdf..80897291e0fb 100644 --- a/drivers/pci/dwc/pcie-spear13xx.c +++ b/drivers/pci/dwc/pcie-spear13xx.c @@ -186,7 +186,7 @@ static void spear13xx_pcie_host_init(struct pcie_port *pp) spear13xx_pcie_enable_interrupts(spear13xx_pcie); } -static struct dw_pcie_host_ops spear13xx_pcie_host_ops = { +static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = { .host_init = spear13xx_pcie_host_init, }; diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig index c23f146fb5a6..c09623ca8c3b 100644 --- a/drivers/pci/endpoint/Kconfig +++ b/drivers/pci/endpoint/Kconfig @@ -6,6 +6,7 @@ menu "PCI Endpoint" config PCI_ENDPOINT bool "PCI Endpoint Support" + depends on HAS_DMA help Enable this configuration option to support configurable PCI endpoint. This should be enabled if the platform has a PCI diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 7f47cd5e10a5..0cd5b30dccb1 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -180,6 +180,17 @@ config PCIE_ROCKCHIP There is 1 internal PCIe port available to support GEN2 with 4 slots. +config PCIE_MEDIATEK + bool "MediaTek PCIe controller" + depends on ARM && (ARCH_MEDIATEK || COMPILE_TEST) + depends on OF + depends on PCI + select PCIEPORTBUS + help + Say Y here if you want to enable PCIe controller support on + MT7623 series SoCs. There is one single root complex with 3 root + ports available. Each port supports Gen2 lane x1. 
+ config VMD depends on PCI_MSI && X86_64 && SRCU tristate "Intel Volume Management Device Driver" diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index cab879578003..b10d104c85fd 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o +obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o obj-$(CONFIG_VMD) += vmd.o # The following drivers are for devices that use the generic ACPI diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c index e938eebcb180..5162dffc102b 100644 --- a/drivers/pci/host/pci-ftpci100.c +++ b/drivers/pci/host/pci-ftpci100.c @@ -25,6 +25,7 @@ #include <linux/irqchip/chained_irq.h> #include <linux/bitops.h> #include <linux/irq.h> +#include <linux/clk.h> /* * Special configuration registers directly in the first few words @@ -37,6 +38,7 @@ #define PCI_CONFIG 0x28 /* PCI configuration command register */ #define PCI_DATA 0x2C +#define FARADAY_PCI_STATUS_CMD 0x04 /* Status and command */ #define FARADAY_PCI_PMC 0x40 /* Power management control */ #define FARADAY_PCI_PMCSR 0x44 /* Power management status */ #define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */ @@ -45,6 +47,8 @@ #define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */ #define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */ +#define PCI_STATUS_66MHZ_CAPABLE BIT(21) + /* Bits 31..28 gives INTD..INTA status */ #define PCI_CTRL2_INTSTS_SHIFT 28 #define PCI_CTRL2_INTMASK_CMDERR BIT(27) @@ -117,6 +121,7 @@ struct faraday_pci { void __iomem *base; struct irq_domain *irqdomain; struct pci_bus *bus; + struct clk *bus_clk; }; static int faraday_res_to_memcfg(resource_size_t mem_base, @@ -444,6 +449,9 @@ static int faraday_pci_probe(struct platform_device *pdev) struct resource *mem; struct resource *io; struct pci_host_bridge *host; + struct clk *clk; + unsigned char max_bus_speed = PCI_SPEED_33MHz; + unsigned char cur_bus_speed = PCI_SPEED_33MHz; int ret; u32 val; LIST_HEAD(res); @@ -462,6 +470,24 @@ static int faraday_pci_probe(struct platform_device *pdev) host->sysdata = p; p->dev = dev; + /* Retrieve and enable optional clocks */ + clk = devm_clk_get(dev, "PCLK"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(dev, "could not prepare PCLK\n"); + return ret; + } + p->bus_clk = devm_clk_get(dev, "PCICLK"); + if (IS_ERR(p->bus_clk)) + return PTR_ERR(clk); + ret = clk_prepare_enable(p->bus_clk); + if (ret) { + dev_err(dev, "could not prepare PCICLK\n"); + return ret; + } + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); p->base = devm_ioremap_resource(dev, regs); if (IS_ERR(p->base)) @@ -524,6 +550,34 @@ static int faraday_pci_probe(struct platform_device *pdev) } } + /* Check bus clock if we can gear up to 66 MHz */ + if (!IS_ERR(p->bus_clk)) { + unsigned long rate; + u32 val; + + faraday_raw_pci_read_config(p, 0, 0, + FARADAY_PCI_STATUS_CMD, 4, &val); + rate = clk_get_rate(p->bus_clk); + + if ((rate == 33000000) && (val & PCI_STATUS_66MHZ_CAPABLE)) { + dev_info(dev, "33MHz bus is 66MHz capable\n"); + max_bus_speed = PCI_SPEED_66MHz; + ret = clk_set_rate(p->bus_clk, 66000000); + if (ret) + dev_err(dev, "failed to set bus clock\n"); + } else { + dev_info(dev, "33MHz only bus\n"); + max_bus_speed = PCI_SPEED_33MHz; + } + + /* Bumping the clock may fail so read back the rate 
*/ + rate = clk_get_rate(p->bus_clk); + if (rate == 33000000) + cur_bus_speed = PCI_SPEED_33MHz; + if (rate == 66000000) + cur_bus_speed = PCI_SPEED_66MHz; + } + ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node); if (ret) return ret; @@ -535,6 +589,8 @@ static int faraday_pci_probe(struct platform_device *pdev) return ret; } p->bus = host->bus; + p->bus->max_bus_speed = max_bus_speed; + p->bus->cur_bus_speed = cur_bus_speed; pci_bus_assign_resources(p->bus); pci_bus_add_devices(p->bus); diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 84936383e269..415dcc69a502 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -64,22 +64,39 @@ * major version. */ -#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (major))) +#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor))) #define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16) #define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff) -enum { - PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), - PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1 +enum pci_protocol_version_t { + PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */ + PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */ }; #define CPU_AFFINITY_ALL -1ULL + +/* + * Supported protocol versions in the order of probing - highest go + * first. + */ +static enum pci_protocol_version_t pci_protocol_versions[] = { + PCI_PROTOCOL_VERSION_1_2, + PCI_PROTOCOL_VERSION_1_1, +}; + +/* + * Protocol version negotiated by hv_pci_protocol_negotiation(). + */ +static enum pci_protocol_version_t pci_protocol_version; + #define PCI_CONFIG_MMIO_LENGTH 0x2000 #define CFG_PAGE_OFFSET 0x1000 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) #define MAX_SUPPORTED_MSI_MESSAGES 0x400 +#define STATUS_REVISION_MISMATCH 0xC0000059 + /* * Message Types */ @@ -109,6 +126,9 @@ enum pci_message_type { PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13, PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14, PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15, + PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16, + PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17, + PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */ PCI_MESSAGE_MAXIMUM }; @@ -179,6 +199,30 @@ struct hv_msi_desc { } __packed; /** + * struct hv_msi_desc2 - 1.2 version of hv_msi_desc + * @vector: IDT entry + * @delivery_mode: As defined in Intel's Programmer's + * Reference Manual, Volume 3, Chapter 8. + * @vector_count: Number of contiguous entries in the + * Interrupt Descriptor Table that are + * occupied by this Message-Signaled + * Interrupt. For "MSI", as first defined + * in PCI 2.2, this can be between 1 and + * 32. For "MSI-X," as first defined in PCI + * 3.0, this must be 1, as each MSI-X table + * entry would have its own descriptor. + * @processor_count: number of bits enabled in array. + * @processor_array: All the target virtual processors. 
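+ *
+ * Editor's note (illustration, not part of the original patch): the
+ * layout is fixed at 1 + 1 + 2 + 2 + 32 * 2 = 70 bytes, which a
+ * build-time check could pin down as:
+ *
+ *	BUILD_BUG_ON(sizeof(struct hv_msi_desc2) != 70);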
+ */ +struct hv_msi_desc2 { + u8 vector; + u8 delivery_mode; + u16 vector_count; + u16 processor_count; + u16 processor_array[32]; +} __packed; + +/** * struct tran_int_desc * @reserved: unused, padding * @vector_count: same as in hv_msi_desc @@ -245,7 +289,7 @@ struct pci_packet { struct pci_version_request { struct pci_message message_type; - enum pci_message_type protocol_version; + u32 protocol_version; } __packed; /* @@ -294,6 +338,14 @@ struct pci_resources_assigned { u32 reserved[4]; } __packed; +struct pci_resources_assigned2 { + struct pci_message message_type; + union win_slot_encoding wslot; + u8 memory_range[0x14][6]; /* not used here */ + u32 msi_descriptor_count; + u8 reserved[70]; +} __packed; + struct pci_create_interrupt { struct pci_message message_type; union win_slot_encoding wslot; @@ -306,6 +358,12 @@ struct pci_create_int_response { struct tran_int_desc int_desc; } __packed; +struct pci_create_interrupt2 { + struct pci_message message_type; + union win_slot_encoding wslot; + struct hv_msi_desc2 int_desc; +} __packed; + struct pci_delete_interrupt { struct pci_message message_type; union win_slot_encoding wslot; @@ -331,17 +389,42 @@ static int pci_ring_size = (4 * PAGE_SIZE); #define HV_PARTITION_ID_SELF ((u64)-1) #define HVCALL_RETARGET_INTERRUPT 0x7e -struct retarget_msi_interrupt { - u64 partition_id; /* use "self" */ - u64 device_id; +struct hv_interrupt_entry { u32 source; /* 1 for MSI(-X) */ u32 reserved1; u32 address; u32 data; - u64 reserved2; +}; + +#define HV_VP_SET_BANK_COUNT_MAX 5 /* current implementation limit */ + +struct hv_vp_set { + u64 format; /* 0 (HvGenericSetSparse4k) */ + u64 valid_banks; + u64 masks[HV_VP_SET_BANK_COUNT_MAX]; +}; + +/* + * flags for hv_device_interrupt_target.flags + */ +#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1 +#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2 + +struct hv_device_interrupt_target { u32 vector; u32 flags; - u64 vp_mask; + union { + u64 vp_mask; + struct hv_vp_set vp_set; + }; +}; + +struct retarget_msi_interrupt { + u64 partition_id; /* use "self" */ + u64 device_id; + struct hv_interrupt_entry int_entry; + u64 reserved2; + struct hv_device_interrupt_target int_target; } __packed; /* @@ -382,7 +465,10 @@ struct hv_pcibus_device { struct msi_domain_info msi_info; struct msi_controller msi_chip; struct irq_domain *irq_domain; + + /* hypercall arg, must not cross page boundary */ struct retarget_msi_interrupt retarget_msi_interrupt_params; + spinlock_t retarget_msi_interrupt_lock; }; @@ -476,6 +562,52 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev, static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus); static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus); + +/* + * Temporary CPU to vCPU mapping to address transitioning + * vmbus_cpu_number_to_vp_number() being migrated to + * hv_cpu_number_to_vp_number() in a separate patch. Once that patch + * has been picked up in the main line, remove this code here and use + * the official code. 
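+ *
+ * The mapping is consumed as a plain per-CPU table lookup, e.g.
+ * (editor's sketch only, not from the patch):
+ *
+ *	int cpu = cpumask_first_and(dest, cpu_online_mask);
+ *	u32 vp = hv_tmp_cpu_nr_to_vp_nr(cpu);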
+ */ +static struct hv_tmpcpumap +{ + bool initialized; + u32 vp_index[NR_CPUS]; +} hv_tmpcpumap; + +static void hv_tmpcpumap_init_cpu(void *_unused) +{ + int cpu = smp_processor_id(); + u64 vp_index; + + hv_get_vp_index(vp_index); + + hv_tmpcpumap.vp_index[cpu] = vp_index; +} + +static void hv_tmpcpumap_init(void) +{ + if (hv_tmpcpumap.initialized) + return; + + memset(hv_tmpcpumap.vp_index, -1, sizeof(hv_tmpcpumap.vp_index)); + on_each_cpu(hv_tmpcpumap_init_cpu, NULL, true); + hv_tmpcpumap.initialized = true; +} + +/** + * hv_tmp_cpu_nr_to_vp_nr() - Convert Linux CPU nr to Hyper-V vCPU nr + * + * Remove once vmbus_cpu_number_to_vp_number() has been converted to + * hv_cpu_number_to_vp_number() and replace callers appropriately. + */ +static u32 hv_tmp_cpu_nr_to_vp_nr(int cpu) +{ + return hv_tmpcpumap.vp_index[cpu]; +} + + /** * devfn_to_wslot() - Convert from Linux PCI slot to Windows * @devfn: The Linux representation of PCI slot @@ -786,8 +918,11 @@ static void hv_irq_unmask(struct irq_data *data) struct cpumask *dest; struct pci_bus *pbus; struct pci_dev *pdev; - int cpu; unsigned long flags; + u32 var_size = 0; + int cpu_vmbus; + int cpu; + u64 res; dest = irq_data_get_affinity_mask(data); pdev = msi_desc_to_pci_dev(msi_desc); @@ -799,23 +934,74 @@ static void hv_irq_unmask(struct irq_data *data) params = &hbus->retarget_msi_interrupt_params; memset(params, 0, sizeof(*params)); params->partition_id = HV_PARTITION_ID_SELF; - params->source = 1; /* MSI(-X) */ - params->address = msi_desc->msg.address_lo; - params->data = msi_desc->msg.data; + params->int_entry.source = 1; /* MSI(-X) */ + params->int_entry.address = msi_desc->msg.address_lo; + params->int_entry.data = msi_desc->msg.data; params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | (hbus->hdev->dev_instance.b[4] << 16) | (hbus->hdev->dev_instance.b[7] << 8) | (hbus->hdev->dev_instance.b[6] & 0xf8) | PCI_FUNC(pdev->devfn); - params->vector = cfg->vector; + params->int_target.vector = cfg->vector; + + /* + * Honoring apic->irq_delivery_mode set to dest_Fixed by + * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a + * spurious interrupt storm. Not doing so does not seem to have a + * negative effect (yet?). + */ + + if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) { + /* + * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the + * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides + * with >64 VP support. + * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED + * is not sufficient for this hypercall. + */ + params->int_target.flags |= + HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET; + params->int_target.vp_set.valid_banks = + (1ull << HV_VP_SET_BANK_COUNT_MAX) - 1; + + /* + * var-sized hypercall, var-size starts after vp_mask (thus + * vp_set.format does not count, but vp_set.valid_banks does). 
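+ *
+ * Worked example (editorial, not part of the patch): with
+ * HV_VP_SET_BANK_COUNT_MAX == 5 the variable part is 1 + 5 = 6
+ * 64-bit chunks, so the call below becomes
+ *
+ *	hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (6 << 17), ...);
+ *
+ * with the variable-header size encoded from bit 17 upward.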
+ */ + var_size = 1 + HV_VP_SET_BANK_COUNT_MAX; - for_each_cpu_and(cpu, dest, cpu_online_mask) - params->vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu)); + for_each_cpu_and(cpu, dest, cpu_online_mask) { + cpu_vmbus = hv_tmp_cpu_nr_to_vp_nr(cpu); - hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, params, NULL); + if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) { + dev_err(&hbus->hdev->device, + "too high CPU %d", cpu_vmbus); + res = 1; + goto exit_unlock; + } + params->int_target.vp_set.masks[cpu_vmbus / 64] |= + (1ULL << (cpu_vmbus & 63)); + } + } else { + for_each_cpu_and(cpu, dest, cpu_online_mask) { + params->int_target.vp_mask |= + (1ULL << hv_tmp_cpu_nr_to_vp_nr(cpu)); + } + } + + res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17), + params, NULL); + +exit_unlock: spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags); + if (res) { + dev_err(&hbus->hdev->device, + "%s() failed: %#llx", __func__, res); + return; + } + pci_msi_unmask_irq(data); } @@ -836,6 +1022,53 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp, complete(&comp_pkt->comp_pkt.host_event); } +static u32 hv_compose_msi_req_v1( + struct pci_create_interrupt *int_pkt, struct cpumask *affinity, + u32 slot, u8 vector) +{ + int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; + int_pkt->wslot.slot = slot; + int_pkt->int_desc.vector = vector; + int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.delivery_mode = + (apic->irq_delivery_mode == dest_LowestPrio) ? + dest_LowestPrio : dest_Fixed; + + /* + * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in + * hv_irq_unmask(). + */ + int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL; + + return sizeof(*int_pkt); +} + +static u32 hv_compose_msi_req_v2( + struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity, + u32 slot, u8 vector) +{ + int cpu; + + int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2; + int_pkt->wslot.slot = slot; + int_pkt->int_desc.vector = vector; + int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.delivery_mode = + (apic->irq_delivery_mode == dest_LowestPrio) ? + dest_LowestPrio : dest_Fixed; + + /* + * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten + * by subsequent retarget in hv_irq_unmask(). 
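+ *
+ * For contrast (editor's note, not from the patch): the v1 encoding
+ * above can cover every vCPU with a single mask assignment,
+ *
+ *	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
+ *
+ * whereas the v2 processor_array[] must name at least one explicit
+ * vCPU, hence the cpumask_first_and() pick below.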
+ */ + cpu = cpumask_first_and(affinity, cpu_online_mask); + int_pkt->int_desc.processor_array[0] = + hv_tmp_cpu_nr_to_vp_nr(cpu); + int_pkt->int_desc.processor_count = 1; + + return sizeof(*int_pkt); +} + /** * hv_compose_msi_msg() - Supplies a valid MSI address/data * @data: Everything about this MSI @@ -854,15 +1087,17 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct hv_pci_dev *hpdev; struct pci_bus *pbus; struct pci_dev *pdev; - struct pci_create_interrupt *int_pkt; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; - struct cpumask *affinity; struct { - struct pci_packet pkt; - u8 buffer[sizeof(struct pci_create_interrupt)]; - } ctxt; - int cpu; + struct pci_packet pci_pkt; + union { + struct pci_create_interrupt v1; + struct pci_create_interrupt2 v2; + } int_pkts; + } __packed ctxt; + + u32 size; int ret; pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); @@ -885,36 +1120,44 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) memset(&ctxt, 0, sizeof(ctxt)); init_completion(&comp.comp_pkt.host_event); - ctxt.pkt.completion_func = hv_pci_compose_compl; - ctxt.pkt.compl_ctxt = &comp; - int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message; - int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; - int_pkt->wslot.slot = hpdev->desc.win_slot.slot; - int_pkt->int_desc.vector = cfg->vector; - int_pkt->int_desc.vector_count = 1; - int_pkt->int_desc.delivery_mode = - (apic->irq_delivery_mode == dest_LowestPrio) ? 1 : 0; + ctxt.pci_pkt.completion_func = hv_pci_compose_compl; + ctxt.pci_pkt.compl_ctxt = &comp; + + switch (pci_protocol_version) { + case PCI_PROTOCOL_VERSION_1_1: + size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, + irq_data_get_affinity_mask(data), + hpdev->desc.win_slot.slot, + cfg->vector); + break; - /* * This bit doesn't have to work on machines with more than 64 * processors because Hyper-V only supports 64 in a guest. */ - affinity = irq_data_get_affinity_mask(data); - if (cpumask_weight(affinity) >= 32) { - int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL; - } else { - for_each_cpu_and(cpu, affinity, cpu_online_mask) { - int_pkt->int_desc.cpu_mask |= - (1ULL << vmbus_cpu_number_to_vp_number(cpu)); - } + case PCI_PROTOCOL_VERSION_1_2: + size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, + irq_data_get_affinity_mask(data), + hpdev->desc.win_slot.slot, + cfg->vector); + break; + + default: + /* As we only negotiate protocol versions known to this driver, + * this path should never be hit. However, it is not a hot + * path so we print a message to aid future updates. + */ + dev_err(&hbus->hdev->device, + "Unexpected vPCI protocol, update driver."); + goto free_int_desc; } - ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, - sizeof(*int_pkt), (unsigned long)&ctxt.pkt, + ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts, + size, (unsigned long)&ctxt.pci_pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - if (ret) + if (ret) { + dev_err(&hbus->hdev->device, + "Sending request for interrupt failed: 0x%x", + ret); + goto free_int_desc; + } wait_for_completion(&comp.comp_pkt.host_event); @@ -1513,12 +1756,12 @@ static void pci_devices_present_work(struct work_struct *work) put_pcichild(hpdev, hv_pcidev_ref_initial); } - switch(hbus->state) { + switch (hbus->state) { case hv_pcibus_installed: /* - * Tell the core to rescan bus - * because there may have been changes.
- */ + * Tell the core to rescan bus + * because there may have been changes. + */ pci_lock_rescan_remove(); pci_scan_child_bus(hbus->pci_bus); pci_unlock_rescan_remove(); @@ -1800,6 +2043,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev) struct hv_pci_compl comp_pkt; struct pci_packet *pkt; int ret; + int i; /* * Initiate the handshake with the host and negotiate @@ -1816,26 +2060,44 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev) pkt->compl_ctxt = &comp_pkt; version_req = (struct pci_version_request *)&pkt->message; version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; - version_req->protocol_version = PCI_PROTOCOL_VERSION_CURRENT; - ret = vmbus_sendpacket(hdev->channel, version_req, - sizeof(struct pci_version_request), - (unsigned long)pkt, VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - if (ret) - goto exit; + for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) { + version_req->protocol_version = pci_protocol_versions[i]; + ret = vmbus_sendpacket(hdev->channel, version_req, + sizeof(struct pci_version_request), + (unsigned long)pkt, VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (ret) { + dev_err(&hdev->device, + "PCI Pass-through VSP failed sending version request: %#x", + ret); + goto exit; + } - wait_for_completion(&comp_pkt.host_event); + wait_for_completion(&comp_pkt.host_event); - if (comp_pkt.completion_status < 0) { - dev_err(&hdev->device, - "PCI Pass-through VSP failed version request %x\n", - comp_pkt.completion_status); - ret = -EPROTO; - goto exit; + if (comp_pkt.completion_status >= 0) { + pci_protocol_version = pci_protocol_versions[i]; + dev_info(&hdev->device, + "PCI VMBus probing: Using version %#x\n", + pci_protocol_version); + goto exit; + } + + if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) { + dev_err(&hdev->device, + "PCI Pass-through VSP failed version request: %#x", + comp_pkt.completion_status); + ret = -EPROTO; + goto exit; + } + + reinit_completion(&comp_pkt.host_event); } - ret = 0; + dev_err(&hdev->device, + "PCI Pass-through VSP failed to find supported version"); + ret = -EPROTO; exit: kfree(pkt); @@ -2094,13 +2356,18 @@ static int hv_send_resources_allocated(struct hv_device *hdev) { struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); struct pci_resources_assigned *res_assigned; + struct pci_resources_assigned2 *res_assigned2; struct hv_pci_compl comp_pkt; struct hv_pci_dev *hpdev; struct pci_packet *pkt; + size_t size_res; u32 wslot; int ret; - pkt = kmalloc(sizeof(*pkt) + sizeof(*res_assigned), GFP_KERNEL); + size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) + ?
sizeof(*res_assigned) : sizeof(*res_assigned2); + + pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL); if (!pkt) return -ENOMEM; @@ -2111,22 +2378,30 @@ static int hv_send_resources_allocated(struct hv_device *hdev) if (!hpdev) continue; - memset(pkt, 0, sizeof(*pkt) + sizeof(*res_assigned)); + memset(pkt, 0, sizeof(*pkt) + size_res); init_completion(&comp_pkt.host_event); pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; - res_assigned = (struct pci_resources_assigned *)&pkt->message; - res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED; - res_assigned->wslot.slot = hpdev->desc.win_slot.slot; + if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) { + res_assigned = + (struct pci_resources_assigned *)&pkt->message; + res_assigned->message_type.type = + PCI_RESOURCES_ASSIGNED; + res_assigned->wslot.slot = hpdev->desc.win_slot.slot; + } else { + res_assigned2 = + (struct pci_resources_assigned2 *)&pkt->message; + res_assigned2->message_type.type = + PCI_RESOURCES_ASSIGNED2; + res_assigned2->wslot.slot = hpdev->desc.win_slot.slot; + } put_pcichild(hpdev, hv_pcidev_ref_by_slot); - ret = vmbus_sendpacket( - hdev->channel, &pkt->message, - sizeof(*res_assigned), - (unsigned long)pkt, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + ret = vmbus_sendpacket(hdev->channel, &pkt->message, + size_res, (unsigned long)pkt, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) break; @@ -2204,11 +2479,19 @@ static int hv_pci_probe(struct hv_device *hdev, struct hv_pcibus_device *hbus; int ret; - hbus = kzalloc(sizeof(*hbus), GFP_KERNEL); + /* + * hv_pcibus_device contains the hypercall arguments for retargeting in + * hv_irq_unmask(). Those must not cross a page boundary. + */ + BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE); + + hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL); if (!hbus) return -ENOMEM; hbus->state = hv_pcibus_init; + hv_tmpcpumap_init(); + /* * The PCI bus "domain" is what is called "segment" in ACPI and * other specs. 
Pull it from the instance ID, to get something @@ -2308,7 +2591,7 @@ free_config: close: vmbus_close(hdev->channel); free_bus: - kfree(hbus); + free_page((unsigned long)hbus); return ret; } @@ -2386,7 +2669,7 @@ static int hv_pci_remove(struct hv_device *hdev) irq_domain_free_fwnode(hbus->sysdata.fwnode); put_hvpcibus(hbus); wait_for_completion(&hbus->remove_event); - kfree(hbus); + free_page((unsigned long)hbus); return 0; } diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c index 85348590848b..6f879685fedd 100644 --- a/drivers/pci/host/pci-rcar-gen2.c +++ b/drivers/pci/host/pci-rcar-gen2.c @@ -429,7 +429,7 @@ static int rcar_pci_probe(struct platform_device *pdev) return 0; } -static struct of_device_id rcar_pci_of_match[] = { +static const struct of_device_id rcar_pci_of_match[] = { { .compatible = "renesas,pci-r8a7790", }, { .compatible = "renesas,pci-r8a7791", }, { .compatible = "renesas,pci-r8a7794", }, diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 0dadb81eca70..b3722b7709df 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -233,8 +233,8 @@ struct tegra_msi { struct msi_controller chip; DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; - unsigned long pages; struct mutex lock; + u64 phys; int irq; }; @@ -1448,9 +1448,8 @@ static int tegra_msi_setup_irq(struct msi_controller *chip, irq_set_msi_desc(irq, desc); - msg.address_lo = virt_to_phys((void *)msi->pages); - /* 32 bit address only */ - msg.address_hi = 0; + msg.address_lo = lower_32_bits(msi->phys); + msg.address_hi = upper_32_bits(msi->phys); msg.data = hwirq; pci_write_msi_msg(irq, &msg); @@ -1499,7 +1498,6 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_msi *msi = &pcie->msi; struct device *dev = pcie->dev; - unsigned long base; int err; u32 reg; @@ -1531,12 +1529,25 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) goto err; } - /* setup AFI/FPCI range */ - msi->pages = __get_free_pages(GFP_KERNEL, 0); - base = virt_to_phys((void *)msi->pages); + /* + * The PCI host bridge on Tegra contains some logic that intercepts + * MSI writes, which means that the MSI target address doesn't have + * to point to actual physical memory. Rather than allocating one 4 + * KiB page of system memory that's never used, we can simply pick + * an arbitrary address within an area reserved for system memory + * in the FPCI address map. + * + * However, in order to avoid confusion, we pick an address that + * doesn't map to physical memory. The FPCI address map reserves a + * 1012 GiB region for system memory and memory-mapped I/O. Since + * none of the Tegra SoCs that contain this PCI host bridge can + * address more than 16 GiB of system memory, the last 4 KiB of + * these 1012 GiB is a good candidate. 
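+ *
+ * Worked out (editor's note): 1012 GiB is 1012 << 30 = 0xfd00000000,
+ * so the last 4 KiB page below that boundary starts at
+ *
+ *	0xfd00000000 - 0x1000 = 0xfcfffff000
+ *
+ * which is exactly the constant assigned below.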
+ */ + msi->phys = 0xfcfffff000; - afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); - afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST); + afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); + afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); /* this register is in 4K increments */ afi_writel(pcie, 1, AFI_MSI_BAR_SZ); @@ -1585,8 +1596,6 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) afi_writel(pcie, 0, AFI_MSI_EN_VEC6); afi_writel(pcie, 0, AFI_MSI_EN_VEC7); - free_pages(msi->pages, 0); - if (msi->irq > 0) free_irq(msi->irq, pcie); diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c new file mode 100644 index 000000000000..5a9d8589ea0b --- /dev/null +++ b/drivers/pci/host/pcie-mediatek.c @@ -0,0 +1,554 @@ +/* + * MediaTek PCIe host controller driver. + * + * Copyright (c) 2017 MediaTek Inc. + * Author: Ryder Lee <ryder.lee@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> +#include <linux/of_platform.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> + +/* PCIe shared registers */ +#define PCIE_SYS_CFG 0x00 +#define PCIE_INT_ENABLE 0x0c +#define PCIE_CFG_ADDR 0x20 +#define PCIE_CFG_DATA 0x24 + +/* PCIe per port registers */ +#define PCIE_BAR0_SETUP 0x10 +#define PCIE_CLASS 0x34 +#define PCIE_LINK_STATUS 0x50 + +#define PCIE_PORT_INT_EN(x) BIT(20 + (x)) +#define PCIE_PORT_PERST(x) BIT(1 + (x)) +#define PCIE_PORT_LINKUP BIT(0) +#define PCIE_BAR_MAP_MAX GENMASK(31, 16) + +#define PCIE_BAR_ENABLE BIT(0) +#define PCIE_REVISION_ID BIT(0) +#define PCIE_CLASS_CODE (0x60400 << 8) +#define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \ + ((((regn) >> 8) & GENMASK(3, 0)) << 24)) +#define PCIE_CONF_FUN(fun) (((fun) << 8) & GENMASK(10, 8)) +#define PCIE_CONF_DEV(dev) (((dev) << 11) & GENMASK(15, 11)) +#define PCIE_CONF_BUS(bus) (((bus) << 16) & GENMASK(23, 16)) +#define PCIE_CONF_ADDR(regn, fun, dev, bus) \ + (PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \ + PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus)) + +/* MediaTek specific configuration registers */ +#define PCIE_FTS_NUM 0x70c +#define PCIE_FTS_NUM_MASK GENMASK(15, 8) +#define PCIE_FTS_NUM_L0(x) ((x) & 0xff << 8) + +#define PCIE_FC_CREDIT 0x73c +#define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16)) +#define PCIE_FC_CREDIT_VAL(x) ((x) << 16) + +/** + * struct mtk_pcie_port - PCIe port information + * @base: IO mapped register base + * @list: port list + * @pcie: pointer to PCIe host info + * @reset: pointer to port reset control + * @sys_ck: pointer to bus clock + * @phy: pointer to phy control block + * @lane: lane count + * @index: port index + */ +struct mtk_pcie_port { + void __iomem *base; + struct list_head list; + struct mtk_pcie *pcie; + struct reset_control *reset; + struct clk *sys_ck; + struct phy *phy; + u32 lane; + u32 index; +}; + +/** + * struct mtk_pcie - PCIe host information + * @dev: pointer to PCIe device + * 
@base: IO mapped register base + * @free_ck: free-run reference clock + * @io: IO resource + * @pio: PIO resource + * @mem: non-prefetchable memory resource + * @busn: bus range + * @offset: IO / Memory offset + * @ports: pointer to PCIe port information + */ +struct mtk_pcie { + struct device *dev; + void __iomem *base; + struct clk *free_ck; + + struct resource io; + struct resource pio; + struct resource mem; + struct resource busn; + struct { + resource_size_t mem; + resource_size_t io; + } offset; + struct list_head ports; +}; + +static inline bool mtk_pcie_link_up(struct mtk_pcie_port *port) +{ + return !!(readl(port->base + PCIE_LINK_STATUS) & PCIE_PORT_LINKUP); +} + +static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie) +{ + struct device *dev = pcie->dev; + + clk_disable_unprepare(pcie->free_ck); + + if (dev->pm_domain) { + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + } +} + +static void mtk_pcie_port_free(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + struct device *dev = pcie->dev; + + devm_iounmap(dev, port->base); + list_del(&port->list); + devm_kfree(dev, port); +} + +static void mtk_pcie_put_resources(struct mtk_pcie *pcie) +{ + struct mtk_pcie_port *port, *tmp; + + list_for_each_entry_safe(port, tmp, &pcie->ports, list) { + phy_power_off(port->phy); + clk_disable_unprepare(port->sys_ck); + mtk_pcie_port_free(port); + } + + mtk_pcie_subsys_powerdown(pcie); +} + +static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct pci_host_bridge *host = pci_find_host_bridge(bus); + struct mtk_pcie *pcie = pci_host_bridge_priv(host); + + writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn), + bus->number), pcie->base + PCIE_CFG_ADDR); + + return pcie->base + PCIE_CFG_DATA + (where & 3); +} + +static struct pci_ops mtk_pcie_ops = { + .map_bus = mtk_pcie_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, +}; + +static void mtk_pcie_configure_rc(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + u32 func = PCI_FUNC(port->index << 3); + u32 slot = PCI_SLOT(port->index << 3); + u32 val; + + /* enable interrupt */ + val = readl(pcie->base + PCIE_INT_ENABLE); + val |= PCIE_PORT_INT_EN(port->index); + writel(val, pcie->base + PCIE_INT_ENABLE); + + /* map to all DDR region. We need to set it before cfg operation. 
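+ * As an editorial aside (not part of the patch): PCIE_BAR_MAP_MAX is
+ * GENMASK(31, 16) and PCIE_BAR_ENABLE is BIT(0), so the write below is
+ * equivalent to
+ *
+ *	writel(0xffff0001, port->base + PCIE_BAR0_SETUP);
+ *
+ * i.e. the widest possible BAR0 aperture with the enable bit set.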
*/ + writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE, + port->base + PCIE_BAR0_SETUP); + + /* configure class code and revision ID */ + writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS); + + /* configure FC credit */ + writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0), + pcie->base + PCIE_CFG_ADDR); + val = readl(pcie->base + PCIE_CFG_DATA); + val &= ~PCIE_FC_CREDIT_MASK; + val |= PCIE_FC_CREDIT_VAL(0x806c); + writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0), + pcie->base + PCIE_CFG_ADDR); + writel(val, pcie->base + PCIE_CFG_DATA); + + /* configure RC FTS number to 250 when it leaves L0s */ + writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), + pcie->base + PCIE_CFG_ADDR); + val = readl(pcie->base + PCIE_CFG_DATA); + val &= ~PCIE_FTS_NUM_MASK; + val |= PCIE_FTS_NUM_L0(0x50); + writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), + pcie->base + PCIE_CFG_ADDR); + writel(val, pcie->base + PCIE_CFG_DATA); +} + +static void mtk_pcie_assert_ports(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + u32 val; + + /* assert port PERST_N */ + val = readl(pcie->base + PCIE_SYS_CFG); + val |= PCIE_PORT_PERST(port->index); + writel(val, pcie->base + PCIE_SYS_CFG); + + /* de-assert port PERST_N */ + val = readl(pcie->base + PCIE_SYS_CFG); + val &= ~PCIE_PORT_PERST(port->index); + writel(val, pcie->base + PCIE_SYS_CFG); + + /* PCIe v2.0 needs at least 100ms delay to train from Gen1 to Gen2 */ + msleep(100); +} + +static void mtk_pcie_enable_ports(struct mtk_pcie_port *port) +{ + struct device *dev = port->pcie->dev; + int err; + + err = clk_prepare_enable(port->sys_ck); + if (err) { + dev_err(dev, "failed to enable port%d clock\n", port->index); + goto err_sys_clk; + } + + reset_control_assert(port->reset); + reset_control_deassert(port->reset); + + err = phy_power_on(port->phy); + if (err) { + dev_err(dev, "failed to power on port%d phy\n", port->index); + goto err_phy_on; + } + + mtk_pcie_assert_ports(port); + + /* if link up, then set up root port configuration space */ + if (mtk_pcie_link_up(port)) { + mtk_pcie_configure_rc(port); + return; + } + + dev_info(dev, "Port%d link down\n", port->index); + + phy_power_off(port->phy); +err_phy_on: + clk_disable_unprepare(port->sys_ck); +err_sys_clk: + mtk_pcie_port_free(port); +} + +static int mtk_pcie_parse_ports(struct mtk_pcie *pcie, + struct device_node *node, + int index) +{ + struct mtk_pcie_port *port; + struct resource *regs; + struct device *dev = pcie->dev; + struct platform_device *pdev = to_platform_device(dev); + char name[10]; + int err; + + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + err = of_property_read_u32(node, "num-lanes", &port->lane); + if (err) { + dev_err(dev, "missing num-lanes property\n"); + return err; + } + + regs = platform_get_resource(pdev, IORESOURCE_MEM, index + 1); + port->base = devm_ioremap_resource(dev, regs); + if (IS_ERR(port->base)) { + dev_err(dev, "failed to map port%d base\n", index); + return PTR_ERR(port->base); + } + + snprintf(name, sizeof(name), "sys_ck%d", index); + port->sys_ck = devm_clk_get(dev, name); + if (IS_ERR(port->sys_ck)) { + dev_err(dev, "failed to get port%d clock\n", index); + return PTR_ERR(port->sys_ck); + } + + snprintf(name, sizeof(name), "pcie-rst%d", index); + port->reset = devm_reset_control_get_optional(dev, name); + if (PTR_ERR(port->reset) == -EPROBE_DEFER) + return PTR_ERR(port->reset); + + /* some platforms may use default PHY setting */ + snprintf(name, sizeof(name), "pcie-phy%d", index); + port->phy =
devm_phy_optional_get(dev, name); + if (IS_ERR(port->phy)) + return PTR_ERR(port->phy); + + port->index = index; + port->pcie = pcie; + + INIT_LIST_HEAD(&port->list); + list_add_tail(&port->list, &pcie->ports); + + return 0; +} + +static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct platform_device *pdev = to_platform_device(dev); + struct resource *regs; + int err; + + /* get shared registers */ + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pcie->base = devm_ioremap_resource(dev, regs); + if (IS_ERR(pcie->base)) { + dev_err(dev, "failed to map shared register\n"); + return PTR_ERR(pcie->base); + } + + pcie->free_ck = devm_clk_get(dev, "free_ck"); + if (IS_ERR(pcie->free_ck)) { + if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + pcie->free_ck = NULL; + } + + if (dev->pm_domain) { + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + } + + /* enable top level clock */ + err = clk_prepare_enable(pcie->free_ck); + if (err) { + dev_err(dev, "failed to enable free_ck\n"); + goto err_free_ck; + } + + return 0; + +err_free_ck: + if (dev->pm_domain) { + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + } + + return err; +} + +static int mtk_pcie_setup(struct mtk_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct device_node *node = dev->of_node, *child; + struct of_pci_range_parser parser; + struct of_pci_range range; + struct resource res; + struct mtk_pcie_port *port, *tmp; + int err; + + if (of_pci_range_parser_init(&parser, node)) { + dev_err(dev, "missing \"ranges\" property\n"); + return -EINVAL; + } + + for_each_of_pci_range(&parser, &range) { + err = of_pci_range_to_resource(&range, node, &res); + if (err < 0) + return err; + + switch (res.flags & IORESOURCE_TYPE_BITS) { + case IORESOURCE_IO: + pcie->offset.io = res.start - range.pci_addr; + + memcpy(&pcie->pio, &res, sizeof(res)); + pcie->pio.name = node->full_name; + + pcie->io.start = range.cpu_addr; + pcie->io.end = range.cpu_addr + range.size - 1; + pcie->io.flags = IORESOURCE_MEM; + pcie->io.name = "I/O"; + + memcpy(&res, &pcie->io, sizeof(res)); + break; + + case IORESOURCE_MEM: + pcie->offset.mem = res.start - range.pci_addr; + + memcpy(&pcie->mem, &res, sizeof(res)); + pcie->mem.name = "non-prefetchable"; + break; + } + } + + err = of_pci_parse_bus_range(node, &pcie->busn); + if (err < 0) { + dev_err(dev, "failed to parse bus ranges property: %d\n", err); + pcie->busn.name = node->name; + pcie->busn.start = 0; + pcie->busn.end = 0xff; + pcie->busn.flags = IORESOURCE_BUS; + } + + for_each_available_child_of_node(node, child) { + int index; + + err = of_pci_get_devfn(child); + if (err < 0) { + dev_err(dev, "failed to parse devfn: %d\n", err); + return err; + } + + index = PCI_SLOT(err); + + err = mtk_pcie_parse_ports(pcie, child, index); + if (err) + return err; + } + + err = mtk_pcie_subsys_powerup(pcie); + if (err) + return err; + + /* enable each port, and then check link status */ + list_for_each_entry_safe(port, tmp, &pcie->ports, list) + mtk_pcie_enable_ports(port); + + /* power down PCIe subsys if slots are all empty (link down) */ + if (list_empty(&pcie->ports)) + mtk_pcie_subsys_powerdown(pcie); + + return 0; +} + +static int mtk_pcie_request_resources(struct mtk_pcie *pcie) +{ + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); + struct list_head *windows = &host->windows; + struct device *dev = pcie->dev; + int err; + + pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io); + 
pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem); + pci_add_resource(windows, &pcie->busn); + + err = devm_request_pci_bus_resources(dev, windows); + if (err < 0) + return err; + + pci_remap_iospace(&pcie->pio, pcie->io.start); + + return 0; +} + +static int mtk_pcie_register_host(struct pci_host_bridge *host) +{ + struct mtk_pcie *pcie = pci_host_bridge_priv(host); + struct pci_bus *child; + int err; + + host->busnr = pcie->busn.start; + host->dev.parent = pcie->dev; + host->ops = &mtk_pcie_ops; + host->map_irq = of_irq_parse_and_map_pci; + host->swizzle_irq = pci_common_swizzle; + + err = pci_scan_root_bus_bridge(host); + if (err < 0) + return err; + + pci_bus_size_bridges(host->bus); + pci_bus_assign_resources(host->bus); + + list_for_each_entry(child, &host->bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(host->bus); + + return 0; +} + +static int mtk_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_pcie *pcie; + struct pci_host_bridge *host; + int err; + + host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!host) + return -ENOMEM; + + pcie = pci_host_bridge_priv(host); + + pcie->dev = dev; + platform_set_drvdata(pdev, pcie); + INIT_LIST_HEAD(&pcie->ports); + + err = mtk_pcie_setup(pcie); + if (err) + return err; + + err = mtk_pcie_request_resources(pcie); + if (err) + goto put_resources; + + err = mtk_pcie_register_host(host); + if (err) + goto put_resources; + + return 0; + +put_resources: + if (!list_empty(&pcie->ports)) + mtk_pcie_put_resources(pcie); + + return err; +} + +static const struct of_device_id mtk_pcie_ids[] = { + { .compatible = "mediatek,mt7623-pcie"}, + { .compatible = "mediatek,mt2701-pcie"}, + {}, +}; + +static struct platform_driver mtk_pcie_driver = { + .probe = mtk_pcie_probe, + .driver = { + .name = "mtk-pcie", + .of_match_table = mtk_pcie_ids, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(mtk_pcie_driver); diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c index 29332ba06bc1..5acf8694fb23 100644 --- a/drivers/pci/host/pcie-rockchip.c +++ b/drivers/pci/host/pcie-rockchip.c @@ -139,6 +139,7 @@ PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \ PCIE_CORE_INT_MMVC) +#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000 #define PCIE_RC_CONFIG_BASE 0xa00000 #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) #define PCIE_RC_CONFIG_SCC_SHIFT 16 @@ -146,6 +147,9 @@ #define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 #define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff #define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 +#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8) +#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5) +#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5) #define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) #define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) #define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) @@ -175,6 +179,8 @@ #define IB_ROOT_PORT_REG_SIZE_SHIFT 3 #define AXI_WRAPPER_IO_WRITE 0x6 #define AXI_WRAPPER_MEM_WRITE 0x2 +#define AXI_WRAPPER_TYPE0_CFG 0xa +#define AXI_WRAPPER_TYPE1_CFG 0xb #define AXI_WRAPPER_NOR_MSG 0xc #define MAX_AXI_IB_ROOTPORT_REGION_NUM 3 @@ -198,6 +204,7 @@ #define RC_REGION_0_ADDR_TRANS_H 0x00000000 #define RC_REGION_0_ADDR_TRANS_L 0x00000000 #define RC_REGION_0_PASS_BITS (25 - 1) +#define RC_REGION_0_TYPE_MASK GENMASK(3, 0) #define MAX_AXI_WRAPPER_REGION_NUM 33 struct rockchip_pcie { @@ -295,7 +302,9 @@ static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, static int 
rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, int where, int size, u32 *val) { - void __iomem *addr = rockchip->apb_base + PCIE_RC_CONFIG_BASE + where; + void __iomem *addr; + + addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where; if (!IS_ALIGNED((uintptr_t)addr, size)) { *val = 0; @@ -319,11 +328,13 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, int where, int size, u32 val) { u32 mask, tmp, offset; + void __iomem *addr; offset = where & ~0x3; + addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset; if (size == 4) { - writel(val, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset); + writel(val, addr); return PCIBIOS_SUCCESSFUL; } @@ -334,13 +345,33 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, * corrupt RW1C bits in adjacent registers. But the hardware * doesn't support smaller writes. */ - tmp = readl(rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset) & mask; + tmp = readl(addr) & mask; tmp |= val << ((where & 0x3) * 8); - writel(tmp, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset); + writel(tmp, addr); return PCIBIOS_SUCCESSFUL; } +static void rockchip_pcie_cfg_configuration_accesses( + struct rockchip_pcie *rockchip, u32 type) +{ + u32 ob_desc_0; + + /* Configuration Accesses for region 0 */ + rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); + + rockchip_pcie_write(rockchip, + (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS), + PCIE_CORE_OB_REGION_ADDR0); + rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H, + PCIE_CORE_OB_REGION_ADDR1); + ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0); + ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK); + ob_desc_0 |= (type | (0x1 << 23)); + rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0); + rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1); +} + static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) @@ -355,6 +386,13 @@ static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, return PCIBIOS_BAD_REGISTER_NUMBER; } + if (bus->parent->number == rockchip->root_bus_nr) + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE0_CFG); + else + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE1_CFG); + if (size == 4) { *val = readl(rockchip->reg_base + busdev); } else if (size == 2) { @@ -379,6 +417,13 @@ static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip, if (!IS_ALIGNED(busdev, size)) return PCIBIOS_BAD_REGISTER_NUMBER; + if (bus->parent->number == rockchip->root_bus_nr) + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE0_CFG); + else + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE1_CFG); + if (size == 4) writel(val, rockchip->reg_base + busdev); else if (size == 2) @@ -664,15 +709,10 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); } - rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); - - rockchip_pcie_write(rockchip, - (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS), - PCIE_CORE_OB_REGION_ADDR0); - rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H, - PCIE_CORE_OB_REGION_ADDR1); - rockchip_pcie_write(rockchip, 0x0080000a, PCIE_CORE_OB_REGION_DESC0); - rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR); + status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK; + status |= 
PCIE_RC_CONFIG_DCSR_MPS_256; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR); return 0; } @@ -1156,13 +1196,16 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip, return 0; } -static int rockchip_cfg_atu(struct rockchip_pcie *rockchip) +static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; int offset; int err; int reg_no; + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE0_CFG); + for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) { err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1, AXI_WRAPPER_MEM_WRITE, @@ -1251,6 +1294,9 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev) clk_disable_unprepare(rockchip->aclk_perf_pcie); clk_disable_unprepare(rockchip->aclk_pcie); + if (!IS_ERR(rockchip->vpcie0v9)) + regulator_disable(rockchip->vpcie0v9); + return ret; } @@ -1259,24 +1305,54 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev) struct rockchip_pcie *rockchip = dev_get_drvdata(dev); int err; - clk_prepare_enable(rockchip->clk_pcie_pm); - clk_prepare_enable(rockchip->hclk_pcie); - clk_prepare_enable(rockchip->aclk_perf_pcie); - clk_prepare_enable(rockchip->aclk_pcie); + if (!IS_ERR(rockchip->vpcie0v9)) { + err = regulator_enable(rockchip->vpcie0v9); + if (err) { + dev_err(dev, "fail to enable vpcie0v9 regulator\n"); + return err; + } + } + + err = clk_prepare_enable(rockchip->clk_pcie_pm); + if (err) + goto err_pcie_pm; + + err = clk_prepare_enable(rockchip->hclk_pcie); + if (err) + goto err_hclk_pcie; + + err = clk_prepare_enable(rockchip->aclk_perf_pcie); + if (err) + goto err_aclk_perf_pcie; + + err = clk_prepare_enable(rockchip->aclk_pcie); + if (err) + goto err_aclk_pcie; err = rockchip_pcie_init_port(rockchip); if (err) - return err; + goto err_pcie_resume; - err = rockchip_cfg_atu(rockchip); + err = rockchip_pcie_cfg_atu(rockchip); if (err) - return err; + goto err_pcie_resume; /* Need this to enter L1 again */ rockchip_pcie_update_txcredit_mui(rockchip); rockchip_pcie_enable_interrupts(rockchip); return 0; + +err_pcie_resume: + clk_disable_unprepare(rockchip->aclk_pcie); +err_aclk_pcie: + clk_disable_unprepare(rockchip->aclk_perf_pcie); +err_aclk_perf_pcie: + clk_disable_unprepare(rockchip->hclk_pcie); +err_hclk_pcie: + clk_disable_unprepare(rockchip->clk_pcie_pm); +err_pcie_pm: + return err; } static int rockchip_pcie_probe(struct platform_device *pdev) @@ -1388,19 +1464,18 @@ static int rockchip_pcie_probe(struct platform_device *pdev) } } - err = rockchip_cfg_atu(rockchip); + err = rockchip_pcie_cfg_atu(rockchip); if (err) goto err_free_res; - rockchip->msg_region = devm_ioremap(rockchip->dev, - rockchip->msg_bus_addr, SZ_1M); + rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M); if (!rockchip->msg_region) { err = -ENOMEM; goto err_free_res; } list_splice_init(&res, &bridge->windows); - bridge->dev.parent = &pdev->dev; + bridge->dev.parent = dev; bridge->sysdata = rockchip; bridge->busnr = 0; bridge->ops = &rockchip_pcie_ops; diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c index e27ad2a3bd33..784f63beb487 100644 --- a/drivers/pci/host/vmd.c +++ b/drivers/pci/host/vmd.c @@ -704,7 +704,8 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) INIT_LIST_HEAD(&vmd->irqs[i].irq_list); err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), - vmd_irq, 0, "vmd", &vmd->irqs[i]); + vmd_irq, IRQF_NO_THREAD, + "vmd", &vmd->irqs[i]); if (err) return err; } diff --git 
a/drivers/pci/iov.c b/drivers/pci/iov.c index d9dc7363ac77..120485d6f352 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -461,8 +461,6 @@ found: else iov->dev = dev; - mutex_init(&iov->lock); - dev->sriov = iov; dev->is_physfn = 1; rc = compute_max_vf_buses(dev); @@ -491,8 +489,6 @@ static void sriov_release(struct pci_dev *dev) if (dev != dev->sriov->dev) pci_dev_put(dev->sriov->dev); - mutex_destroy(&dev->sriov->lock); - kfree(dev->sriov); dev->sriov = NULL; } diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index ba44fdfda66b..9e1569107cd6 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1058,7 +1058,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, for (;;) { if (affd) { - nvec = irq_calc_affinity_vectors(nvec, affd); + nvec = irq_calc_affinity_vectors(minvec, nvec, affd); if (nvec < minvec) return -ENOSPC; } @@ -1097,7 +1097,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev, for (;;) { if (affd) { - nvec = irq_calc_affinity_vectors(nvec, affd); + nvec = irq_calc_affinity_vectors(minvec, nvec, affd); if (nvec < minvec) return -ENOSPC; } @@ -1165,16 +1165,6 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, if (flags & PCI_IRQ_AFFINITY) { if (!affd) affd = &msi_default_affd; - - if (affd->pre_vectors + affd->post_vectors > min_vecs) - return -EINVAL; - - /* - * If there aren't any vectors left after applying the pre/post - * vectors don't bother with assigning affinity. - */ - if (affd->pre_vectors + affd->post_vectors == min_vecs) - affd = NULL; } else { if (WARN_ON(affd)) affd = NULL; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 11167c65ca37..50f93a3bc4c0 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -966,6 +966,7 @@ static int pci_pm_thaw_noirq(struct device *dev) return pci_legacy_resume_early(dev); pci_update_current_state(pci_dev, PCI_D0); + pci_restore_state(pci_dev); if (drv && drv->pm && drv->pm->thaw_noirq) error = drv->pm->thaw_noirq(dev); diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index 51357377efbc..1d828a614ac0 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c @@ -43,9 +43,11 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf, { const struct dmi_device *dmi; struct dmi_dev_onboard *donboard; + int domain_nr; int bus; int devfn; + domain_nr = pci_domain_nr(pdev->bus); bus = pdev->bus->number; devfn = pdev->devfn; @@ -53,8 +55,9 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf, while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, NULL, dmi)) != NULL) { donboard = dmi->device_data; - if (donboard && donboard->bus == bus && - donboard->devfn == devfn) { + if (donboard && donboard->segment == domain_nr && + donboard->bus == bus && + donboard->devfn == devfn) { if (buf) { if (attribute == SMBIOS_ATTR_INSTANCE_SHOW) return scnprintf(buf, PAGE_SIZE, diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 31e99613a12e..2f3780b50723 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -154,6 +154,129 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(resource); +static ssize_t max_link_speed_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + u32 linkcap; + int err; + const char *speed; + + err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap); + if (err) + return -EINVAL; + + 
switch (linkcap & PCI_EXP_LNKCAP_SLS) { + case PCI_EXP_LNKCAP_SLS_8_0GB: + speed = "8 GT/s"; + break; + case PCI_EXP_LNKCAP_SLS_5_0GB: + speed = "5 GT/s"; + break; + case PCI_EXP_LNKCAP_SLS_2_5GB: + speed = "2.5 GT/s"; + break; + default: + speed = "Unknown speed"; + } + + return sprintf(buf, "%s\n", speed); +} +static DEVICE_ATTR_RO(max_link_speed); + +static ssize_t max_link_width_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + u32 linkcap; + int err; + + err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap); + if (err) + return -EINVAL; + + return sprintf(buf, "%u\n", (linkcap & PCI_EXP_LNKCAP_MLW) >> 4); +} +static DEVICE_ATTR_RO(max_link_width); + +static ssize_t current_link_speed_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + u16 linkstat; + int err; + const char *speed; + + err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat); + if (err) + return -EINVAL; + + switch (linkstat & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_8_0GB: + speed = "8 GT/s"; + break; + case PCI_EXP_LNKSTA_CLS_5_0GB: + speed = "5 GT/s"; + break; + case PCI_EXP_LNKSTA_CLS_2_5GB: + speed = "2.5 GT/s"; + break; + default: + speed = "Unknown speed"; + } + + return sprintf(buf, "%s\n", speed); +} +static DEVICE_ATTR_RO(current_link_speed); + +static ssize_t current_link_width_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + u16 linkstat; + int err; + + err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat); + if (err) + return -EINVAL; + + return sprintf(buf, "%u\n", + (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT); +} +static DEVICE_ATTR_RO(current_link_width); + +static ssize_t secondary_bus_number_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + u8 sec_bus; + int err; + + err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus); + if (err) + return -EINVAL; + + return sprintf(buf, "%u\n", sec_bus); +} +static DEVICE_ATTR_RO(secondary_bus_number); + +static ssize_t subordinate_bus_number_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + u8 sub_bus; + int err; + + err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus); + if (err) + return -EINVAL; + + return sprintf(buf, "%u\n", sub_bus); +} +static DEVICE_ATTR_RO(subordinate_bus_number); + static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -472,7 +595,6 @@ static ssize_t sriov_numvfs_store(struct device *dev, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); - struct pci_sriov *iov = pdev->sriov; int ret; u16 num_vfs; @@ -483,7 +605,7 @@ static ssize_t sriov_numvfs_store(struct device *dev, if (num_vfs > pci_sriov_get_totalvfs(pdev)) return -ERANGE; - mutex_lock(&iov->dev->sriov->lock); + device_lock(&pdev->dev); if (num_vfs == pdev->sriov->num_VFs) goto exit; @@ -518,7 +640,7 @@ static ssize_t sriov_numvfs_store(struct device *dev, num_vfs, ret); exit: - mutex_unlock(&iov->dev->sriov->lock); + device_unlock(&pdev->dev); if (ret < 0) return ret; @@ -629,12 +751,17 @@ static struct attribute *pci_dev_attrs[] = { NULL, }; -static const struct attribute_group pci_dev_group = { - .attrs = pci_dev_attrs, +static struct attribute *pci_bridge_attrs[] = { + 
&dev_attr_subordinate_bus_number.attr, + &dev_attr_secondary_bus_number.attr, + NULL, }; -const struct attribute_group *pci_dev_groups[] = { - &pci_dev_group, +static struct attribute *pcie_dev_attrs[] = { + &dev_attr_current_link_speed.attr, + &dev_attr_current_link_width.attr, + &dev_attr_max_link_width.attr, + &dev_attr_max_link_speed.attr, NULL, }; @@ -1557,6 +1684,57 @@ static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj, return a->mode; } +static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct pci_dev *pdev = to_pci_dev(dev); + + if (pci_is_bridge(pdev)) + return a->mode; + + return 0; +} + +static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct pci_dev *pdev = to_pci_dev(dev); + + if (pci_is_pcie(pdev)) + return a->mode; + + return 0; +} + +static const struct attribute_group pci_dev_group = { + .attrs = pci_dev_attrs, +}; + +const struct attribute_group *pci_dev_groups[] = { + &pci_dev_group, + NULL, +}; + +static const struct attribute_group pci_bridge_group = { + .attrs = pci_bridge_attrs, +}; + +const struct attribute_group *pci_bridge_groups[] = { + &pci_bridge_group, + NULL, +}; + +static const struct attribute_group pcie_dev_group = { + .attrs = pcie_dev_attrs, +}; + +const struct attribute_group *pcie_dev_groups[] = { + &pcie_dev_group, + NULL, +}; + static struct attribute_group pci_dev_hp_attr_group = { .attrs = pci_dev_hp_attrs, .is_visible = pci_dev_hp_attrs_are_visible, @@ -1592,12 +1770,24 @@ static struct attribute_group pci_dev_attr_group = { .is_visible = pci_dev_attrs_are_visible, }; +static struct attribute_group pci_bridge_attr_group = { + .attrs = pci_bridge_attrs, + .is_visible = pci_bridge_attrs_are_visible, +}; + +static struct attribute_group pcie_dev_attr_group = { + .attrs = pcie_dev_attrs, + .is_visible = pcie_dev_attrs_are_visible, +}; + static const struct attribute_group *pci_dev_attr_groups[] = { &pci_dev_attr_group, &pci_dev_hp_attr_group, #ifdef CONFIG_PCI_IOV &sriov_dev_attr_group, #endif + &pci_bridge_attr_group, + &pcie_dev_attr_group, NULL, }; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b01bd5bba8e6..34b5fe025dd4 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -28,6 +28,7 @@ #include <linux/pm_runtime.h> #include <linux/pci_hotplug.h> #include <linux/vmalloc.h> +#include <linux/pci-ats.h> #include <asm/setup.h> #include <asm/dma.h> #include <linux/aer.h> @@ -455,7 +456,7 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev, pci_bus_for_each_resource(bus, r, i) { if (!r) continue; - if (res->start && resource_contains(r, res)) { + if (resource_contains(r, res)) { /* * If the window is prefetchable but the BAR is @@ -1173,6 +1174,8 @@ void pci_restore_state(struct pci_dev *dev) /* PCI Express register must be restored first */ pci_restore_pcie_state(dev); + pci_restore_pasid_state(dev); + pci_restore_pri_state(dev); pci_restore_ats_state(dev); pci_restore_vc_state(dev); @@ -1960,12 +1963,13 @@ EXPORT_SYMBOL(pci_wake_from_d3); /** * pci_target_state - find an appropriate low power state for a given PCI dev * @dev: PCI device + * @wakeup: Whether or not wakeup functionality will be enabled for the device. * * Use underlying platform code to find a supported low power state for @dev. 
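 * Callers now pass the wake-up policy in explicitly instead of this
 * function probing it, e.g. (editor's illustration, mirroring
 * pci_prepare_to_sleep() below):
 *
 *	target = pci_target_state(dev, device_may_wakeup(&dev->dev));
 *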
* If the platform can't manage @dev, return the deepest state from which it * can generate wake events, based on any available PME info. */ -static pci_power_t pci_target_state(struct pci_dev *dev) +static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) { pci_power_t target_state = PCI_D3hot; @@ -2002,7 +2006,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev) if (dev->current_state == PCI_D3cold) target_state = PCI_D3cold; - if (device_may_wakeup(&dev->dev)) { + if (wakeup) { /* * Find the deepest state from which the device can generate * wake-up events, make it the target state and enable device @@ -2028,13 +2032,14 @@ static pci_power_t pci_target_state(struct pci_dev *dev) */ int pci_prepare_to_sleep(struct pci_dev *dev) { - pci_power_t target_state = pci_target_state(dev); + bool wakeup = device_may_wakeup(&dev->dev); + pci_power_t target_state = pci_target_state(dev, wakeup); int error; if (target_state == PCI_POWER_ERROR) return -EIO; - pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev)); + pci_enable_wake(dev, target_state, wakeup); error = pci_set_power_state(dev, target_state); @@ -2067,9 +2072,10 @@ EXPORT_SYMBOL(pci_back_from_sleep); */ int pci_finish_runtime_suspend(struct pci_dev *dev) { - pci_power_t target_state = pci_target_state(dev); + pci_power_t target_state; int error; + target_state = pci_target_state(dev, device_can_wakeup(&dev->dev)); if (target_state == PCI_POWER_ERROR) return -EIO; @@ -2105,8 +2111,8 @@ bool pci_dev_run_wake(struct pci_dev *dev) if (!dev->pme_support) return false; - /* PME-capable in principle, but not from the intended sleep state */ - if (!pci_pme_capable(dev, pci_target_state(dev))) + /* PME-capable in principle, but not from the target power state */ + if (!pci_pme_capable(dev, pci_target_state(dev, false))) return false; while (bus->parent) { @@ -2141,10 +2147,12 @@ EXPORT_SYMBOL_GPL(pci_dev_run_wake); bool pci_dev_keep_suspended(struct pci_dev *pci_dev) { struct device *dev = &pci_dev->dev; + bool wakeup = device_may_wakeup(dev); if (!pm_runtime_suspended(dev) - || pci_target_state(pci_dev) != pci_dev->current_state - || platform_pci_need_resume(pci_dev)) + || pci_target_state(pci_dev, wakeup) != pci_dev->current_state + || platform_pci_need_resume(pci_dev) + || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME)) return false; /* @@ -2160,7 +2168,7 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev) spin_lock_irq(&dev->power.lock); if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold && - !device_may_wakeup(dev)) + !wakeup) __pci_pme_active(pci_dev, false); spin_unlock_irq(&dev->power.lock); @@ -3708,46 +3716,6 @@ void pci_intx(struct pci_dev *pdev, int enable) } EXPORT_SYMBOL_GPL(pci_intx); -/** - * pci_intx_mask_supported - probe for INTx masking support - * @dev: the PCI device to operate on - * - * Check if the device dev support INTx masking via the config space - * command word. - */ -bool pci_intx_mask_supported(struct pci_dev *dev) -{ - bool mask_supported = false; - u16 orig, new; - - if (dev->broken_intx_masking) - return false; - - pci_cfg_access_lock(dev); - - pci_read_config_word(dev, PCI_COMMAND, &orig); - pci_write_config_word(dev, PCI_COMMAND, - orig ^ PCI_COMMAND_INTX_DISABLE); - pci_read_config_word(dev, PCI_COMMAND, &new); - - /* - * There's no way to protect against hardware bugs or detect them - * reliably, but as long as we know what the value should be, let's - * go ahead and check it. 
- */ - if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) { - dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n", - orig, new); - } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) { - mask_supported = true; - pci_write_config_word(dev, PCI_COMMAND, orig); - } - - pci_cfg_access_unlock(dev); - return mask_supported; -} -EXPORT_SYMBOL_GPL(pci_intx_mask_supported); - static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask) { struct pci_bus *bus = dev->bus; @@ -3798,7 +3766,7 @@ done: * @dev: the PCI device to operate on * * Check if the device dev has its INTx line asserted, mask it and - * return true in that case. False is returned if not interrupt was + * return true in that case. False is returned if no interrupt was * pending. */ bool pci_check_and_mask_intx(struct pci_dev *dev) @@ -4068,40 +4036,6 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe) return pci_reset_hotplug_slot(dev->slot->hotplug, probe); } -static int __pci_dev_reset(struct pci_dev *dev, int probe) -{ - int rc; - - might_sleep(); - - rc = pci_dev_specific_reset(dev, probe); - if (rc != -ENOTTY) - goto done; - - if (pcie_has_flr(dev)) { - if (!probe) - pcie_flr(dev); - rc = 0; - goto done; - } - - rc = pci_af_flr(dev, probe); - if (rc != -ENOTTY) - goto done; - - rc = pci_pm_reset(dev, probe); - if (rc != -ENOTTY) - goto done; - - rc = pci_dev_reset_slot_function(dev, probe); - if (rc != -ENOTTY) - goto done; - - rc = pci_parent_bus_reset(dev, probe); -done: - return rc; -} - static void pci_dev_lock(struct pci_dev *dev) { pci_cfg_access_lock(dev); @@ -4127,26 +4061,18 @@ static void pci_dev_unlock(struct pci_dev *dev) pci_cfg_access_unlock(dev); } -/** - * pci_reset_notify - notify device driver of reset - * @dev: device to be notified of reset - * @prepare: 'true' if device is about to be reset; 'false' if reset attempt - * completed - * - * Must be called prior to device access being disabled and after device - * access is restored. - */ -static void pci_reset_notify(struct pci_dev *dev, bool prepare) +static void pci_dev_save_and_disable(struct pci_dev *dev) { const struct pci_error_handlers *err_handler = dev->driver ? dev->driver->err_handler : NULL; - if (err_handler && err_handler->reset_notify) - err_handler->reset_notify(dev, prepare); -} -static void pci_dev_save_and_disable(struct pci_dev *dev) -{ - pci_reset_notify(dev, true); + /* + * dev->driver->err_handler->reset_prepare() is protected against + * races with ->remove() by the device lock, which must be held by + * the caller. + */ + if (err_handler && err_handler->reset_prepare) + err_handler->reset_prepare(dev); /* * Wake-up device prior to save. PM registers default to D0 after @@ -4168,23 +4094,18 @@ static void pci_dev_save_and_disable(struct pci_dev *dev) static void pci_dev_restore(struct pci_dev *dev) { - pci_restore_state(dev); - pci_reset_notify(dev, false); -} - -static int pci_dev_reset(struct pci_dev *dev, int probe) -{ - int rc; - - if (!probe) - pci_dev_lock(dev); - - rc = __pci_dev_reset(dev, probe); + const struct pci_error_handlers *err_handler = + dev->driver ? dev->driver->err_handler : NULL; - if (!probe) - pci_dev_unlock(dev); + pci_restore_state(dev); - return rc; + /* + * dev->driver->err_handler->reset_done() is protected against + * races with ->remove() by the device lock, which must be held by + * the caller. 
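With pci_reset_notify() and its boolean flag gone, the core invokes the two explicit callbacks directly: reset_prepare() from pci_dev_save_and_disable() and reset_done() from pci_dev_restore(), both under the device lock as the comments note. On the driver side, opting in now looks roughly like this (foo_* names are illustrative; probe/remove and the ID table are omitted):

#include <linux/pci.h>

/* Quiesce software state; the function-level reset follows. */
static void foo_reset_prepare(struct pci_dev *pdev)
{
	/* e.g. stop DMA, tear down rings, cancel deferred work */
}

/* Called after the reset, once config space has been restored. */
static void foo_reset_done(struct pci_dev *pdev)
{
	/* e.g. reprogram the device and restart I/O */
}

static const struct pci_error_handlers foo_err_handler = {
	.reset_prepare	= foo_reset_prepare,
	.reset_done	= foo_reset_done,
};

static struct pci_driver foo_driver = {
	.name		= "foo",
	.err_handler	= &foo_err_handler,
};

Note that pci_reset_function() further down composes exactly these pieces: probe for a usable reset method, take the device lock, save-and-disable (which fires reset_prepare()), perform the reset, then restore (which fires reset_done()) and unlock.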
+ */ + if (err_handler && err_handler->reset_done) + err_handler->reset_done(dev); } /** @@ -4206,7 +4127,13 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) */ int __pci_reset_function(struct pci_dev *dev) { - return pci_dev_reset(dev, 0); + int ret; + + pci_dev_lock(dev); + ret = __pci_reset_function_locked(dev); + pci_dev_unlock(dev); + + return ret; } EXPORT_SYMBOL_GPL(__pci_reset_function); @@ -4231,7 +4158,27 @@ EXPORT_SYMBOL_GPL(__pci_reset_function); */ int __pci_reset_function_locked(struct pci_dev *dev) { - return __pci_dev_reset(dev, 0); + int rc; + + might_sleep(); + + rc = pci_dev_specific_reset(dev, 0); + if (rc != -ENOTTY) + return rc; + if (pcie_has_flr(dev)) { + pcie_flr(dev); + return 0; + } + rc = pci_af_flr(dev, 0); + if (rc != -ENOTTY) + return rc; + rc = pci_pm_reset(dev, 0); + if (rc != -ENOTTY) + return rc; + rc = pci_dev_reset_slot_function(dev, 0); + if (rc != -ENOTTY) + return rc; + return pci_parent_bus_reset(dev, 0); } EXPORT_SYMBOL_GPL(__pci_reset_function_locked); @@ -4248,7 +4195,26 @@ EXPORT_SYMBOL_GPL(__pci_reset_function_locked); */ int pci_probe_reset_function(struct pci_dev *dev) { - return pci_dev_reset(dev, 1); + int rc; + + might_sleep(); + + rc = pci_dev_specific_reset(dev, 1); + if (rc != -ENOTTY) + return rc; + if (pcie_has_flr(dev)) + return 0; + rc = pci_af_flr(dev, 1); + if (rc != -ENOTTY) + return rc; + rc = pci_pm_reset(dev, 1); + if (rc != -ENOTTY) + return rc; + rc = pci_dev_reset_slot_function(dev, 1); + if (rc != -ENOTTY) + return rc; + + return pci_parent_bus_reset(dev, 1); } /** @@ -4271,15 +4237,17 @@ int pci_reset_function(struct pci_dev *dev) { int rc; - rc = pci_dev_reset(dev, 1); + rc = pci_probe_reset_function(dev); if (rc) return rc; + pci_dev_lock(dev); pci_dev_save_and_disable(dev); - rc = pci_dev_reset(dev, 0); + rc = __pci_reset_function_locked(dev); pci_dev_restore(dev); + pci_dev_unlock(dev); return rc; } @@ -4295,20 +4263,18 @@ int pci_try_reset_function(struct pci_dev *dev) { int rc; - rc = pci_dev_reset(dev, 1); + rc = pci_probe_reset_function(dev); if (rc) return rc; - pci_dev_save_and_disable(dev); + if (!pci_dev_trylock(dev)) + return -EAGAIN; - if (pci_dev_trylock(dev)) { - rc = __pci_dev_reset(dev, 0); - pci_dev_unlock(dev); - } else - rc = -EAGAIN; + pci_dev_save_and_disable(dev); + rc = __pci_reset_function_locked(dev); + pci_dev_unlock(dev); pci_dev_restore(dev); - return rc; } EXPORT_SYMBOL_GPL(pci_try_reset_function); @@ -4458,7 +4424,9 @@ static void pci_bus_save_and_disable(struct pci_bus *bus) struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { + pci_dev_lock(dev); pci_dev_save_and_disable(dev); + pci_dev_unlock(dev); if (dev->subordinate) pci_bus_save_and_disable(dev->subordinate); } @@ -4473,7 +4441,9 @@ static void pci_bus_restore(struct pci_bus *bus) struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { + pci_dev_lock(dev); pci_dev_restore(dev); + pci_dev_unlock(dev); if (dev->subordinate) pci_bus_restore(dev->subordinate); } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index f8113e5b9812..93f4044b8f4b 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -272,7 +272,6 @@ struct pci_sriov { u16 driver_max_VFs; /* max num VFs driver supports */ struct pci_dev *dev; /* lowest numbered PF */ struct pci_dev *self; /* this PF */ - struct mutex lock; /* lock for setting sriov_numvfs in sysfs */ resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ bool drivers_autoprobe; /* auto probing of VFs by driver */ }; diff --git 
a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c index 77d2ca99d2ec..c39f32e42b4d 100644 --- a/drivers/pci/pcie/pcie-dpc.c +++ b/drivers/pci/pcie/pcie-dpc.c @@ -92,7 +92,7 @@ static irqreturn_t dpc_irq(int irq, void *context) pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status); pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_SOURCE_ID, &source); - if (!status) + if (!status || status == (u16)(~0)) return IRQ_NONE; dev_info(&dpc->dev->device, "DPC containment event, status:%#06x source:%#06x\n", @@ -144,7 +144,7 @@ static int dpc_probe(struct pcie_device *dev) dpc->rp = (cap & PCI_EXP_DPC_CAP_RP_EXT); - ctl |= PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN; + ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN; pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); dev_info(&dev->device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 587aef36030d..4334fd5d7de9 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -13,10 +13,11 @@ #define PCIE_PORT_DEVICE_MAXSERVICES 5 /* - * According to the PCI Express Base Specification 2.0, the indices of - * the MSI-X table entries used by port services must not exceed 31 + * The PCIe Capability Interrupt Message Number (PCIe r3.1, sec 7.8.2) must + * be one of the first 32 MSI-X entries. Per PCI r3.0, sec 6.8.3.1, MSI + * supports a maximum of 32 vectors per function. */ -#define PCIE_PORT_MAX_MSIX_ENTRIES 32 +#define PCIE_PORT_MAX_MSI_ENTRIES 32 #define get_descriptor_id(type, service) (((type - 4) << 8) | service) diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index cea504f6f478..313a21df1692 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -44,14 +44,15 @@ static void release_pcie_device(struct device *dev) } /** - * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port + * pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode + * for given port * @dev: PCI Express port to handle * @irqs: Array of interrupt vectors to populate * @mask: Bitmask of port capabilities returned by get_port_device_capability() * * Return value: 0 on success, error code on failure */ -static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) +static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask) { int nr_entries, entry, nvec = 0; @@ -61,8 +62,8 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) * equal to the number of entries this port actually uses, we'll happily * go through without any tricks. 
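The new status == (u16)(~0) test in dpc_irq() above is a surprise-removal guard: config reads on a function that has been hot-removed complete with all ones, which would otherwise look like a (nonsensical) containment event. The same defensive idiom, sketched for a generic shared handler (foo_irq and the register offset are hypothetical):

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_irq(int irq, void *context)
{
	struct pci_dev *pdev = context;
	u16 status;

	pci_read_config_word(pdev, 0x0a /* hypothetical status reg */,
			     &status);

	/* Nothing pending, or the device is gone (reads as all ones). */
	if (!status || status == (u16)(~0))
		return IRQ_NONE;

	/* ... acknowledge and handle the event ... */
	return IRQ_HANDLED;
}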
*/ - nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSIX_ENTRIES, - PCI_IRQ_MSIX); + nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES, + PCI_IRQ_MSIX | PCI_IRQ_MSI); if (nr_entries < 0) return nr_entries; @@ -70,14 +71,19 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) u16 reg16; /* - * The code below follows the PCI Express Base Specification 2.0 - * stating in Section 6.1.6 that "PME and Hot-Plug Event - * interrupts (when both are implemented) always share the same - * MSI or MSI-X vector, as indicated by the Interrupt Message - * Number field in the PCI Express Capabilities register", where - * according to Section 7.8.2 of the specification "For MSI-X, - * the value in this field indicates which MSI-X Table entry is - * used to generate the interrupt message." + * Per PCIe r3.1, sec 6.1.6, "PME and Hot-Plug Event + * interrupts (when both are implemented) always share the + * same MSI or MSI-X vector, as indicated by the Interrupt + * Message Number field in the PCI Express Capabilities + * register". + * + * Per sec 7.8.2, "For MSI, the [Interrupt Message Number] + * indicates the offset between the base Message Data and + * the interrupt message that is generated." + * + * "For MSI-X, the [Interrupt Message Number] indicates + * which MSI-X Table entry is used to generate the + * interrupt message." */ pcie_capability_read_word(dev, PCI_EXP_FLAGS, ®16); entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9; @@ -94,13 +100,17 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) u32 reg32, pos; /* - * The code below follows Section 7.10.10 of the PCI Express - * Base Specification 2.0 stating that bits 31-27 of the Root - * Error Status Register contain a value indicating which of the - * MSI/MSI-X vectors assigned to the port is going to be used - * for AER, where "For MSI-X, the value in this register - * indicates which MSI-X Table entry is used to generate the - * interrupt message." + * Per PCIe r3.1, sec 7.10.10, the Advanced Error Interrupt + * Message Number in the Root Error Status register + * indicates which MSI/MSI-X vector is used for AER. + * + * "For MSI, the [Advanced Error Interrupt Message Number] + * indicates the offset between the base Message Data and + * the interrupt message that is generated." + * + * "For MSI-X, the [Advanced Error Interrupt Message + * Number] indicates which MSI-X Table entry is used to + * generate the interrupt message." */ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, ®32); @@ -113,6 +123,33 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) nvec = max(nvec, entry + 1); } + if (mask & PCIE_PORT_SERVICE_DPC) { + u16 reg16, pos; + + /* + * Per PCIe r4.0 (v0.9), sec 7.9.15.2, the DPC Interrupt + * Message Number in the DPC Capability register indicates + * which MSI/MSI-X vector is used for DPC. + * + * "For MSI, the [DPC Interrupt Message Number] indicates + * the offset between the base Message Data and the + * interrupt message that is generated." + * + * "For MSI-X, the [DPC Interrupt Message Number] indicates + * which MSI-X Table entry is used to generate the + * interrupt message." 
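All three services (PME/hot-plug, AER, and now DPC) follow the same recipe: read the Interrupt Message Number out of the relevant capability, make sure it falls inside the allocated range, and translate the entry to a Linux IRQ with pci_irq_vector(). Condensed into a hypothetical helper for the PME/hot-plug case (mask and shift as in the hunk above):

#include <linux/pci.h>

/* Sketch: resolve the PCIe Capability Interrupt Message Number
 * (PCIe r3.1, sec 7.8.2) to a Linux IRQ number. Assumes vectors
 * were already allocated with pci_alloc_irq_vectors(). */
static int foo_port_service_irq(struct pci_dev *dev, int nr_entries)
{
	u16 reg16;
	int entry;

	pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
	entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
	if (entry >= nr_entries)
		return -EIO;	/* message number beyond what we got */

	return pci_irq_vector(dev, entry);
}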
+ */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC); + pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP, ®16); + entry = reg16 & 0x1f; + if (entry >= nr_entries) + goto out_free_irqs; + + irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, entry); + + nvec = max(nvec, entry + 1); + } + /* * If nvec is equal to the allocated number of entries, we can just use * what we have. Otherwise, the port has some extra entries not for the @@ -124,7 +161,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) /* Now allocate the MSI-X vectors for real */ nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec, - PCI_IRQ_MSIX); + PCI_IRQ_MSIX | PCI_IRQ_MSI); if (nr_entries < 0) return nr_entries; } @@ -146,26 +183,29 @@ out_free_irqs: */ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask) { - unsigned flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI; int ret, i; for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) irqs[i] = -1; /* - * If MSI cannot be used for PCIe PME or hotplug, we have to use - * INTx or other interrupts, e.g. system shared interrupt. + * If we support PME or hotplug, but we can't use MSI/MSI-X for + * them, we have to fall back to INTx or other interrupts, e.g., a + * system shared interrupt. */ - if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) || - ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) { - flags &= ~PCI_IRQ_MSI; - } else { - /* Try to use MSI-X if supported */ - if (!pcie_port_enable_msix(dev, irqs, mask)) - return 0; - } + if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) + goto legacy_irq; + + if ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi()) + goto legacy_irq; + + /* Try to use MSI-X or MSI if supported */ + if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0) + return 0; - ret = pci_alloc_irq_vectors(dev, 1, 1, flags); +legacy_irq: + /* fall back to legacy IRQ */ + ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY); if (ret < 0) return -ENODEV; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index bd42ed42c199..c31310db0404 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1356,6 +1356,34 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev) } /** + * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability + * @dev: PCI device + * + * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this + * at enumeration-time to avoid modifying PCI_COMMAND at run-time. + */ +static int pci_intx_mask_broken(struct pci_dev *dev) +{ + u16 orig, toggle, new; + + pci_read_config_word(dev, PCI_COMMAND, &orig); + toggle = orig ^ PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(dev, PCI_COMMAND, toggle); + pci_read_config_word(dev, PCI_COMMAND, &new); + + pci_write_config_word(dev, PCI_COMMAND, orig); + + /* + * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI + * r2.3, so strictly speaking, a device is not *broken* if it's not + * writable. But we'll live with the misnomer for now. 
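Because the writability test now runs once at enumeration time, a driver no longer has to toggle PCI_COMMAND itself (the removed pci_intx_mask_supported() did exactly that); it just consults pdev->broken_intx_masking, which pci_setup_device() fills in below. A sketch of the consuming side for a shared legacy interrupt (foo_* names are hypothetical):

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_intx_handler(int irq, void *data)
{
	struct pci_dev *pdev = data;

	/* Masks the line only if this device actually asserted it. */
	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;

	/* ... deferred handling; unmask later with pci_intx(pdev, 1) ... */
	return IRQ_HANDLED;
}

static int foo_request_irq(struct pci_dev *pdev)
{
	/* Probed once in pci_setup_device(); no runtime toggling here. */
	if (pdev->broken_intx_masking)
		return -ENODEV;

	return request_irq(pdev->irq, foo_intx_handler, IRQF_SHARED,
			   "foo", pdev);
}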
+ */ + if (new != toggle) + return 1; + return 0; +} + +/** * pci_setup_device - fill in class and map information of a device * @dev: the device structure to fill * @@ -1425,6 +1453,8 @@ int pci_setup_device(struct pci_dev *dev) } } + dev->broken_intx_masking = pci_intx_mask_broken(dev); + switch (dev->hdr_type) { /* header type */ case PCI_HEADER_TYPE_NORMAL: /* standard header */ if (class == PCI_CLASS_BRIDGE_PCI) @@ -1700,6 +1730,11 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) /* Initialize Advanced Error Capabilities and Control Register */ pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; + /* Don't enable ECRC generation or checking if unsupported */ + if (!(reg32 & PCI_ERR_CAP_ECRC_GENC)) + reg32 &= ~PCI_ERR_CAP_ECRC_GENE; + if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC)) + reg32 &= ~PCI_ERR_CAP_ECRC_CHKE; pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); /* diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 085fb787aa9e..6967c6b4cf6b 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -304,7 +304,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev) { int i; - for (i = 0; i < PCI_STD_RESOURCE_END; i++) { + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { struct resource *r = &dev->resource[i]; if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) { @@ -1684,6 +1684,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); +static void quirk_radeon_pm(struct pci_dev *dev) +{ + if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE && + dev->subsystem_device == 0x00e2) { + if (dev->d3_delay < 20) { + dev->d3_delay = 20; + dev_info(&dev->dev, "extending delay after power-on from D3 to %d msec\n", + dev->d3_delay); + } + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm); + #ifdef CONFIG_X86_IO_APIC static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) { @@ -3236,6 +3249,10 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, + quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index cc6e085008fb..af81b2dec42e 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -120,6 +120,13 @@ struct sw_event_regs { u32 reserved16[4]; } __packed; +enum { + SWITCHTEC_CFG0_RUNNING = 0x04, + SWITCHTEC_CFG1_RUNNING = 0x05, + SWITCHTEC_IMG0_RUNNING = 0x03, + SWITCHTEC_IMG1_RUNNING = 0x07, +}; + struct sys_info_regs { u32 device_id; u32 device_version; @@ -129,7 +136,9 @@ struct sys_info_regs { u32 table_format_version; u32 partition_id; u32 cfg_file_fmt_version; - u32 reserved2[58]; + u16 cfg_running; + u16 img_running; + u32 reserved2[57]; char vendor_id[8]; char product_id[16]; char product_revision[4]; @@ -807,6 +816,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev, { struct switchtec_ioctl_flash_part_info info = {0}; struct flash_info_regs __iomem *fi = stdev->mmio_flash_info; + struct 
sys_info_regs __iomem *si = stdev->mmio_sys_info; u32 active_addr = -1; if (copy_from_user(&info, uinfo, sizeof(info))) @@ -816,18 +826,26 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev, case SWITCHTEC_IOCTL_PART_CFG0: active_addr = ioread32(&fi->active_cfg); set_fw_info_part(&info, &fi->cfg0); + if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING) + info.active |= SWITCHTEC_IOCTL_PART_RUNNING; break; case SWITCHTEC_IOCTL_PART_CFG1: active_addr = ioread32(&fi->active_cfg); set_fw_info_part(&info, &fi->cfg1); + if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING) + info.active |= SWITCHTEC_IOCTL_PART_RUNNING; break; case SWITCHTEC_IOCTL_PART_IMG0: active_addr = ioread32(&fi->active_img); set_fw_info_part(&info, &fi->img0); + if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING) + info.active |= SWITCHTEC_IOCTL_PART_RUNNING; break; case SWITCHTEC_IOCTL_PART_IMG1: active_addr = ioread32(&fi->active_img); set_fw_info_part(&info, &fi->img1); + if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING) + info.active |= SWITCHTEC_IOCTL_PART_RUNNING; break; case SWITCHTEC_IOCTL_PART_NVLOG: set_fw_info_part(&info, &fi->nvlog); @@ -861,7 +879,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev, } if (info.address == active_addr) - info.active = 1; + info.active |= SWITCHTEC_IOCTL_PART_ACTIVE; if (copy_to_user(uinfo, &info, sizeof(info))) return -EFAULT; @@ -1291,7 +1309,6 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev) cdev = &stdev->cdev; cdev_init(cdev, &switchtec_fops); cdev->owner = THIS_MODULE; - cdev->kobj.parent = &dev->kobj; return stdev; @@ -1442,12 +1459,15 @@ static int switchtec_init_pci(struct switchtec_dev *stdev, stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET; stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET; stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET; - stdev->partition = ioread8(&stdev->mmio_ntb->partition_id); + stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id); stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count); stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET; stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition]; stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET; + if (stdev->partition_count < 1) + stdev->partition_count = 1; + init_pff(stdev); pci_set_drvdata(pdev, stdev); @@ -1479,11 +1499,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev, SWITCHTEC_EVENT_EN_IRQ, &stdev->mmio_part_cfg->mrpc_comp_hdr); - rc = cdev_add(&stdev->cdev, stdev->dev.devt, 1); - if (rc) - goto err_put; - - rc = device_add(&stdev->dev); + rc = cdev_device_add(&stdev->cdev, &stdev->dev); if (rc) goto err_devadd; @@ -1492,7 +1508,6 @@ static int switchtec_pci_probe(struct pci_dev *pdev, return 0; err_devadd: - cdev_del(&stdev->cdev); stdev_kill(stdev); err_put: ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); @@ -1506,8 +1521,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); - device_del(&stdev->dev); - cdev_del(&stdev->cdev); + cdev_device_del(&stdev->cdev, &stdev->dev); ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); dev_info(&stdev->dev, "unregistered.\n"); @@ -1544,6 +1558,24 @@ static const struct pci_device_id switchtec_pci_tbl[] = { SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3 SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3 SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3 + SWITCHTEC_PCI_DEVICE(0x8551), //PAX 24XG3 + 
SWITCHTEC_PCI_DEVICE(0x8552), //PAX 32XG3 + SWITCHTEC_PCI_DEVICE(0x8553), //PAX 48XG3 + SWITCHTEC_PCI_DEVICE(0x8554), //PAX 64XG3 + SWITCHTEC_PCI_DEVICE(0x8555), //PAX 80XG3 + SWITCHTEC_PCI_DEVICE(0x8556), //PAX 96XG3 + SWITCHTEC_PCI_DEVICE(0x8561), //PFXL 24XG3 + SWITCHTEC_PCI_DEVICE(0x8562), //PFXL 32XG3 + SWITCHTEC_PCI_DEVICE(0x8563), //PFXL 48XG3 + SWITCHTEC_PCI_DEVICE(0x8564), //PFXL 64XG3 + SWITCHTEC_PCI_DEVICE(0x8565), //PFXL 80XG3 + SWITCHTEC_PCI_DEVICE(0x8566), //PFXL 96XG3 + SWITCHTEC_PCI_DEVICE(0x8571), //PFXI 24XG3 + SWITCHTEC_PCI_DEVICE(0x8572), //PFXI 32XG3 + SWITCHTEC_PCI_DEVICE(0x8573), //PFXI 48XG3 + SWITCHTEC_PCI_DEVICE(0x8574), //PFXI 64XG3 + SWITCHTEC_PCI_DEVICE(0x8575), //PFXI 80XG3 + SWITCHTEC_PCI_DEVICE(0x8576), //PFXI 96XG3 {0} }; MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl); diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index b827a8113e26..ff01bed7112f 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -408,7 +408,7 @@ static void efifb_fixup_resources(struct pci_dev *dev) if (!base) return; - for (i = 0; i < PCI_STD_RESOURCE_END; i++) { + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { struct resource *res = &dev->resource[i]; if (!(res->flags & IORESOURCE_MEM))
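The efifb hunk above and the quirk_extend_bar_to_page() hunk earlier fix the same off-by-one: PCI_STD_RESOURCE_END is the index of the last standard BAR (5), not a count, so looping with < silently skipped BAR 5. The corrected idiom, as a standalone sketch (foo_print_mem_bars is hypothetical):

#include <linux/pci.h>

/* Walk all six standard BARs; note the inclusive upper bound. */
static void foo_print_mem_bars(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
		struct resource *res = &pdev->resource[i];

		if (res->flags & IORESOURCE_MEM)
			dev_info(&pdev->dev, "BAR %d: %pR\n", i, res);
	}
}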
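One more pattern from the switchtec changes is worth noting: cdev_device_add()/cdev_device_del() replace the open-coded cdev_add() + device_add() sequence and the manual cdev->kobj.parent assignment (cdev_set_parent() is called internally), so the char device and its struct device are registered and torn down as a pair. Roughly, as a hedged sketch with error handling elided:

#include <linux/cdev.h>
#include <linux/device.h>

static int foo_register(struct cdev *cdev, struct device *dev)
{
	/* Replaces cdev_add(cdev, dev->devt, 1) + device_add(dev),
	 * and pins @dev for as long as the cdev is live. */
	return cdev_device_add(cdev, dev);
}

static void foo_unregister(struct cdev *cdev, struct device *dev)
{
	/* Replaces device_del(dev) + cdev_del(cdev). */
	cdev_device_del(cdev, dev);
}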