Diffstat (limited to 'drivers')
539 files changed, 15260 insertions, 3909 deletions
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index e07782b1fbb6..eaea733b368a 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -73,6 +73,7 @@ module_param(device_id_scheme, bool, 0444); static int only_lcd = -1; module_param(only_lcd, int, 0444); +static bool may_report_brightness_keys; static int register_count; static DEFINE_MUTEX(register_count_mutex); static DEFINE_MUTEX(video_list_lock); @@ -1222,6 +1223,9 @@ acpi_video_bus_get_one_device(struct acpi_device *device, acpi_video_device_bind(video, data); acpi_video_device_find_cap(data); + if (data->cap._BCM && data->cap._BCL) + may_report_brightness_keys = true; + mutex_lock(&video->device_list_lock); list_add_tail(&data->entry, &video->video_device_list); mutex_unlock(&video->device_list_lock); @@ -1689,6 +1693,9 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data) break; } + if (keycode) + may_report_brightness_keys = true; + acpi_notifier_call_chain(device, event, 0); if (keycode && (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS)) { @@ -2249,6 +2256,7 @@ void acpi_video_unregister(void) if (register_count) { acpi_bus_unregister_driver(&acpi_video_bus); register_count = 0; + may_report_brightness_keys = false; } mutex_unlock(®ister_count_mutex); } @@ -2270,13 +2278,7 @@ void acpi_video_unregister_backlight(void) bool acpi_video_handles_brightness_key_presses(void) { - bool have_video_busses; - - mutex_lock(&video_list_lock); - have_video_busses = !list_empty(&video_bus_head); - mutex_unlock(&video_list_lock); - - return have_video_busses && + return may_report_brightness_keys && (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS); } EXPORT_SYMBOL(acpi_video_handles_brightness_key_presses); diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 86fa61a21826..0dbb39ef4179 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -298,7 +298,7 @@ EXPORT_SYMBOL_GPL(osc_cpc_flexible_adr_space_confirmed); bool osc_sb_native_usb4_support_confirmed; EXPORT_SYMBOL_GPL(osc_sb_native_usb4_support_confirmed); -bool osc_sb_cppc_not_supported; +bool osc_sb_cppc2_support_acked; static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48"; static void acpi_bus_osc_negotiate_platform_control(void) @@ -358,11 +358,6 @@ static void acpi_bus_osc_negotiate_platform_control(void) return; } -#ifdef CONFIG_ACPI_CPPC_LIB - osc_sb_cppc_not_supported = !(capbuf_ret[OSC_SUPPORT_DWORD] & - (OSC_SB_CPC_SUPPORT | OSC_SB_CPCV2_SUPPORT)); -#endif - /* * Now run _OSC again with query flag clear and with the caps * supported by both the OS and the platform. 
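The acpi_video hunk above stops walking the video bus list in acpi_video_handles_brightness_key_presses() and instead latches a may_report_brightness_keys flag once a device with both _BCM and _BCL (or an actual brightness key event) has been seen. A minimal sketch of how a platform hotkey driver might consult this helper — the input plumbing and the example_send_brightness_key() name are assumptions for illustration, not part of the patch:

	/*
	 * Hedged sketch: defer to the ACPI video driver when it will report
	 * brightness keys itself, to avoid double key events. Only
	 * acpi_video_handles_brightness_key_presses() comes from the patch;
	 * the rest is illustrative scaffolding.
	 */
	#include <linux/input.h>
	#include <acpi/video.h>

	static void example_send_brightness_key(struct input_dev *input, bool up)
	{
		unsigned int code = up ? KEY_BRIGHTNESSUP : KEY_BRIGHTNESSDOWN;

		if (acpi_video_handles_brightness_key_presses())
			return; /* acpi_video will emit the key on our behalf */

		input_report_key(input, code, 1);
		input_sync(input);
		input_report_key(input, code, 0);
		input_sync(input);
	}
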
@@ -376,6 +371,10 @@ static void acpi_bus_osc_negotiate_platform_control(void) capbuf_ret = context.ret.pointer; if (context.ret.length > OSC_SUPPORT_DWORD) { +#ifdef CONFIG_ACPI_CPPC_LIB + osc_sb_cppc2_support_acked = capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_CPCV2_SUPPORT; +#endif + osc_sb_apei_support_acked = capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT; osc_pc_lpi_support_confirmed = @@ -1145,6 +1144,9 @@ static int __init acpi_bus_init_irq(void) case ACPI_IRQ_MODEL_PLATFORM: message = "platform specific model"; break; + case ACPI_IRQ_MODEL_LPIC: + message = "LPIC"; + break; default: pr_info("Unknown interrupt routing model\n"); return -ENODEV; diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 903528f7e187..3c6d4ef87be0 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -578,6 +578,19 @@ bool __weak cpc_ffh_supported(void) } /** + * cpc_supported_by_cpu() - check if CPPC is supported by CPU + * + * Check if the architectural support for CPPC is present even + * if the _OSC hasn't prescribed it + * + * Return: true for supported, false for not supported + */ +bool __weak cpc_supported_by_cpu(void) +{ + return false; +} + +/** * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace * * Check and allocate the cppc_pcc_data memory. @@ -684,8 +697,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) acpi_status status; int ret = -ENODATA; - if (osc_sb_cppc_not_supported) - return -ENODEV; + if (!osc_sb_cppc2_support_acked) { + pr_debug("CPPC v2 _OSC not acked\n"); + if (!cpc_supported_by_cpu()) + return -ENODEV; + } /* Parse the ACPI _CPC table for this CPU. */ status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output, @@ -766,7 +782,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) if (!osc_cpc_flexible_adr_space_confirmed) { pr_debug("Flexible address space capability not supported\n"); - goto out_free; + if (!cpc_supported_by_cpu()) + goto out_free; } addr = ioremap(gas_t->address, gas_t->bit_width/8); @@ -793,7 +810,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) } if (!osc_cpc_flexible_adr_space_confirmed) { pr_debug("Flexible address space capability not supported\n"); - goto out_free; + if (!cpc_supported_by_cpu()) + goto out_free; } } else { if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) { diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c index c68e694fca26..dabe45eba055 100644 --- a/drivers/acpi/irq.c +++ b/drivers/acpi/irq.c @@ -12,7 +12,8 @@ enum acpi_irq_model_id acpi_irq_model; -static struct fwnode_handle *acpi_gsi_domain_id; +static struct fwnode_handle *(*acpi_get_gsi_domain_id)(u32 gsi); +static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi); /** * acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI @@ -26,14 +27,18 @@ static struct fwnode_handle *acpi_gsi_domain_id; */ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) { - struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, - DOMAIN_BUS_ANY); + struct irq_domain *d; + d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi), + DOMAIN_BUS_ANY); *irq = irq_find_mapping(d, gsi); /* - * *irq == 0 means no mapping, that should - * be reported as a failure + * *irq == 0 means no mapping, that should be reported as a + * failure, unless there is an arch-specific fallback handler. */ + if (!*irq && acpi_gsi_to_irq_fallback) + *irq = acpi_gsi_to_irq_fallback(gsi); + return (*irq > 0) ? 
0 : -EINVAL; } EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); @@ -53,12 +58,12 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, { struct irq_fwspec fwspec; - if (WARN_ON(!acpi_gsi_domain_id)) { + fwspec.fwnode = acpi_get_gsi_domain_id(gsi); + if (WARN_ON(!fwspec.fwnode)) { pr_warn("GSI: No registered irqchip, giving up\n"); return -EINVAL; } - fwspec.fwnode = acpi_gsi_domain_id; fwspec.param[0] = gsi; fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity); fwspec.param_count = 2; @@ -73,13 +78,14 @@ EXPORT_SYMBOL_GPL(acpi_register_gsi); */ void acpi_unregister_gsi(u32 gsi) { - struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, - DOMAIN_BUS_ANY); + struct irq_domain *d; int irq; if (WARN_ON(acpi_irq_model == ACPI_IRQ_MODEL_GIC && gsi < 16)) return; + d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi), + DOMAIN_BUS_ANY); irq = irq_find_mapping(d, gsi); irq_dispose_mapping(irq); } @@ -97,7 +103,8 @@ EXPORT_SYMBOL_GPL(acpi_unregister_gsi); * The referenced device fwhandle or NULL on failure */ static struct fwnode_handle * -acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source) +acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source, + u32 gsi) { struct fwnode_handle *result; struct acpi_device *device; @@ -105,7 +112,7 @@ acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source) acpi_status status; if (!source->string_length) - return acpi_gsi_domain_id; + return acpi_get_gsi_domain_id(gsi); status = acpi_get_handle(NULL, source->string_ptr, &handle); if (WARN_ON(ACPI_FAILURE(status))) @@ -194,7 +201,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares, ctx->index -= irq->interrupt_count; return AE_OK; } - fwnode = acpi_gsi_domain_id; + fwnode = acpi_get_gsi_domain_id(irq->interrupts[ctx->index]); acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index], irq->triggering, irq->polarity, irq->shareable, ctx); @@ -207,7 +214,8 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares, ctx->index -= eirq->interrupt_count; return AE_OK; } - fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source); + fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source, + eirq->interrupts[ctx->index]); acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index], eirq->triggering, eirq->polarity, eirq->shareable, ctx); @@ -291,10 +299,20 @@ EXPORT_SYMBOL_GPL(acpi_irq_get); * GSI interrupts */ void __init acpi_set_irq_model(enum acpi_irq_model_id model, - struct fwnode_handle *fwnode) + struct fwnode_handle *(*fn)(u32)) { acpi_irq_model = model; - acpi_gsi_domain_id = fwnode; + acpi_get_gsi_domain_id = fn; +} + +/** + * acpi_set_gsi_to_irq_fallback - Register a GSI transfer + * callback to fallback to arch specified implementation. + * @fn: arch-specific fallback handler + */ +void __init acpi_set_gsi_to_irq_fallback(u32 (*fn)(u32)) +{ + acpi_gsi_to_irq_fallback = fn; } /** @@ -312,8 +330,14 @@ struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, const struct irq_domain_ops *ops, void *host_data) { - struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, - DOMAIN_BUS_ANY); + struct irq_domain *d; + + /* This only works for the GIC model... 
*/ + if (acpi_irq_model != ACPI_IRQ_MODEL_GIC) + return NULL; + + d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(0), + DOMAIN_BUS_ANY); if (!d) return NULL; diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 0e3ed5eb367b..0cb20324da16 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -493,13 +493,8 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent) goto skip_probe; ret = amba_read_periphid(dev); - if (ret) { - if (ret != -EPROBE_DEFER) { - amba_device_put(dev); - goto err_out; - } + if (ret) goto err_release; - } skip_probe: ret = device_add(&dev->dev); @@ -546,6 +541,7 @@ static int amba_deferred_retry(void) continue; list_del_init(&ddev->node); + amba_device_put(ddev->dev); kfree(ddev); } diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index bb45a9c00514..1c9f4fb2595d 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -148,7 +148,7 @@ config SATA_AHCI_PLATFORM config AHCI_BRCM tristate "Broadcom AHCI SATA support" depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \ - ARCH_BCM_63XX || COMPILE_TEST + ARCH_BCMBCA || COMPILE_TEST select SATA_HOST help This option enables support for the AHCI SATA3 controller found on diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c index 6725931f3c35..c2c3238ff84b 100644 --- a/drivers/ata/pata_cs5535.c +++ b/drivers/ata/pata_cs5535.c @@ -90,7 +90,7 @@ static void cs5535_set_piomode(struct ata_port *ap, struct ata_device *adev) static const u16 pio_cmd_timings[5] = { 0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131 }; - u32 reg, dummy; + u32 reg, __maybe_unused dummy; struct ata_device *pair = ata_dev_pair(adev); int mode = adev->pio_mode - XFER_PIO_0; @@ -129,7 +129,7 @@ static void cs5535_set_dmamode(struct ata_port *ap, struct ata_device *adev) static const u32 mwdma_timings[3] = { 0x7F0FFFF3, 0x7F035352, 0x7F024241 }; - u32 reg, dummy; + u32 reg, __maybe_unused dummy; int mode = adev->dma_mode; rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy); diff --git a/drivers/base/core.c b/drivers/base/core.c index 7cd789c4985d..460d6f163e41 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -486,7 +486,18 @@ static void device_link_release_fn(struct work_struct *work) /* Ensure that all references to the link object have been dropped. */ device_link_synchronize_removal(); - pm_runtime_release_supplier(link, true); + pm_runtime_release_supplier(link); + /* + * If supplier_preactivated is set, the link has been dropped between + * the pm_runtime_get_suppliers() and pm_runtime_put_suppliers() calls + * in __driver_probe_device(). In that case, drop the supplier's + * PM-runtime usage counter to remove the reference taken by + * pm_runtime_get_suppliers(). 
+ */ + if (link->supplier_preactivated) + pm_runtime_put_noidle(link->supplier); + + pm_request_idle(link->supplier); put_device(link->consumer); put_device(link->supplier); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index a97776ea9d99..4c98849577d4 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -570,6 +570,12 @@ ssize_t __weak cpu_show_mmio_stale_data(struct device *dev, return sysfs_emit(buf, "Not affected\n"); } +ssize_t __weak cpu_show_retbleed(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -580,6 +586,7 @@ static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); +static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -592,6 +599,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_itlb_multihit.attr, &dev_attr_srbds.attr, &dev_attr_mmio_stale_data.attr, + &dev_attr_retbleed.attr, NULL }; diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 676dc72d912d..949907e2e242 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -308,13 +308,10 @@ static int rpm_get_suppliers(struct device *dev) /** * pm_runtime_release_supplier - Drop references to device link's supplier. * @link: Target device link. - * @check_idle: Whether or not to check if the supplier device is idle. * - * Drop all runtime PM references associated with @link to its supplier device - * and if @check_idle is set, check if that device is idle (and so it can be - * suspended). + * Drop all runtime PM references associated with @link to its supplier device. 
*/ -void pm_runtime_release_supplier(struct device_link *link, bool check_idle) +void pm_runtime_release_supplier(struct device_link *link) { struct device *supplier = link->supplier; @@ -327,9 +324,6 @@ void pm_runtime_release_supplier(struct device_link *link, bool check_idle) while (refcount_dec_not_one(&link->rpm_active) && atomic_read(&supplier->power.usage_count) > 0) pm_runtime_put_noidle(supplier); - - if (check_idle) - pm_request_idle(supplier); } static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) @@ -337,8 +331,11 @@ static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) struct device_link *link; list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, - device_links_read_lock_held()) - pm_runtime_release_supplier(link, try_to_suspend); + device_links_read_lock_held()) { + pm_runtime_release_supplier(link); + if (try_to_suspend) + pm_request_idle(link->supplier); + } } static void rpm_put_suppliers(struct device *dev) @@ -1771,7 +1768,6 @@ void pm_runtime_get_suppliers(struct device *dev) if (link->flags & DL_FLAG_PM_RUNTIME) { link->supplier_preactivated = true; pm_runtime_get_sync(link->supplier); - refcount_inc(&link->rpm_active); } device_links_read_unlock(idx); @@ -1791,19 +1787,8 @@ void pm_runtime_put_suppliers(struct device *dev) list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, device_links_read_lock_held()) if (link->supplier_preactivated) { - bool put; - link->supplier_preactivated = false; - - spin_lock_irq(&dev->power.lock); - - put = pm_runtime_status_suspended(dev) && - refcount_dec_not_one(&link->rpm_active); - - spin_unlock_irq(&dev->power.lock); - - if (put) - pm_runtime_put(link->supplier); + pm_runtime_put(link->supplier); } device_links_read_unlock(idx); @@ -1838,7 +1823,8 @@ void pm_runtime_drop_link(struct device_link *link) return; pm_runtime_drop_link_count(link->consumer); - pm_runtime_release_supplier(link, true); + pm_runtime_release_supplier(link); + pm_request_idle(link->supplier); } static bool pm_runtime_need_not_resume(struct device *dev) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 33f04ef78984..3646c0cae672 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -152,6 +152,10 @@ static unsigned int xen_blkif_max_ring_order; module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444); MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring"); +static bool __read_mostly xen_blkif_trusted = true; +module_param_named(trusted, xen_blkif_trusted, bool, 0644); +MODULE_PARM_DESC(trusted, "Is the backend trusted"); + #define BLK_RING_SIZE(info) \ __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages) @@ -210,6 +214,7 @@ struct blkfront_info unsigned int feature_discard:1; unsigned int feature_secdiscard:1; unsigned int feature_persistent:1; + unsigned int bounce:1; unsigned int discard_granularity; unsigned int discard_alignment; /* Number of 4KB segments handled */ @@ -310,8 +315,8 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num) if (!gnt_list_entry) goto out_of_memory; - if (info->feature_persistent) { - granted_page = alloc_page(GFP_NOIO); + if (info->bounce) { + granted_page = alloc_page(GFP_NOIO | __GFP_ZERO); if (!granted_page) { kfree(gnt_list_entry); goto out_of_memory; @@ -330,7 +335,7 @@ out_of_memory: list_for_each_entry_safe(gnt_list_entry, n, &rinfo->grants, node) { list_del(&gnt_list_entry->node); - if (info->feature_persistent) + if 
(info->bounce) __free_page(gnt_list_entry->page); kfree(gnt_list_entry); i--; @@ -376,7 +381,7 @@ static struct grant *get_grant(grant_ref_t *gref_head, /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); - if (info->feature_persistent) + if (info->bounce) grant_foreign_access(gnt_list_entry, info); else { /* Grant access to the GFN passed by the caller */ @@ -400,7 +405,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head, /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); - if (!info->feature_persistent) { + if (!info->bounce) { struct page *indirect_page; /* Fetch a pre-allocated page to use for indirect grefs */ @@ -703,7 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri .grant_idx = 0, .segments = NULL, .rinfo = rinfo, - .need_copy = rq_data_dir(req) && info->feature_persistent, + .need_copy = rq_data_dir(req) && info->bounce, }; /* @@ -981,11 +986,12 @@ static void xlvbd_flush(struct blkfront_info *info) { blk_queue_write_cache(info->rq, info->feature_flush ? true : false, info->feature_fua ? true : false); - pr_info("blkfront: %s: %s %s %s %s %s\n", + pr_info("blkfront: %s: %s %s %s %s %s %s %s\n", info->gd->disk_name, flush_info(info), "persistent grants:", info->feature_persistent ? "enabled;" : "disabled;", "indirect descriptors:", - info->max_indirect_segments ? "enabled;" : "disabled;"); + info->max_indirect_segments ? "enabled;" : "disabled;", + "bounce buffer:", info->bounce ? "enabled" : "disabled;"); } static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset) @@ -1207,7 +1213,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) if (!list_empty(&rinfo->indirect_pages)) { struct page *indirect_page, *n; - BUG_ON(info->feature_persistent); + BUG_ON(info->bounce); list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); @@ -1224,7 +1230,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) NULL); rinfo->persistent_gnts_c--; } - if (info->feature_persistent) + if (info->bounce) __free_page(persistent_gnt->page); kfree(persistent_gnt); } @@ -1245,7 +1251,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) for (j = 0; j < segs; j++) { persistent_gnt = rinfo->shadow[i].grants_used[j]; gnttab_end_foreign_access(persistent_gnt->gref, NULL); - if (info->feature_persistent) + if (info->bounce) __free_page(persistent_gnt->page); kfree(persistent_gnt); } @@ -1428,7 +1434,7 @@ static int blkif_completion(unsigned long *id, data.s = s; num_sg = s->num_sg; - if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { + if (bret->operation == BLKIF_OP_READ && info->bounce) { for_each_sg(s->sg, sg, num_sg, i) { BUG_ON(sg->offset + sg->length > PAGE_SIZE); @@ -1487,7 +1493,7 @@ static int blkif_completion(unsigned long *id, * Add the used indirect page back to the list of * available pages for indirect grefs. */ - if (!info->feature_persistent) { + if (!info->bounce) { indirect_page = s->indirect_grants[i]->page; list_add(&indirect_page->lru, &rinfo->indirect_pages); } @@ -1764,6 +1770,10 @@ static int talk_to_blkback(struct xenbus_device *dev, if (!info) return -ENODEV; + /* Check if backend is trusted. 
*/ + info->bounce = !xen_blkif_trusted || + !xenbus_read_unsigned(dev->nodename, "trusted", 1); + max_page_order = xenbus_read_unsigned(info->xbdev->otherend, "max-ring-page-order", 0); ring_page_order = min(xen_blkif_max_ring_order, max_page_order); @@ -2173,17 +2183,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) if (err) goto out_of_memory; - if (!info->feature_persistent && info->max_indirect_segments) { + if (!info->bounce && info->max_indirect_segments) { /* - * We are using indirect descriptors but not persistent - * grants, we need to allocate a set of pages that can be + * We are using indirect descriptors but don't have a bounce + * buffer, we need to allocate a set of pages that can be * used for mapping indirect grefs */ int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info); BUG_ON(!list_empty(&rinfo->indirect_pages)); for (i = 0; i < num; i++) { - struct page *indirect_page = alloc_page(GFP_KERNEL); + struct page *indirect_page = alloc_page(GFP_KERNEL | + __GFP_ZERO); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &rinfo->indirect_pages); @@ -2276,6 +2287,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) info->feature_persistent = !!xenbus_read_unsigned(info->xbdev->otherend, "feature-persistent", 0); + if (info->feature_persistent) + info->bounce = true; indirect_segments = xenbus_read_unsigned(info->xbdev->otherend, "feature-max-indirect-segments", 0); @@ -2547,6 +2560,13 @@ static void blkfront_delay_work(struct work_struct *work) struct blkfront_info *info; bool need_schedule_work = false; + /* + * Note that when using bounce buffers but not persistent grants + * there's no need to run blkfront_delay_work because grants are + * revoked in blkif_completion or else an error is reported and the + * connection is closed. 
+ */ + mutex_lock(&blkfront_mutex); list_for_each_entry(info, &info_list, info_list) { diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index b3f2d55dc551..3da8e85f8aae 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -87,7 +87,7 @@ config HW_RANDOM_BA431 config HW_RANDOM_BCM2835 tristate "Broadcom BCM2835/BCM63xx Random Number Generator support" depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \ - ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC || COMPILE_TEST + ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the Random Number diff --git a/drivers/char/random.c b/drivers/char/random.c index e3dd1dd3dd22..a1af90bacc9f 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1174,7 +1174,7 @@ static void __cold entropy_timer(struct timer_list *timer) */ static void __cold try_to_generate_entropy(void) { - enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = 32 }; + enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 30 }; struct entropy_timer_state stack; unsigned int i, num_different = 0; unsigned long last = random_get_entropy(); diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig index ec738f74a026..77266afb1c79 100644 --- a/drivers/clk/bcm/Kconfig +++ b/drivers/clk/bcm/Kconfig @@ -22,9 +22,9 @@ config CLK_BCM2835 config CLK_BCM_63XX bool "Broadcom BCM63xx clock support" - depends on ARCH_BCM_63XX || COMPILE_TEST + depends on ARCH_BCMBCA || COMPILE_TEST select COMMON_CLK_IPROC - default ARCH_BCM_63XX + default ARCH_BCMBCA help Enable common clock framework support for Broadcom BCM63xx DSL SoCs based on the ARM architecture diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c index d1535ac13e89..81cb90955d68 100644 --- a/drivers/clk/clk-lan966x.c +++ b/drivers/clk/clk-lan966x.c @@ -213,7 +213,7 @@ static int lan966x_gate_clk_register(struct device *dev, hw_data->hws[i] = devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name, - "lan966x", 0, base, + "lan966x", 0, gate_base, clk_gate_desc[idx].bit_idx, 0, &clk_gate_lock); diff --git a/drivers/clk/stm32/reset-stm32.c b/drivers/clk/stm32/reset-stm32.c index 040870130e4b..e89381528af9 100644 --- a/drivers/clk/stm32/reset-stm32.c +++ b/drivers/clk/stm32/reset-stm32.c @@ -111,6 +111,7 @@ int stm32_rcc_reset_init(struct device *dev, const struct of_device_id *match, if (!reset_data) return -ENOMEM; + spin_lock_init(&reset_data->lock); reset_data->membase = base; reset_data->rcdev.owner = THIS_MODULE; reset_data->rcdev.ops = &stm32_reset_ops; diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c index 29a8c710ae06..b7962e5149a5 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c @@ -138,6 +138,7 @@ static struct ccu_common *sun50i_h6_r_ccu_clks[] = { &r_apb2_rsb_clk.common, &r_apb1_ir_clk.common, &r_apb1_w1_clk.common, + &r_apb1_rtc_clk.common, &ir_clk.common, &w1_clk.common, }; diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 3c0ee102fe73..4f2bb7315b67 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -22,7 +22,7 @@ config CLKEVT_I8253 config I8253_LOCK bool -config OMAP_DM_TIMER +config OMAP_DM_SYSTIMER bool select TIMER_OF @@ -56,6 +56,13 @@ config DIGICOLOR_TIMER help Enables the support for the digicolor timer driver. 
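The xen-blkfront hunks above decouple bounce buffering from persistent grants: data is bounced through freshly zeroed local pages unless both the new trusted module parameter and the per-device xenstore "trusted" node mark the backend as trusted. A minimal sketch of just that decision, assuming only what the hunk itself shows (xenbus_read_unsigned() returning the default of 1 when the node is absent); backend_needs_bounce() is a hypothetical helper name:

	/*
	 * Minimal sketch of the trust decision from the xen-blkfront hunk:
	 * bounce unless both the module parameter and the xenstore node say
	 * the backend is trusted.
	 */
	#include <linux/types.h>
	#include <xen/xenbus.h>

	static bool backend_needs_bounce(struct xenbus_device *dev, bool trusted_param)
	{
		/* the "trusted" node defaults to 1 (trusted) when absent */
		return !trusted_param ||
		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
	}

Since the parameter is exposed with 0644 permissions, an administrator can distrust all block backends at boot, typically via xen_blkfront.trusted=0 on the kernel command line.
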
+config OMAP_DM_TIMER + bool "OMAP dual-mode timer driver" if ARCH_K3 || COMPILE_TEST + default y if ARCH_K3 + select TIMER_OF + help + Enables the support for the TI dual-mode timer driver. + config DW_APB_TIMER bool "DW APB timer driver" if COMPILE_TEST help @@ -150,6 +157,14 @@ config TEGRA_TIMER help Enables support for the Tegra driver. +config TEGRA186_TIMER + bool "NVIDIA Tegra186 timer driver" + depends on ARCH_TEGRA || COMPILE_TEST + depends on WATCHDOG && WATCHDOG_CORE + help + Enables support for the timers and watchdogs found on NVIDIA + Tegra186 and later SoCs. + config VT8500_TIMER bool "VT8500 timer driver" if COMPILE_TEST depends on HAS_IOMEM @@ -367,7 +382,7 @@ config ARM_GT_INITIAL_PRESCALER_VAL depends on ARM_GLOBAL_TIMER help When the ARM global timer initializes, its current rate is declared - to the kernel and maintained forever. Should it's parent clock + to the kernel and maintained forever. Should its parent clock change, the driver tries to fix the timer's internal prescaler. On some machs (i.e. Zynq) the initial prescaler value thus poses bounds about how much the parent clock is allowed to decrease or diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 6ca640019e10..64ab547de97b 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -18,7 +18,7 @@ obj-$(CONFIG_CLKSRC_MMIO) += mmio.o obj-$(CONFIG_DAVINCI_TIMER) += timer-davinci.o obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o -obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm-systimer.o +obj-$(CONFIG_OMAP_DM_SYSTIMER) += timer-ti-dm-systimer.o obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o @@ -36,6 +36,7 @@ obj-$(CONFIG_SUN4I_TIMER) += timer-sun4i.o obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o obj-$(CONFIG_MESON6_TIMER) += timer-meson6.o obj-$(CONFIG_TEGRA_TIMER) += timer-tegra.o +obj-$(CONFIG_TEGRA186_TIMER) += timer-tegra186.o obj-$(CONFIG_VT8500_TIMER) += timer-vt8500.o obj-$(CONFIG_NSPIRE_TIMER) += timer-zevio.o obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index dd0956ad969c..64dcb082d4cf 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -981,6 +981,14 @@ static const struct of_device_id sh_cmt_of_table[] __maybe_unused = { .compatible = "renesas,rcar-gen3-cmt1", .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2] }, + { + .compatible = "renesas,rcar-gen4-cmt0", + .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2] + }, + { + .compatible = "renesas,rcar-gen4-cmt1", + .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2] + }, { } }; MODULE_DEVICE_TABLE(of, sh_cmt_of_table); diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c index 7bcb4a3f26fb..d5b29fd03ca2 100644 --- a/drivers/clocksource/timer-mediatek.c +++ b/drivers/clocksource/timer-mediatek.c @@ -22,6 +22,19 @@ #define TIMER_SYNC_TICKS (3) +/* cpux mcusys wrapper */ +#define CPUX_CON_REG 0x0 +#define CPUX_IDX_REG 0x4 + +/* cpux */ +#define CPUX_IDX_GLOBAL_CTRL 0x0 + #define CPUX_ENABLE BIT(0) + #define CPUX_CLK_DIV_MASK GENMASK(10, 8) + #define CPUX_CLK_DIV1 BIT(8) + #define CPUX_CLK_DIV2 BIT(9) + #define CPUX_CLK_DIV4 BIT(10) +#define CPUX_IDX_GLOBAL_IRQ 0x30 + /* gpt */ #define GPT_IRQ_EN_REG 0x00 #define GPT_IRQ_ENABLE(val) BIT((val) - 1) @@ -72,6 +85,52 @@ static void __iomem *gpt_sched_reg __read_mostly; +static u32 mtk_cpux_readl(u32 reg_idx, struct 
timer_of *to) +{ + writel(reg_idx, timer_of_base(to) + CPUX_IDX_REG); + return readl(timer_of_base(to) + CPUX_CON_REG); +} + +static void mtk_cpux_writel(u32 val, u32 reg_idx, struct timer_of *to) +{ + writel(reg_idx, timer_of_base(to) + CPUX_IDX_REG); + writel(val, timer_of_base(to) + CPUX_CON_REG); +} + +static void mtk_cpux_set_irq(struct timer_of *to, bool enable) +{ + const unsigned long *irq_mask = cpumask_bits(cpu_possible_mask); + u32 val; + + val = mtk_cpux_readl(CPUX_IDX_GLOBAL_IRQ, to); + + if (enable) + val |= *irq_mask; + else + val &= ~(*irq_mask); + + mtk_cpux_writel(val, CPUX_IDX_GLOBAL_IRQ, to); +} + +static int mtk_cpux_clkevt_shutdown(struct clock_event_device *clkevt) +{ + /* Clear any irq */ + mtk_cpux_set_irq(to_timer_of(clkevt), false); + + /* + * Disabling CPUXGPT timer will crash the platform, especially + * if Trusted Firmware is using it (usually, for sleep states), + * so we only mask the IRQ and call it a day. + */ + return 0; +} + +static int mtk_cpux_clkevt_resume(struct clock_event_device *clkevt) +{ + mtk_cpux_set_irq(to_timer_of(clkevt), true); + return 0; +} + static void mtk_syst_ack_irq(struct timer_of *to) { /* Clear and disable interrupt */ @@ -281,6 +340,60 @@ static struct timer_of to = { }, }; +static int __init mtk_cpux_init(struct device_node *node) +{ + static struct timer_of to_cpux; + u32 freq, val; + int ret; + + /* + * There are per-cpu interrupts for the CPUX General Purpose Timer + * but since this timer feeds the AArch64 System Timer we can rely + * on the CPU timer PPIs as well, so we don't declare TIMER_OF_IRQ. + */ + to_cpux.flags = TIMER_OF_BASE | TIMER_OF_CLOCK; + to_cpux.clkevt.name = "mtk-cpuxgpt"; + to_cpux.clkevt.rating = 10; + to_cpux.clkevt.cpumask = cpu_possible_mask; + to_cpux.clkevt.set_state_shutdown = mtk_cpux_clkevt_shutdown; + to_cpux.clkevt.tick_resume = mtk_cpux_clkevt_resume; + + /* If this fails, bad things are about to happen... */ + ret = timer_of_init(node, &to_cpux); + if (ret) { + WARN(1, "Cannot start CPUX timers.\n"); + return ret; + } + + /* + * Check if we're given a clock with the right frequency for this + * timer, otherwise warn but keep going with the setup anyway, as + * that makes it possible to still boot the kernel, even though + * it may not work correctly (random lockups, etc). + * The reason behind this is that having an early UART may not be + * possible for everyone and this gives a chance to retrieve kmsg + * for eventual debugging even on consumer devices. 
+ */ + freq = timer_of_rate(&to_cpux); + if (freq > 13000000) + WARN(1, "Requested unsupported timer frequency %u\n", freq); + + /* Clock input is 26MHz, set DIV2 to achieve 13MHz clock */ + val = mtk_cpux_readl(CPUX_IDX_GLOBAL_CTRL, &to_cpux); + val &= ~CPUX_CLK_DIV_MASK; + val |= CPUX_CLK_DIV2; + mtk_cpux_writel(val, CPUX_IDX_GLOBAL_CTRL, &to_cpux); + + /* Enable all CPUXGPT timers */ + val = mtk_cpux_readl(CPUX_IDX_GLOBAL_CTRL, &to_cpux); + mtk_cpux_writel(val | CPUX_ENABLE, CPUX_IDX_GLOBAL_CTRL, &to_cpux); + + clockevents_config_and_register(&to_cpux.clkevt, timer_of_rate(&to_cpux), + TIMER_SYNC_TICKS, 0xffffffff); + + return 0; +} + static int __init mtk_syst_init(struct device_node *node) { int ret; @@ -339,3 +452,4 @@ static int __init mtk_gpt_init(struct device_node *node) } TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init); TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init); +TIMER_OF_DECLARE(mtk_mt6795, "mediatek,mt6795-systimer", mtk_cpux_init); diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c index abce83d2f00b..d5f1436f33d9 100644 --- a/drivers/clocksource/timer-microchip-pit64b.c +++ b/drivers/clocksource/timer-microchip-pit64b.c @@ -61,7 +61,7 @@ struct mchp_pit64b_timer { }; /** - * mchp_pit64b_clkevt - PIT64B clockevent data structure + * struct mchp_pit64b_clkevt - PIT64B clockevent data structure * @timer: PIT64B timer * @clkevt: clockevent */ @@ -75,7 +75,7 @@ struct mchp_pit64b_clkevt { struct mchp_pit64b_clkevt, clkevt)) /** - * mchp_pit64b_clksrc - PIT64B clocksource data structure + * struct mchp_pit64b_clksrc - PIT64B clocksource data structure * @timer: PIT64B timer * @clksrc: clocksource */ @@ -173,7 +173,8 @@ static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev) { struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev); - writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR); + if (!clockevent_state_detached(cedev)) + mchp_pit64b_suspend(timer); return 0; } @@ -182,35 +183,37 @@ static int mchp_pit64b_clkevt_set_periodic(struct clock_event_device *cedev) { struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev); + if (clockevent_state_shutdown(cedev)) + mchp_pit64b_resume(timer); + mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_CONT, MCHP_PIT64B_IER_PERIOD); return 0; } -static int mchp_pit64b_clkevt_set_next_event(unsigned long evt, - struct clock_event_device *cedev) +static int mchp_pit64b_clkevt_set_oneshot(struct clock_event_device *cedev) { struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev); - mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT, + if (clockevent_state_shutdown(cedev)) + mchp_pit64b_resume(timer); + + mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_ONE_SHOT, MCHP_PIT64B_IER_PERIOD); return 0; } -static void mchp_pit64b_clkevt_suspend(struct clock_event_device *cedev) +static int mchp_pit64b_clkevt_set_next_event(unsigned long evt, + struct clock_event_device *cedev) { struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev); - mchp_pit64b_suspend(timer); -} - -static void mchp_pit64b_clkevt_resume(struct clock_event_device *cedev) -{ - struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev); + mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT, + MCHP_PIT64B_IER_PERIOD); - mchp_pit64b_resume(timer); + return 0; } static irqreturn_t mchp_pit64b_interrupt(int irq, void *dev_id) @@ -242,8 +245,10 @@ static void __init 
mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate, } /** - * mchp_pit64b_init_mode - prepare PIT64B mode register value to be used at - * runtime; this includes prescaler and SGCLK bit + * mchp_pit64b_init_mode() - prepare PIT64B mode register value to be used at + * runtime; this includes prescaler and SGCLK bit + * @timer: pointer to pit64b timer to init + * @max_rate: maximum rate that timer's clock could use * * PIT64B timer may be fed by gclk or pclk. When gclk is used its rate has to * be at least 3 times lower that pclk's rate. pclk rate is fixed, gclk rate @@ -341,6 +346,7 @@ static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer, if (!cs) return -ENOMEM; + mchp_pit64b_resume(timer); mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0); mchp_pit64b_cs_base = timer->base; @@ -362,8 +368,7 @@ static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer, pr_debug("clksrc: Failed to register PIT64B clocksource!\n"); /* Stop timer. */ - writel_relaxed(MCHP_PIT64B_CR_SWRST, - timer->base + MCHP_PIT64B_CR); + mchp_pit64b_suspend(timer); kfree(cs); return ret; @@ -395,9 +400,8 @@ static int __init mchp_pit64b_init_clkevt(struct mchp_pit64b_timer *timer, ce->clkevt.rating = 150; ce->clkevt.set_state_shutdown = mchp_pit64b_clkevt_shutdown; ce->clkevt.set_state_periodic = mchp_pit64b_clkevt_set_periodic; + ce->clkevt.set_state_oneshot = mchp_pit64b_clkevt_set_oneshot; ce->clkevt.set_next_event = mchp_pit64b_clkevt_set_next_event; - ce->clkevt.suspend = mchp_pit64b_clkevt_suspend; - ce->clkevt.resume = mchp_pit64b_clkevt_resume; ce->clkevt.cpumask = cpumask_of(0); ce->clkevt.irq = irq; @@ -448,19 +452,10 @@ static int __init mchp_pit64b_dt_init_timer(struct device_node *node, if (ret) goto irq_unmap; - ret = clk_prepare_enable(timer.pclk); - if (ret) - goto irq_unmap; - - if (timer.mode & MCHP_PIT64B_MR_SGCLK) { - ret = clk_prepare_enable(timer.gclk); - if (ret) - goto pclk_unprepare; - + if (timer.mode & MCHP_PIT64B_MR_SGCLK) clk_rate = clk_get_rate(timer.gclk); - } else { + else clk_rate = clk_get_rate(timer.pclk); - } clk_rate = clk_rate / (MCHP_PIT64B_MODE_TO_PRES(timer.mode) + 1); if (clkevt) @@ -469,15 +464,10 @@ static int __init mchp_pit64b_dt_init_timer(struct device_node *node, ret = mchp_pit64b_init_clksrc(&timer, clk_rate); if (ret) - goto gclk_unprepare; + goto irq_unmap; return 0; -gclk_unprepare: - if (timer.mode & MCHP_PIT64B_MR_SGCLK) - clk_disable_unprepare(timer.gclk); -pclk_unprepare: - clk_disable_unprepare(timer.pclk); irq_unmap: irq_dispose_mapping(irq); io_unmap: diff --git a/drivers/clocksource/timer-sun4i.c b/drivers/clocksource/timer-sun4i.c index bb6ea6c19829..94dc6e42e983 100644 --- a/drivers/clocksource/timer-sun4i.c +++ b/drivers/clocksource/timer-sun4i.c @@ -128,7 +128,7 @@ static void sun4i_timer_clear_interrupt(void __iomem *base) static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id) { - struct clock_event_device *evt = (struct clock_event_device *)dev_id; + struct clock_event_device *evt = dev_id; struct timer_of *to = to_timer_of(evt); sun4i_timer_clear_interrupt(timer_of_base(to)); diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 85900f7fc69f..7d5fa9069906 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c @@ -142,7 +142,7 @@ static int sun5i_clkevt_next_event(unsigned long evt, static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id) { - struct sun5i_timer_clkevt *ce = (struct sun5i_timer_clkevt *)dev_id; + struct 
sun5i_timer_clkevt *ce = dev_id; writel(0x1, ce->timer.base + TIMER_IRQ_ST_REG); ce->clkevt.event_handler(&ce->clkevt); diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c new file mode 100644 index 000000000000..ea742889ee06 --- /dev/null +++ b/drivers/clocksource/timer-tegra186.c @@ -0,0 +1,514 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2020 NVIDIA Corporation. All rights reserved. + */ + +#include <linux/clocksource.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/watchdog.h> + +/* shared registers */ +#define TKETSC0 0x000 +#define TKETSC1 0x004 +#define TKEUSEC 0x008 +#define TKEOSC 0x00c + +#define TKEIE(x) (0x100 + ((x) * 4)) +#define TKEIE_WDT_MASK(x, y) ((y) << (16 + 4 * (x))) + +/* timer registers */ +#define TMRCR 0x000 +#define TMRCR_ENABLE BIT(31) +#define TMRCR_PERIODIC BIT(30) +#define TMRCR_PTV(x) ((x) & 0x0fffffff) + +#define TMRSR 0x004 +#define TMRSR_INTR_CLR BIT(30) + +#define TMRCSSR 0x008 +#define TMRCSSR_SRC_USEC (0 << 0) + +/* watchdog registers */ +#define WDTCR 0x000 +#define WDTCR_SYSTEM_POR_RESET_ENABLE BIT(16) +#define WDTCR_SYSTEM_DEBUG_RESET_ENABLE BIT(15) +#define WDTCR_REMOTE_INT_ENABLE BIT(14) +#define WDTCR_LOCAL_FIQ_ENABLE BIT(13) +#define WDTCR_LOCAL_INT_ENABLE BIT(12) +#define WDTCR_PERIOD_MASK (0xff << 4) +#define WDTCR_PERIOD(x) (((x) & 0xff) << 4) +#define WDTCR_TIMER_SOURCE_MASK 0xf +#define WDTCR_TIMER_SOURCE(x) ((x) & 0xf) + +#define WDTCMDR 0x008 +#define WDTCMDR_DISABLE_COUNTER BIT(1) +#define WDTCMDR_START_COUNTER BIT(0) + +#define WDTUR 0x00c +#define WDTUR_UNLOCK_PATTERN 0x0000c45a + +struct tegra186_timer_soc { + unsigned int num_timers; + unsigned int num_wdts; +}; + +struct tegra186_tmr { + struct tegra186_timer *parent; + void __iomem *regs; + unsigned int index; + unsigned int hwirq; +}; + +struct tegra186_wdt { + struct watchdog_device base; + + void __iomem *regs; + unsigned int index; + bool locked; + + struct tegra186_tmr *tmr; +}; + +static inline struct tegra186_wdt *to_tegra186_wdt(struct watchdog_device *wdd) +{ + return container_of(wdd, struct tegra186_wdt, base); +} + +struct tegra186_timer { + const struct tegra186_timer_soc *soc; + struct device *dev; + void __iomem *regs; + + struct tegra186_wdt *wdt; + struct clocksource usec; + struct clocksource tsc; + struct clocksource osc; +}; + +static void tmr_writel(struct tegra186_tmr *tmr, u32 value, unsigned int offset) +{ + writel_relaxed(value, tmr->regs + offset); +} + +static void wdt_writel(struct tegra186_wdt *wdt, u32 value, unsigned int offset) +{ + writel_relaxed(value, wdt->regs + offset); +} + +static u32 wdt_readl(struct tegra186_wdt *wdt, unsigned int offset) +{ + return readl_relaxed(wdt->regs + offset); +} + +static struct tegra186_tmr *tegra186_tmr_create(struct tegra186_timer *tegra, + unsigned int index) +{ + unsigned int offset = 0x10000 + index * 0x10000; + struct tegra186_tmr *tmr; + + tmr = devm_kzalloc(tegra->dev, sizeof(*tmr), GFP_KERNEL); + if (!tmr) + return ERR_PTR(-ENOMEM); + + tmr->parent = tegra; + tmr->regs = tegra->regs + offset; + tmr->index = index; + tmr->hwirq = 0; + + return tmr; +} + +static const struct watchdog_info tegra186_wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, + .identity = "NVIDIA Tegra186 WDT", +}; + +static void tegra186_wdt_disable(struct tegra186_wdt *wdt) +{ + 
/* unlock and disable the watchdog */ + wdt_writel(wdt, WDTUR_UNLOCK_PATTERN, WDTUR); + wdt_writel(wdt, WDTCMDR_DISABLE_COUNTER, WDTCMDR); + + /* disable timer */ + tmr_writel(wdt->tmr, 0, TMRCR); +} + +static void tegra186_wdt_enable(struct tegra186_wdt *wdt) +{ + struct tegra186_timer *tegra = wdt->tmr->parent; + u32 value; + + /* unmask hardware IRQ, this may have been lost across powergate */ + value = TKEIE_WDT_MASK(wdt->index, 1); + writel(value, tegra->regs + TKEIE(wdt->tmr->hwirq)); + + /* clear interrupt */ + tmr_writel(wdt->tmr, TMRSR_INTR_CLR, TMRSR); + + /* select microsecond source */ + tmr_writel(wdt->tmr, TMRCSSR_SRC_USEC, TMRCSSR); + + /* configure timer (system reset happens on the fifth expiration) */ + value = TMRCR_PTV(wdt->base.timeout * USEC_PER_SEC / 5) | + TMRCR_PERIODIC | TMRCR_ENABLE; + tmr_writel(wdt->tmr, value, TMRCR); + + if (!wdt->locked) { + value = wdt_readl(wdt, WDTCR); + + /* select the proper timer source */ + value &= ~WDTCR_TIMER_SOURCE_MASK; + value |= WDTCR_TIMER_SOURCE(wdt->tmr->index); + + /* single timer period since that's already configured */ + value &= ~WDTCR_PERIOD_MASK; + value |= WDTCR_PERIOD(1); + + /* enable local interrupt for WDT petting */ + value |= WDTCR_LOCAL_INT_ENABLE; + + /* enable local FIQ and remote interrupt for debug dump */ + if (0) + value |= WDTCR_REMOTE_INT_ENABLE | + WDTCR_LOCAL_FIQ_ENABLE; + + /* enable system debug reset (doesn't properly reboot) */ + if (0) + value |= WDTCR_SYSTEM_DEBUG_RESET_ENABLE; + + /* enable system POR reset */ + value |= WDTCR_SYSTEM_POR_RESET_ENABLE; + + wdt_writel(wdt, value, WDTCR); + } + + wdt_writel(wdt, WDTCMDR_START_COUNTER, WDTCMDR); +} + +static int tegra186_wdt_start(struct watchdog_device *wdd) +{ + struct tegra186_wdt *wdt = to_tegra186_wdt(wdd); + + tegra186_wdt_enable(wdt); + + return 0; +} + +static int tegra186_wdt_stop(struct watchdog_device *wdd) +{ + struct tegra186_wdt *wdt = to_tegra186_wdt(wdd); + + tegra186_wdt_disable(wdt); + + return 0; +} + +static int tegra186_wdt_ping(struct watchdog_device *wdd) +{ + struct tegra186_wdt *wdt = to_tegra186_wdt(wdd); + + tegra186_wdt_disable(wdt); + tegra186_wdt_enable(wdt); + + return 0; +} + +static int tegra186_wdt_set_timeout(struct watchdog_device *wdd, + unsigned int timeout) +{ + struct tegra186_wdt *wdt = to_tegra186_wdt(wdd); + + if (watchdog_active(&wdt->base)) + tegra186_wdt_disable(wdt); + + wdt->base.timeout = timeout; + + if (watchdog_active(&wdt->base)) + tegra186_wdt_enable(wdt); + + return 0; +} + +static const struct watchdog_ops tegra186_wdt_ops = { + .owner = THIS_MODULE, + .start = tegra186_wdt_start, + .stop = tegra186_wdt_stop, + .ping = tegra186_wdt_ping, + .set_timeout = tegra186_wdt_set_timeout, +}; + +static struct tegra186_wdt *tegra186_wdt_create(struct tegra186_timer *tegra, + unsigned int index) +{ + unsigned int offset = 0x10000, source; + struct tegra186_wdt *wdt; + u32 value; + int err; + + offset += tegra->soc->num_timers * 0x10000 + index * 0x10000; + + wdt = devm_kzalloc(tegra->dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return ERR_PTR(-ENOMEM); + + wdt->regs = tegra->regs + offset; + wdt->index = index; + + /* read the watchdog configuration since it might be locked down */ + value = wdt_readl(wdt, WDTCR); + + if (value & WDTCR_LOCAL_INT_ENABLE) + wdt->locked = true; + + source = value & WDTCR_TIMER_SOURCE_MASK; + + wdt->tmr = tegra186_tmr_create(tegra, source); + if (IS_ERR(wdt->tmr)) + return ERR_CAST(wdt->tmr); + + wdt->base.info = &tegra186_wdt_info; + wdt->base.ops = &tegra186_wdt_ops; + 
wdt->base.min_timeout = 1; + wdt->base.max_timeout = 255; + wdt->base.parent = tegra->dev; + + err = watchdog_init_timeout(&wdt->base, 5, tegra->dev); + if (err < 0) { + dev_err(tegra->dev, "failed to initialize timeout: %d\n", err); + return ERR_PTR(err); + } + + err = devm_watchdog_register_device(tegra->dev, &wdt->base); + if (err < 0) { + dev_err(tegra->dev, "failed to register WDT: %d\n", err); + return ERR_PTR(err); + } + + return wdt; +} + +static u64 tegra186_timer_tsc_read(struct clocksource *cs) +{ + struct tegra186_timer *tegra = container_of(cs, struct tegra186_timer, + tsc); + u32 hi, lo, ss; + + hi = readl_relaxed(tegra->regs + TKETSC1); + + /* + * The 56-bit value of the TSC is spread across two registers that are + * not synchronized. In order to read them atomically, ensure that the + * high 24 bits match before and after reading the low 32 bits. + */ + do { + /* snapshot the high 24 bits */ + ss = hi; + + lo = readl_relaxed(tegra->regs + TKETSC0); + hi = readl_relaxed(tegra->regs + TKETSC1); + } while (hi != ss); + + return (u64)hi << 32 | lo; +} + +static int tegra186_timer_tsc_init(struct tegra186_timer *tegra) +{ + tegra->tsc.name = "tsc"; + tegra->tsc.rating = 300; + tegra->tsc.read = tegra186_timer_tsc_read; + tegra->tsc.mask = CLOCKSOURCE_MASK(56); + tegra->tsc.flags = CLOCK_SOURCE_IS_CONTINUOUS; + + return clocksource_register_hz(&tegra->tsc, 31250000); +} + +static u64 tegra186_timer_osc_read(struct clocksource *cs) +{ + struct tegra186_timer *tegra = container_of(cs, struct tegra186_timer, + osc); + + return readl_relaxed(tegra->regs + TKEOSC); +} + +static int tegra186_timer_osc_init(struct tegra186_timer *tegra) +{ + tegra->osc.name = "osc"; + tegra->osc.rating = 300; + tegra->osc.read = tegra186_timer_osc_read; + tegra->osc.mask = CLOCKSOURCE_MASK(32); + tegra->osc.flags = CLOCK_SOURCE_IS_CONTINUOUS; + + return clocksource_register_hz(&tegra->osc, 38400000); +} + +static u64 tegra186_timer_usec_read(struct clocksource *cs) +{ + struct tegra186_timer *tegra = container_of(cs, struct tegra186_timer, + usec); + + return readl_relaxed(tegra->regs + TKEUSEC); +} + +static int tegra186_timer_usec_init(struct tegra186_timer *tegra) +{ + tegra->usec.name = "usec"; + tegra->usec.rating = 300; + tegra->usec.read = tegra186_timer_usec_read; + tegra->usec.mask = CLOCKSOURCE_MASK(32); + tegra->usec.flags = CLOCK_SOURCE_IS_CONTINUOUS; + + return clocksource_register_hz(&tegra->usec, USEC_PER_SEC); +} + +static irqreturn_t tegra186_timer_irq(int irq, void *data) +{ + struct tegra186_timer *tegra = data; + + if (watchdog_active(&tegra->wdt->base)) { + tegra186_wdt_disable(tegra->wdt); + tegra186_wdt_enable(tegra->wdt); + } + + return IRQ_HANDLED; +} + +static int tegra186_timer_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct tegra186_timer *tegra; + unsigned int irq; + int err; + + tegra = devm_kzalloc(dev, sizeof(*tegra), GFP_KERNEL); + if (!tegra) + return -ENOMEM; + + tegra->soc = of_device_get_match_data(dev); + dev_set_drvdata(dev, tegra); + tegra->dev = dev; + + tegra->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(tegra->regs)) + return PTR_ERR(tegra->regs); + + err = platform_get_irq(pdev, 0); + if (err < 0) + return err; + + irq = err; + + /* create a watchdog using a preconfigured timer */ + tegra->wdt = tegra186_wdt_create(tegra, 0); + if (IS_ERR(tegra->wdt)) { + err = PTR_ERR(tegra->wdt); + dev_err(dev, "failed to create WDT: %d\n", err); + return err; + } + + err = tegra186_timer_tsc_init(tegra); + if (err < 0) { + 
dev_err(dev, "failed to register TSC counter: %d\n", err); + return err; + } + + err = tegra186_timer_osc_init(tegra); + if (err < 0) { + dev_err(dev, "failed to register OSC counter: %d\n", err); + goto unregister_tsc; + } + + err = tegra186_timer_usec_init(tegra); + if (err < 0) { + dev_err(dev, "failed to register USEC counter: %d\n", err); + goto unregister_osc; + } + + err = devm_request_irq(dev, irq, tegra186_timer_irq, 0, + "tegra186-timer", tegra); + if (err < 0) { + dev_err(dev, "failed to request IRQ#%u: %d\n", irq, err); + goto unregister_usec; + } + + return 0; + +unregister_usec: + clocksource_unregister(&tegra->usec); +unregister_osc: + clocksource_unregister(&tegra->osc); +unregister_tsc: + clocksource_unregister(&tegra->tsc); + return err; +} + +static int tegra186_timer_remove(struct platform_device *pdev) +{ + struct tegra186_timer *tegra = platform_get_drvdata(pdev); + + clocksource_unregister(&tegra->usec); + clocksource_unregister(&tegra->osc); + clocksource_unregister(&tegra->tsc); + + return 0; +} + +static int __maybe_unused tegra186_timer_suspend(struct device *dev) +{ + struct tegra186_timer *tegra = dev_get_drvdata(dev); + + if (watchdog_active(&tegra->wdt->base)) + tegra186_wdt_disable(tegra->wdt); + + return 0; +} + +static int __maybe_unused tegra186_timer_resume(struct device *dev) +{ + struct tegra186_timer *tegra = dev_get_drvdata(dev); + + if (watchdog_active(&tegra->wdt->base)) + tegra186_wdt_enable(tegra->wdt); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(tegra186_timer_pm_ops, tegra186_timer_suspend, + tegra186_timer_resume); + +static const struct tegra186_timer_soc tegra186_timer = { + .num_timers = 10, + .num_wdts = 3, +}; + +static const struct tegra186_timer_soc tegra234_timer = { + .num_timers = 16, + .num_wdts = 3, +}; + +static const struct of_device_id tegra186_timer_of_match[] = { + { .compatible = "nvidia,tegra186-timer", .data = &tegra186_timer }, + { .compatible = "nvidia,tegra234-timer", .data = &tegra234_timer }, + { } +}; +MODULE_DEVICE_TABLE(of, tegra186_timer_of_match); + +static struct platform_driver tegra186_wdt_driver = { + .driver = { + .name = "tegra186-timer", + .pm = &tegra186_timer_pm_ops, + .of_match_table = tegra186_timer_of_match, + }, + .probe = tegra186_timer_probe, + .remove = tegra186_timer_remove, +}; +module_platform_driver(tegra186_wdt_driver); + +MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>"); +MODULE_DESCRIPTION("NVIDIA Tegra186 timers driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index c194e8f74e1d..469f7c91564b 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c @@ -44,6 +44,121 @@ enum { REQUEST_BY_NODE, }; +static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg, + int posted) +{ + if (posted) + while (readl_relaxed(timer->pend) & (reg >> WPSHIFT)) + cpu_relax(); + + return readl_relaxed(timer->func_base + (reg & 0xff)); +} + +static inline void __omap_dm_timer_write(struct omap_dm_timer *timer, + u32 reg, u32 val, int posted) +{ + if (posted) + while (readl_relaxed(timer->pend) & (reg >> WPSHIFT)) + cpu_relax(); + + writel_relaxed(val, timer->func_base + (reg & 0xff)); +} + +static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer) +{ + u32 tidr; + + /* Assume v1 ip if bits [31:16] are zero */ + tidr = readl_relaxed(timer->io_base); + if (!(tidr >> 16)) { + timer->revision = 1; + timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET; + timer->irq_ena = 
timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; + timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; + timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET; + timer->func_base = timer->io_base; + } else { + timer->revision = 2; + timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS; + timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET; + timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR; + timer->pend = timer->io_base + + _OMAP_TIMER_WRITE_PEND_OFFSET + + OMAP_TIMER_V2_FUNC_OFFSET; + timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET; + } +} + +/* + * __omap_dm_timer_enable_posted - enables write posted mode + * @timer: pointer to timer instance handle + * + * Enables the write posted mode for the timer. When posted mode is enabled + * writes to certain timer registers are immediately acknowledged by the + * internal bus and hence prevents stalling the CPU waiting for the write to + * complete. Enabling this feature can improve performance for writing to the + * timer registers. + */ +static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer) +{ + if (timer->posted) + return; + + if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) { + timer->posted = OMAP_TIMER_NONPOSTED; + __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0); + return; + } + + __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, + OMAP_TIMER_CTRL_POSTED, 0); + timer->context.tsicr = OMAP_TIMER_CTRL_POSTED; + timer->posted = OMAP_TIMER_POSTED; +} + +static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer, + int posted, unsigned long rate) +{ + u32 l; + + l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); + if (l & OMAP_TIMER_CTRL_ST) { + l &= ~0x1; + __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted); +#ifdef CONFIG_ARCH_OMAP2PLUS + /* Readback to make sure write has completed */ + __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); + /* + * Wait for functional clock period x 3.5 to make sure that + * timer is stopped + */ + udelay(3500000 / rate + 1); +#endif + } + + /* Ack possibly pending interrupt */ + writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat); +} + +static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer, + unsigned int value) +{ + writel_relaxed(value, timer->irq_ena); + __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0); +} + +static inline unsigned int +__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted) +{ + return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted); +} + +static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer, + unsigned int value) +{ + writel_relaxed(value, timer->irq_stat); +} + /** * omap_dm_timer_read_reg - read timer registers in posted and non-posted mode * @timer: timer pointer over which read operation to perform @@ -921,6 +1036,10 @@ static const struct dmtimer_platform_data omap3plus_pdata = { .timer_ops = &dmtimer_ops, }; +static const struct dmtimer_platform_data am6_pdata = { + .timer_ops = &dmtimer_ops, +}; + static const struct of_device_id omap_timer_match[] = { { .compatible = "ti,omap2420-timer", @@ -949,6 +1068,10 @@ static const struct of_device_id omap_timer_match[] = { .compatible = "ti,dm816-timer", .data = &omap3plus_pdata, }, + { + .compatible = "ti,am654-timer", + .data = &am6_pdata, + }, {}, }; MODULE_DEVICE_TABLE(of, omap_timer_match); diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 7be38bc6a673..9ac75c1cde9c 100644 --- 
a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -566,6 +566,28 @@ static int amd_pstate_cpu_exit(struct cpufreq_policy *policy) return 0; } +static int amd_pstate_cpu_resume(struct cpufreq_policy *policy) +{ + int ret; + + ret = amd_pstate_enable(true); + if (ret) + pr_err("failed to enable amd-pstate during resume, return %d\n", ret); + + return ret; +} + +static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy) +{ + int ret; + + ret = amd_pstate_enable(false); + if (ret) + pr_err("failed to disable amd-pstate during suspend, return %d\n", ret); + + return ret; +} + /* Sysfs attributes */ /* @@ -636,6 +658,8 @@ static struct cpufreq_driver amd_pstate_driver = { .target = amd_pstate_target, .init = amd_pstate_cpu_init, .exit = amd_pstate_cpu_exit, + .suspend = amd_pstate_cpu_suspend, + .resume = amd_pstate_cpu_resume, .set_boost = amd_pstate_set_boost, .name = "amd-pstate", .attr = amd_pstate_attr, diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 96de1536e1cb..2c96de3f2d83 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -127,6 +127,7 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "mediatek,mt8173", }, { .compatible = "mediatek,mt8176", }, { .compatible = "mediatek,mt8183", }, + { .compatible = "mediatek,mt8186", }, { .compatible = "mediatek,mt8365", }, { .compatible = "mediatek,mt8516", }, diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index 37a1eb20f5ba..76f6b3884e6b 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -439,9 +439,13 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) /* Both presence and absence of sram regulator are valid cases. 
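 * The one failure that must not be swallowed is -EPROBE_DEFER: the regulator
 * may simply not have been registered yet, in which case the whole probe has
 * to be retried later instead of silently running without the sram supply.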
*/ info->sram_reg = regulator_get_optional(cpu_dev, "sram"); - if (IS_ERR(info->sram_reg)) + if (IS_ERR(info->sram_reg)) { + ret = PTR_ERR(info->sram_reg); + if (ret == -EPROBE_DEFER) + goto out_free_resources; + info->sram_reg = NULL; - else { + } else { ret = regulator_enable(info->sram_reg); if (ret) { dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu); diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index 20f64a8b0a35..4b8ee2014da6 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c @@ -470,6 +470,10 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode) if (slew_done_gpio_np) slew_done_gpio = read_gpio(slew_done_gpio_np); + of_node_put(volt_gpio_np); + of_node_put(freq_gpio_np); + of_node_put(slew_done_gpio_np); + /* If we use the frequency GPIOs, calculate the min/max speeds based * on the bus frequencies */ diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index 0253731d6d25..36c79580fba2 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -442,6 +442,9 @@ static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy) struct platform_device *pdev = cpufreq_get_driver_data(); int ret; + if (data->throttle_irq <= 0) + return 0; + ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus); if (ret) dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n", @@ -469,6 +472,9 @@ static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy) static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data) { + if (data->throttle_irq <= 0) + return; + free_irq(data->throttle_irq, data); } diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index 6b6b20da2bcf..573b417e1483 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -275,6 +275,7 @@ static int qoriq_cpufreq_probe(struct platform_device *pdev) np = of_find_matching_node(NULL, qoriq_cpufreq_blacklist); if (np) { + of_node_put(np); dev_info(&pdev->dev, "Disabling due to erratum A-008083"); return -ENODEV; } diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index be7f512109f7..747aa537389b 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm @@ -3,7 +3,8 @@ # ARM CPU Idle drivers # config ARM_CPUIDLE - bool "Generic ARM/ARM64 CPU idle Driver" + bool "Generic ARM CPU idle Driver" + depends on ARM select DT_IDLE_STATES select CPU_IDLE_MULTIPLE_DRIVERS help diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index ee99c02c84e8..3e6aa319920b 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -133,98 +133,6 @@ config CRYPTO_PAES_S390 Select this option if you want to use the paes cipher for example to use protected key encrypted devices. -config CRYPTO_SHA1_S390 - tristate "SHA1 digest algorithm" - depends on S390 - select CRYPTO_HASH - help - This is the s390 hardware accelerated implementation of the - SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). - - It is available as of z990. - -config CRYPTO_SHA256_S390 - tristate "SHA256 digest algorithm" - depends on S390 - select CRYPTO_HASH - help - This is the s390 hardware accelerated implementation of the - SHA256 secure hash standard (DFIPS 180-2). - - It is available as of z9. 
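The mediatek-cpufreq change above is an instance of a recurring optional-resource pattern: regulator_get_optional() fails both when the supply is genuinely absent and when its provider has not probed yet, and only the latter (-EPROBE_DEFER) should abort the probe. A minimal sketch of the pattern, where the helper name and the "sram" supply id are illustrative and not code from this series:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int foo_get_optional_sram(struct device *dev, struct regulator **out)
{
	struct regulator *reg = regulator_get_optional(dev, "sram");

	if (IS_ERR(reg)) {
		/* Provider not ready yet: retry the whole probe later. */
		if (PTR_ERR(reg) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		/* Genuinely absent: a valid setup, continue without it. */
		*out = NULL;
		return 0;
	}

	*out = reg;
	return regulator_enable(reg);
}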
- -config CRYPTO_SHA512_S390 - tristate "SHA384 and SHA512 digest algorithm" - depends on S390 - select CRYPTO_HASH - help - This is the s390 hardware accelerated implementation of the - SHA512 secure hash standard. - - It is available as of z10. - -config CRYPTO_SHA3_256_S390 - tristate "SHA3_224 and SHA3_256 digest algorithm" - depends on S390 - select CRYPTO_HASH - help - This is the s390 hardware accelerated implementation of the - SHA3_256 secure hash standard. - - It is available as of z14. - -config CRYPTO_SHA3_512_S390 - tristate "SHA3_384 and SHA3_512 digest algorithm" - depends on S390 - select CRYPTO_HASH - help - This is the s390 hardware accelerated implementation of the - SHA3_512 secure hash standard. - - It is available as of z14. - -config CRYPTO_DES_S390 - tristate "DES and Triple DES cipher algorithms" - depends on S390 - select CRYPTO_ALGAPI - select CRYPTO_SKCIPHER - select CRYPTO_LIB_DES - help - This is the s390 hardware accelerated implementation of the - DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). - - As of z990 the ECB and CBC mode are hardware accelerated. - As of z196 the CTR mode is hardware accelerated. - -config CRYPTO_AES_S390 - tristate "AES cipher algorithms" - depends on S390 - select CRYPTO_ALGAPI - select CRYPTO_SKCIPHER - help - This is the s390 hardware accelerated implementation of the - AES cipher algorithms (FIPS-197). - - As of z9 the ECB and CBC modes are hardware accelerated - for 128 bit keys. - As of z10 the ECB and CBC modes are hardware accelerated - for all AES key sizes. - As of z196 the CTR mode is hardware accelerated for all AES - key sizes and XTS mode is hardware accelerated for 256 and - 512 bit keys. - -config CRYPTO_CHACHA_S390 - tristate "ChaCha20 stream cipher" - depends on S390 - select CRYPTO_SKCIPHER - select CRYPTO_LIB_CHACHA_GENERIC - select CRYPTO_ARCH_HAVE_LIB_CHACHA - help - This is the s390 SIMD implementation of the ChaCha20 stream - cipher (RFC 7539). - - It is available as of z13. - config S390_PRNG tristate "Pseudo random number generator device driver" depends on S390 @@ -238,29 +146,6 @@ config S390_PRNG It is available as of z9. -config CRYPTO_GHASH_S390 - tristate "GHASH hash function" - depends on S390 - select CRYPTO_HASH - help - This is the s390 hardware accelerated implementation of GHASH, - the hash function used in GCM (Galois/Counter mode). - - It is available as of z196. - -config CRYPTO_CRC32_S390 - tristate "CRC-32 algorithms" - depends on S390 - select CRYPTO_HASH - select CRC32 - help - Select this option if you want to use hardware accelerated - implementations of CRC algorithms. With this option, you - can optimize the computation of CRC-32 (IEEE 802.3 Ethernet) - and CRC-32C (Castagnoli). - - It is available with IBM z13 or later. 
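In the same spirit, the qcom-cpufreq-hw hunks earlier guard every use of an optional interrupt. By kernel convention a non-positive value stored in an IRQ field means "not present" (platform_get_irq_optional() reports absence as an error code), so each consumer bails out early. A short hedged sketch; the struct and function names are illustrative only:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

struct foo_policy_data {
	int throttle_irq;	/* <= 0 when the optional IRQ is absent */
};

static int foo_cpu_online(struct foo_policy_data *data,
			  const struct cpumask *cpus)
{
	/* No throttle interrupt wired up: nothing to re-affine. */
	if (data->throttle_irq <= 0)
		return 0;

	return irq_set_affinity_hint(data->throttle_irq, cpus);
}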
- config CRYPTO_DEV_NIAGARA2 tristate "Niagara2 Stream Processing Unit driver" select CRYPTO_LIB_DES diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c index 9dba52fbee99..7d79a8744f9a 100644 --- a/drivers/crypto/ccp/sp-platform.c +++ b/drivers/crypto/ccp/sp-platform.c @@ -85,17 +85,9 @@ static int sp_get_irqs(struct sp_device *sp) struct sp_platform *sp_platform = sp->dev_specific; struct device *dev = sp->dev; struct platform_device *pdev = to_platform_device(dev); - unsigned int i, count; int ret; - for (i = 0, count = 0; i < pdev->num_resources; i++) { - struct resource *res = &pdev->resource[i]; - - if (resource_type(res) == IORESOURCE_IRQ) - count++; - } - - sp_platform->irq_count = count; + sp_platform->irq_count = platform_irq_count(pdev); ret = platform_get_irq(pdev, 0); if (ret < 0) { @@ -104,7 +96,7 @@ static int sp_get_irqs(struct sp_device *sp) } sp->psp_irq = ret; - if (count == 1) { + if (sp_platform->irq_count == 1) { sp->ccp_irq = ret; } else { ret = platform_get_irq(pdev, 1); diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index 0e89a7a932d4..bfc8ee876278 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -197,7 +197,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, else cxld->target_type = CXL_DECODER_ACCELERATOR; - if (is_cxl_endpoint(to_cxl_port(cxld->dev.parent))) + if (is_endpoint_decoder(&cxld->dev)) return 0; target_list.value = diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 54f434733b56..cbf23beebebe 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -355,11 +355,13 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd, return -EBUSY; /* Check the input buffer is the expected size */ - if (info->size_in != send_cmd->in.size) + if ((info->size_in != CXL_VARIABLE_PAYLOAD) && + (info->size_in != send_cmd->in.size)) return -ENOMEM; /* Check the output buffer is at least large enough */ - if (send_cmd->out.size < info->size_out) + if ((info->size_out != CXL_VARIABLE_PAYLOAD) && + (send_cmd->out.size < info->size_out)) return -ENOMEM; *mem_cmd = (struct cxl_mem_command) { diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index ea60abda6500..dbce99bdffab 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -272,7 +272,7 @@ static const struct device_type cxl_decoder_root_type = { .groups = cxl_decoder_root_attribute_groups, }; -static bool is_endpoint_decoder(struct device *dev) +bool is_endpoint_decoder(struct device *dev) { return dev->type == &cxl_decoder_endpoint_type; } diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 140dc3278cde..6799b27c7db2 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -340,6 +340,7 @@ struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port, struct cxl_decoder *to_cxl_decoder(struct device *dev); bool is_root_decoder(struct device *dev); +bool is_endpoint_decoder(struct device *dev); bool is_cxl_decoder(struct device *dev); struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port, unsigned int nr_targets); diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 60d10ee1e7fc..7df0b053373a 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -300,13 +300,13 @@ struct cxl_mbox_identify { } __packed; struct cxl_mbox_get_lsa { - u32 offset; - u32 length; + __le32 offset; + __le32 length; } __packed; struct cxl_mbox_set_lsa { - u32 offset; - u32 reserved; + __le32 offset; + __le32 reserved; u8 data[]; } __packed; diff --git 
a/drivers/cxl/mem.c b/drivers/cxl/mem.c index c310f1fd3db0..a979d0b484d5 100644 --- a/drivers/cxl/mem.c +++ b/drivers/cxl/mem.c @@ -29,6 +29,7 @@ static int create_endpoint(struct cxl_memdev *cxlmd, { struct cxl_dev_state *cxlds = cxlmd->cxlds; struct cxl_port *endpoint; + int rc; endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev, cxlds->component_reg_phys, parent_port); @@ -37,13 +38,17 @@ static int create_endpoint(struct cxl_memdev *cxlmd, dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev)); + rc = cxl_endpoint_autoremove(cxlmd, endpoint); + if (rc) + return rc; + if (!endpoint->dev.driver) { dev_err(&cxlmd->dev, "%s failed probe\n", dev_name(&endpoint->dev)); return -ENXIO; } - return cxl_endpoint_autoremove(cxlmd, endpoint); + return 0; } static void enable_suspend(void *data) diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index bbeef91e637e..0aaa70b4e0f7 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -108,8 +108,8 @@ static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds, return -EINVAL; get_lsa = (struct cxl_mbox_get_lsa) { - .offset = cmd->in_offset, - .length = cmd->in_length, + .offset = cpu_to_le32(cmd->in_offset), + .length = cpu_to_le32(cmd->in_length), }; rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa, @@ -139,7 +139,7 @@ static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds, return -ENOMEM; *set_lsa = (struct cxl_mbox_set_lsa) { - .offset = cmd->in_offset, + .offset = cpu_to_le32(cmd->in_offset), }; memcpy(set_lsa->data, cmd->in_buf, cmd->in_length); diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 01474daf4548..9602141bb8ec 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -123,7 +123,7 @@ void devfreq_get_freq_range(struct devfreq *devfreq, unsigned long *min_freq, unsigned long *max_freq) { - unsigned long *freq_table = devfreq->profile->freq_table; + unsigned long *freq_table = devfreq->freq_table; s32 qos_min_freq, qos_max_freq; lockdep_assert_held(&devfreq->lock); @@ -133,11 +133,11 @@ void devfreq_get_freq_range(struct devfreq *devfreq, * The devfreq drivers can initialize this in either ascending or * descending order and devfreq core supports both. 
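 * For example, an ascending table {100, 200, 400} MHz yields
 * (*min_freq, *max_freq) = (100, 400) MHz, while a descending table
 * {400, 200, 100} MHz produces the same range via the swapped indices.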
*/ - if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) { + if (freq_table[0] < freq_table[devfreq->max_state - 1]) { *min_freq = freq_table[0]; - *max_freq = freq_table[devfreq->profile->max_state - 1]; + *max_freq = freq_table[devfreq->max_state - 1]; } else { - *min_freq = freq_table[devfreq->profile->max_state - 1]; + *min_freq = freq_table[devfreq->max_state - 1]; *max_freq = freq_table[0]; } @@ -169,8 +169,8 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq) { int lev; - for (lev = 0; lev < devfreq->profile->max_state; lev++) - if (freq == devfreq->profile->freq_table[lev]) + for (lev = 0; lev < devfreq->max_state; lev++) + if (freq == devfreq->freq_table[lev]) return lev; return -EINVAL; @@ -178,7 +178,6 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq) static int set_freq_table(struct devfreq *devfreq) { - struct devfreq_dev_profile *profile = devfreq->profile; struct dev_pm_opp *opp; unsigned long freq; int i, count; @@ -188,25 +187,22 @@ static int set_freq_table(struct devfreq *devfreq) if (count <= 0) return -EINVAL; - profile->max_state = count; - profile->freq_table = devm_kcalloc(devfreq->dev.parent, - profile->max_state, - sizeof(*profile->freq_table), - GFP_KERNEL); - if (!profile->freq_table) { - profile->max_state = 0; + devfreq->max_state = count; + devfreq->freq_table = devm_kcalloc(devfreq->dev.parent, + devfreq->max_state, + sizeof(*devfreq->freq_table), + GFP_KERNEL); + if (!devfreq->freq_table) return -ENOMEM; - } - for (i = 0, freq = 0; i < profile->max_state; i++, freq++) { + for (i = 0, freq = 0; i < devfreq->max_state; i++, freq++) { opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq); if (IS_ERR(opp)) { - devm_kfree(devfreq->dev.parent, profile->freq_table); - profile->max_state = 0; + devm_kfree(devfreq->dev.parent, devfreq->freq_table); return PTR_ERR(opp); } dev_pm_opp_put(opp); - profile->freq_table[i] = freq; + devfreq->freq_table[i] = freq; } return 0; @@ -246,7 +242,7 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) if (lev != prev_lev) { devfreq->stats.trans_table[ - (prev_lev * devfreq->profile->max_state) + lev]++; + (prev_lev * devfreq->max_state) + lev]++; devfreq->stats.total_trans++; } @@ -835,6 +831,9 @@ struct devfreq *devfreq_add_device(struct device *dev, if (err < 0) goto err_dev; mutex_lock(&devfreq->lock); + } else { + devfreq->freq_table = devfreq->profile->freq_table; + devfreq->max_state = devfreq->profile->max_state; } devfreq->scaling_min_freq = find_available_min_freq(devfreq); @@ -870,8 +869,8 @@ struct devfreq *devfreq_add_device(struct device *dev, devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev, array3_size(sizeof(unsigned int), - devfreq->profile->max_state, - devfreq->profile->max_state), + devfreq->max_state, + devfreq->max_state), GFP_KERNEL); if (!devfreq->stats.trans_table) { mutex_unlock(&devfreq->lock); @@ -880,7 +879,7 @@ struct devfreq *devfreq_add_device(struct device *dev, } devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev, - devfreq->profile->max_state, + devfreq->max_state, sizeof(*devfreq->stats.time_in_state), GFP_KERNEL); if (!devfreq->stats.time_in_state) { @@ -932,8 +931,9 @@ struct devfreq *devfreq_add_device(struct device *dev, err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START, NULL); if (err) { - dev_err(dev, "%s: Unable to start governor for the device\n", - __func__); + dev_err_probe(dev, err, + "%s: Unable to start governor for the device\n", + __func__); goto 
err_init; } create_sysfs_files(devfreq, devfreq->governor); @@ -1665,9 +1665,9 @@ static ssize_t available_frequencies_show(struct device *d, mutex_lock(&df->lock); - for (i = 0; i < df->profile->max_state; i++) + for (i = 0; i < df->max_state; i++) count += scnprintf(&buf[count], (PAGE_SIZE - count - 2), - "%lu ", df->profile->freq_table[i]); + "%lu ", df->freq_table[i]); mutex_unlock(&df->lock); /* Truncate the trailing space */ @@ -1690,7 +1690,7 @@ static ssize_t trans_stat_show(struct device *dev, if (!df->profile) return -EINVAL; - max_state = df->profile->max_state; + max_state = df->max_state; if (max_state == 0) return sprintf(buf, "Not Supported.\n"); @@ -1707,19 +1707,17 @@ static ssize_t trans_stat_show(struct device *dev, len += sprintf(buf + len, " :"); for (i = 0; i < max_state; i++) len += sprintf(buf + len, "%10lu", - df->profile->freq_table[i]); + df->freq_table[i]); len += sprintf(buf + len, " time(ms)\n"); for (i = 0; i < max_state; i++) { - if (df->profile->freq_table[i] - == df->previous_freq) { + if (df->freq_table[i] == df->previous_freq) len += sprintf(buf + len, "*"); - } else { + else len += sprintf(buf + len, " "); - } - len += sprintf(buf + len, "%10lu:", - df->profile->freq_table[i]); + + len += sprintf(buf + len, "%10lu:", df->freq_table[i]); for (j = 0; j < max_state; j++) len += sprintf(buf + len, "%10u", df->stats.trans_table[(i * max_state) + j]); @@ -1743,7 +1741,7 @@ static ssize_t trans_stat_store(struct device *dev, if (!df->profile) return -EINVAL; - if (df->profile->max_state == 0) + if (df->max_state == 0) return count; err = kstrtoint(buf, 10, &value); @@ -1751,11 +1749,11 @@ static ssize_t trans_stat_store(struct device *dev, return -EINVAL; mutex_lock(&df->lock); - memset(df->stats.time_in_state, 0, (df->profile->max_state * + memset(df->stats.time_in_state, 0, (df->max_state * sizeof(*df->stats.time_in_state))); memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int), - df->profile->max_state, - df->profile->max_state)); + df->max_state, + df->max_state)); df->stats.total_trans = 0; df->stats.last_update = get_jiffies_64(); mutex_unlock(&df->lock); diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index 9b849d781116..a443e7c42daf 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c @@ -519,15 +519,19 @@ static int of_get_devfreq_events(struct device_node *np, count = of_get_child_count(events_np); desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL); - if (!desc) + if (!desc) { + of_node_put(events_np); return -ENOMEM; + } info->num_events = count; of_id = of_match_device(exynos_ppmu_id_match, dev); if (of_id) info->ppmu_type = (enum exynos_ppmu_type)of_id->data; - else + else { + of_node_put(events_np); return -EINVAL; + } j = 0; for_each_child_of_node(events_np, node) { diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c index e689101abc93..f7dcc44f9414 100644 --- a/drivers/devfreq/exynos-bus.c +++ b/drivers/devfreq/exynos-bus.c @@ -447,9 +447,9 @@ static int exynos_bus_probe(struct platform_device *pdev) } } - max_state = bus->devfreq->profile->max_state; - min_freq = (bus->devfreq->profile->freq_table[0] / 1000); - max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000); + max_state = bus->devfreq->max_state; + min_freq = (bus->devfreq->freq_table[0] / 1000); + max_freq = (bus->devfreq->freq_table[max_state - 1] / 1000); pr_info("exynos-bus: new bus device registered: %s (%6ld KHz ~ %6ld KHz)\n", dev_name(dev), 
min_freq, max_freq); diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c index 72c67979ebe1..953cf9a1e9f7 100644 --- a/drivers/devfreq/governor_passive.c +++ b/drivers/devfreq/governor_passive.c @@ -1,4 +1,4 @@ - // SPDX-License-Identifier: GPL-2.0-only +// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/devfreq/governor_passive.c * @@ -14,10 +14,9 @@ #include <linux/slab.h> #include <linux/device.h> #include <linux/devfreq.h> +#include <linux/units.h> #include "governor.h" -#define HZ_PER_KHZ 1000 - static struct devfreq_cpu_data * get_parent_cpu_data(struct devfreq_passive_data *p_data, struct cpufreq_policy *policy) @@ -34,6 +33,20 @@ get_parent_cpu_data(struct devfreq_passive_data *p_data, return NULL; } +static void delete_parent_cpu_data(struct devfreq_passive_data *p_data) +{ + struct devfreq_cpu_data *parent_cpu_data, *tmp; + + list_for_each_entry_safe(parent_cpu_data, tmp, &p_data->cpu_data_list, node) { + list_del(&parent_cpu_data->node); + + if (parent_cpu_data->opp_table) + dev_pm_opp_put_opp_table(parent_cpu_data->opp_table); + + kfree(parent_cpu_data); + } +} + static unsigned long get_target_freq_by_required_opp(struct device *p_dev, struct opp_table *p_opp_table, struct opp_table *opp_table, @@ -131,18 +144,18 @@ static int get_target_freq_with_devfreq(struct devfreq *devfreq, goto out; /* Use interpolation if required opps is not available */ - for (i = 0; i < parent_devfreq->profile->max_state; i++) - if (parent_devfreq->profile->freq_table[i] == *freq) + for (i = 0; i < parent_devfreq->max_state; i++) + if (parent_devfreq->freq_table[i] == *freq) break; - if (i == parent_devfreq->profile->max_state) + if (i == parent_devfreq->max_state) return -EINVAL; - if (i < devfreq->profile->max_state) { - child_freq = devfreq->profile->freq_table[i]; + if (i < devfreq->max_state) { + child_freq = devfreq->freq_table[i]; } else { - count = devfreq->profile->max_state; - child_freq = devfreq->profile->freq_table[count - 1]; + count = devfreq->max_state; + child_freq = devfreq->freq_table[count - 1]; } out: @@ -222,8 +235,7 @@ static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq) { struct devfreq_passive_data *p_data = (struct devfreq_passive_data *)devfreq->data; - struct devfreq_cpu_data *parent_cpu_data; - int cpu, ret = 0; + int ret; if (p_data->nb.notifier_call) { ret = cpufreq_unregister_notifier(&p_data->nb, @@ -232,27 +244,9 @@ static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq) return ret; } - for_each_possible_cpu(cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - if (!policy) { - ret = -EINVAL; - continue; - } - - parent_cpu_data = get_parent_cpu_data(p_data, policy); - if (!parent_cpu_data) { - cpufreq_cpu_put(policy); - continue; - } + delete_parent_cpu_data(p_data); - list_del(&parent_cpu_data->node); - if (parent_cpu_data->opp_table) - dev_pm_opp_put_opp_table(parent_cpu_data->opp_table); - kfree(parent_cpu_data); - cpufreq_cpu_put(policy); - } - - return ret; + return 0; } static int cpufreq_passive_register_notifier(struct devfreq *devfreq) @@ -336,7 +330,6 @@ err_free_cpu_data: err_put_policy: cpufreq_cpu_put(policy); err: - WARN_ON(cpufreq_passive_unregister_notifier(devfreq)); return ret; } @@ -407,8 +400,7 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq, if (!p_data) return -EINVAL; - if (!p_data->this) - p_data->this = devfreq; + p_data->this = devfreq; switch (event) { case DEVFREQ_GOV_START: diff --git a/drivers/dma-buf/dma-resv.c 
b/drivers/dma-buf/dma-resv.c index 0cce6e4ec946..205acb2c744d 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -343,7 +343,7 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, if (old->context != context) continue; - dma_resv_list_set(list, i, replacement, usage); + dma_resv_list_set(list, i, dma_fence_get(replacement), usage); dma_fence_put(old); } } diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 3e9d726504e2..7b3e6030f7b4 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1900,6 +1900,11 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan) for (i = 0; i < init_nr_desc_per_channel; i++) { desc = at_xdmac_alloc_desc(chan, GFP_KERNEL); if (!desc) { + if (i == 0) { + dev_warn(chan2dev(chan), + "can't allocate any descriptors\n"); + return -EIO; + } dev_warn(chan2dev(chan), "only %d descriptors have been allocated\n", i); break; diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 0a2168a4ccb0..f696246f57fd 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -675,16 +675,10 @@ static int dmatest_func(void *data) /* * src and dst buffers are freed by ourselves below */ - if (params->polled) { + if (params->polled) flags = DMA_CTRL_ACK; - } else { - if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask)) { - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; - } else { - pr_err("Channel does not support interrupt!\n"); - goto err_pq_array; - } - } + else + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; ktime = ktime_get(); while (!(kthread_should_stop() || @@ -912,7 +906,6 @@ error_unmap_continue: runtime = ktime_to_us(ktime); ret = 0; -err_pq_array: kfree(dma_pq); err_srcs_array: kfree(srcs); diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index e9c9bcb1f5c2..c741da02b67e 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -1164,8 +1164,9 @@ static int dma_chan_pause(struct dma_chan *dchan) BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT; axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); } else { - val = BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT | - BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT; + val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG); + val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT | + BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT; axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val); } @@ -1190,12 +1191,13 @@ static inline void axi_chan_resume(struct axi_dma_chan *chan) { u32 val; - val = axi_dma_ioread32(chan->chip, DMAC_CHEN); if (chan->chip->dw->hdata->reg_map_8_channels) { + val = axi_dma_ioread32(chan->chip, DMAC_CHEN); val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT); val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT); axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); } else { + val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG); val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT); val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT); axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val); diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index ff0ea60051f0..5a8cc52c1abf 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -716,10 +716,7 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd) struct idxd_wq *wq = idxd->wqs[i]; mutex_lock(&wq->wq_lock); - if (wq->state == IDXD_WQ_ENABLED) { - idxd_wq_disable_cleanup(wq); - wq->state = IDXD_WQ_DISABLED; - } + idxd_wq_disable_cleanup(wq); idxd_wq_device_reset_cleanup(wq); 
mutex_unlock(&wq->wq_lock); } diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 355fb3ef4cbf..aa3478257ddb 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -512,15 +512,16 @@ static int idxd_probe(struct idxd_device *idxd) dev_dbg(dev, "IDXD reset complete\n"); if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) { - if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) + if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) { dev_warn(dev, "Unable to turn on user SVA feature.\n"); - else + } else { set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); - if (idxd_enable_system_pasid(idxd)) - dev_warn(dev, "No in-kernel DMA with PASID.\n"); - else - set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); + if (idxd_enable_system_pasid(idxd)) + dev_warn(dev, "No in-kernel DMA with PASID.\n"); + else + set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); + } } else if (!sva) { dev_warn(dev, "User forced SVA off via module param.\n"); } diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 8535018ee7a2..f37a276f519e 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -891,7 +891,7 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac) * SDMA stops cyclic channel when DMA request triggers a channel and no SDMA * owned buffer is available (i.e. BD_DONE was set too late). */ - if (!is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) { + if (sdmac->desc && !is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) { dev_warn(sdmac->sdma->dev, "restart cyclic channel %d\n", sdmac->channel); sdma_enable_channel(sdmac->sdma, sdmac->channel); } @@ -2346,7 +2346,7 @@ MODULE_DESCRIPTION("i.MX SDMA driver"); #if IS_ENABLED(CONFIG_SOC_IMX6Q) MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin"); #endif -#if IS_ENABLED(CONFIG_SOC_IMX7D) +#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M) MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin"); #endif MODULE_LICENSE("GPL"); diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c index efe8bd3a0e2a..9b9184f964be 100644 --- a/drivers/dma/lgm/lgm-dma.c +++ b/drivers/dma/lgm/lgm-dma.c @@ -1593,11 +1593,12 @@ static int intel_ldma_probe(struct platform_device *pdev) d->core_clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(d->core_clk)) return PTR_ERR(d->core_clk); - clk_prepare_enable(d->core_clk); d->rst = devm_reset_control_get_optional(dev, NULL); if (IS_ERR(d->rst)) return PTR_ERR(d->rst); + + clk_prepare_enable(d->core_clk); reset_control_deassert(d->rst); ret = devm_add_action_or_reset(dev, ldma_clk_disable, d); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 858400e42ec0..09915a5cba3e 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2589,7 +2589,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) /* If the DMAC pool is empty, alloc new */ if (!desc) { - DEFINE_SPINLOCK(lock); + static DEFINE_SPINLOCK(lock); LIST_HEAD(pool); if (!add_desc(&pool, &lock, GFP_ATOMIC, 1)) diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 87f6ca1541cf..2ff787df513e 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -558,14 +558,6 @@ static int bam_alloc_chan(struct dma_chan *chan) return 0; } -static int bam_pm_runtime_get_sync(struct device *dev) -{ - if (pm_runtime_enabled(dev)) - return pm_runtime_get_sync(dev); - - return 0; -} - /** * bam_free_chan - Frees dma resources associated with specific channel * @chan: specified channel @@ -581,7 +573,7 @@ static void bam_free_chan(struct dma_chan *chan) unsigned 
long flags; int ret; - ret = bam_pm_runtime_get_sync(bdev->dev); + ret = pm_runtime_get_sync(bdev->dev); if (ret < 0) return; @@ -784,7 +776,7 @@ static int bam_pause(struct dma_chan *chan) unsigned long flag; int ret; - ret = bam_pm_runtime_get_sync(bdev->dev); + ret = pm_runtime_get_sync(bdev->dev); if (ret < 0) return ret; @@ -810,7 +802,7 @@ static int bam_resume(struct dma_chan *chan) unsigned long flag; int ret; - ret = bam_pm_runtime_get_sync(bdev->dev); + ret = pm_runtime_get_sync(bdev->dev); if (ret < 0) return ret; @@ -919,7 +911,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data) if (srcs & P_IRQ) tasklet_schedule(&bdev->task); - ret = bam_pm_runtime_get_sync(bdev->dev); + ret = pm_runtime_get_sync(bdev->dev); if (ret < 0) return IRQ_NONE; @@ -1037,7 +1029,7 @@ static void bam_start_dma(struct bam_chan *bchan) if (!vd) return; - ret = bam_pm_runtime_get_sync(bdev->dev); + ret = pm_runtime_get_sync(bdev->dev); if (ret < 0) return; @@ -1374,11 +1366,6 @@ static int bam_dma_probe(struct platform_device *pdev) if (ret) goto err_unregister_dma; - if (!bdev->bamclk) { - pm_runtime_disable(&pdev->dev); - return 0; - } - pm_runtime_irq_safe(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(&pdev->dev); @@ -1462,10 +1449,8 @@ static int __maybe_unused bam_dma_suspend(struct device *dev) { struct bam_device *bdev = dev_get_drvdata(dev); - if (bdev->bamclk) { - pm_runtime_force_suspend(dev); - clk_unprepare(bdev->bamclk); - } + pm_runtime_force_suspend(dev); + clk_unprepare(bdev->bamclk); return 0; } @@ -1475,13 +1460,11 @@ static int __maybe_unused bam_dma_resume(struct device *dev) struct bam_device *bdev = dev_get_drvdata(dev); int ret; - if (bdev->bamclk) { - ret = clk_prepare(bdev->bamclk); - if (ret) - return ret; + ret = clk_prepare(bdev->bamclk); + if (ret) + return ret; - pm_runtime_force_resume(dev); - } + pm_runtime_force_resume(dev); return 0; } diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c index 71d24fc07c00..f744ddbbbad7 100644 --- a/drivers/dma/ti/dma-crossbar.c +++ b/drivers/dma/ti/dma-crossbar.c @@ -245,6 +245,7 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, if (dma_spec->args[0] >= xbar->xbar_requests) { dev_err(&pdev->dev, "Invalid XBAR request number: %d\n", dma_spec->args[0]); + put_device(&pdev->dev); return ERR_PTR(-EINVAL); } @@ -252,12 +253,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0); if (!dma_spec->np) { dev_err(&pdev->dev, "Can't get DMA master\n"); + put_device(&pdev->dev); return ERR_PTR(-EINVAL); } map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) { of_node_put(dma_spec->np); + put_device(&pdev->dev); return ERR_PTR(-ENOMEM); } @@ -268,6 +271,8 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, mutex_unlock(&xbar->mutex); dev_err(&pdev->dev, "Run out of free DMA requests\n"); kfree(map); + of_node_put(dma_spec->np); + put_device(&pdev->dev); return ERR_PTR(-ENOMEM); } set_bit(map->xbar_out, xbar->dma_inuse); diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c index 59b0bedc9c24..c8fa7dcfdbd0 100644 --- a/drivers/edac/ghes_edac.c +++ b/drivers/edac/ghes_edac.c @@ -103,9 +103,14 @@ static void dimm_setup_label(struct dimm_info *dimm, u16 handle) dmi_memdev_name(handle, &bank, &device); - /* both strings must be non-zero */ - if (bank && *bank && device && *device) - snprintf(dimm->label, 
sizeof(dimm->label), "%s %s", bank, device); + /* + * Set the label to an empty string when both bank and device are empty. In this case, + * the label assigned by default will be preserved. + */ + snprintf(dimm->label, sizeof(dimm->label), "%s%s%s", + (bank && *bank) ? bank : "", + (bank && *bank && device && *device) ? " " : "", + (device && *device) ? device : ""); } static void assign_dmi_dimm_info(struct dimm_info *dimm, struct memdev_dmi_entry *entry) diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index 1cee64b80a7e..f7d37c282819 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -514,6 +514,28 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p) memset(p, 0, sizeof(*p)); } +static void enable_intr(struct synps_edac_priv *priv) +{ + /* Enable UE/CE Interrupts */ + if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR) + writel(DDR_UE_MASK | DDR_CE_MASK, + priv->baseaddr + ECC_CLR_OFST); + else + writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, + priv->baseaddr + DDR_QOS_IRQ_EN_OFST); + +} + +static void disable_intr(struct synps_edac_priv *priv) +{ + /* Disable UE/CE Interrupts */ + if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR) + writel(0x0, priv->baseaddr + ECC_CLR_OFST); + else + writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, + priv->baseaddr + DDR_QOS_IRQ_DB_OFST); +} + /** * intr_handler - Interrupt Handler for ECC interrupts. * @irq: IRQ number. @@ -555,6 +577,9 @@ static irqreturn_t intr_handler(int irq, void *dev_id) /* v3.0 of the controller does not have this register */ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); + else + enable_intr(priv); + return IRQ_HANDLED; } @@ -837,25 +862,6 @@ static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev) init_csrows(mci); } -static void enable_intr(struct synps_edac_priv *priv) -{ - /* Enable UE/CE Interrupts */ - if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR) - writel(DDR_UE_MASK | DDR_CE_MASK, - priv->baseaddr + ECC_CLR_OFST); - else - writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, - priv->baseaddr + DDR_QOS_IRQ_EN_OFST); - -} - -static void disable_intr(struct synps_edac_priv *priv) -{ - /* Disable UE/CE Interrupts */ - writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, - priv->baseaddr + DDR_QOS_IRQ_DB_OFST); -} - static int setup_irq(struct mem_ctl_info *mci, struct platform_device *pdev) { diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index 1e7b7fec97d9..a14f65444b35 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -149,4 +149,16 @@ config ARM_SCMI_POWER_DOMAIN will be called scmi_pm_domain. Note this may be needed early in boot before rootfs may be available. +config ARM_SCMI_POWER_CONTROL + tristate "SCMI system power control driver" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + help + This enables the System Power control logic, which binds system shutdown or + reboot actions to SCMI System Power notifications generated by SCP + firmware. + + This driver can also be built as a module. If so, the module will be + called scmi_power_control. Note this may be needed early in boot to catch + early shutdown/reboot SCMI requests.
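The synopsys_edac rework above encodes a quirk worth making explicit: on controllers with self-clearing interrupts (DDR_ECC_INTR_SELF_CLEAR) there is no status register to acknowledge, so the handler must re-arm reporting by writing the enable mask again before returning. A condensed sketch of the resulting handler shape, with the event decoding elided and register names taken from the hunk above:

static irqreturn_t ecc_intr_handler(int irq, void *dev_id)
{
	struct synps_edac_priv *priv = dev_id;
	u32 status = 0;

	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
		status = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);

	/* ... decode and report CE/UE events ... */

	if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
		/* No status register: re-arm via the enable/clear mask. */
		writel(DDR_UE_MASK | DDR_CE_MASK,
		       priv->baseaddr + ECC_CLR_OFST);
	else
		/* Older controllers: ack the latched status explicitly. */
		writel(status, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);

	return IRQ_HANDLED;
}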
+ endmenu diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 8d4afadda38c..9ea86f8cc8f7 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -7,11 +7,12 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o -scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o +scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \ $(scmi-transport-y) obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o +obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy) # The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index f6fe723ab869..d4e23101448a 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -181,7 +181,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol, return NULL; } - id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL); + id = ida_alloc_min(&scmi_bus_id, 1, GFP_KERNEL); if (id < 0) { kfree_const(scmi_dev->name); kfree(scmi_dev); @@ -204,7 +204,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol, put_dev: kfree_const(scmi_dev->name); put_device(&scmi_dev->dev); - ida_simple_remove(&scmi_bus_id, id); + ida_free(&scmi_bus_id, id); return NULL; } @@ -212,7 +212,7 @@ void scmi_device_destroy(struct scmi_device *scmi_dev) { kfree_const(scmi_dev->name); scmi_handle_put(scmi_dev->handle); - ida_simple_remove(&scmi_bus_id, scmi_dev->id); + ida_free(&scmi_bus_id, scmi_dev->id); device_unregister(&scmi_dev->dev); } diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index c7a83f6e38e5..3ed7ae0d6781 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -194,6 +194,7 @@ static int rate_cmp_func(const void *_r1, const void *_r2) } struct scmi_clk_ipriv { + struct device *dev; u32 clk_id; struct scmi_clock_info *clk; }; @@ -223,6 +224,29 @@ iter_clk_describe_update_state(struct scmi_iterator_state *st, st->num_returned = NUM_RETURNED(flags); p->clk->rate_discrete = RATE_DISCRETE(flags); + /* Warn about out of spec replies ... */ + if (!p->clk->rate_discrete && + (st->num_returned != 3 || st->num_remaining != 0)) { + dev_warn(p->dev, + "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n", + p->clk->name, st->num_returned, st->num_remaining, + st->rx_len); + + /* + * A known quirk: a triplet is returned but num_returned != 3 + * Check for a safe payload size and fix. 
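+ * (a full triplet occupies sizeof(*r) plus three {low, high} pairs of
+ * __le32 rate words, which is exactly the rx_len the check below accepts)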
+ */ + if (st->num_returned != 3 && st->num_remaining == 0 && + st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { + st->num_returned = 3; + st->num_remaining = 0; + } else { + dev_err(p->dev, + "Cannot fix out-of-spec reply !\n"); + return -EPROTO; + } + } + return 0; } @@ -255,7 +279,6 @@ iter_clk_describe_process_response(const struct scmi_protocol_handle *ph, *rate = RATE_TO_U64(r->rate[st->loop_idx]); p->clk->list.num_rates++; - //XXX dev_dbg(ph->dev, "Rate %llu Hz\n", *rate); } return ret; @@ -275,6 +298,7 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id, struct scmi_clk_ipriv cpriv = { .clk_id = clk_id, .clk = clk, + .dev = ph->dev, }; iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES, diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index c1922bd650ae..609ebedee9cb 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -19,6 +19,7 @@ #include <linux/export.h> #include <linux/idr.h> #include <linux/io.h> +#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/hashtable.h> @@ -60,6 +61,11 @@ static atomic_t transfer_last_id; static DEFINE_IDR(scmi_requested_devices); static DEFINE_MUTEX(scmi_requested_devices_mtx); +/* Track globally the creation of SCMI SystemPower related devices */ +static bool scmi_syspower_registered; +/* Protect access to scmi_syspower_registered */ +static DEFINE_MUTEX(scmi_syspower_mtx); + struct scmi_requested_dev { const struct scmi_device_id *id_table; struct list_head node; @@ -660,6 +666,11 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, smp_store_mb(xfer->priv, priv); info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, xfer); + + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); + scmi_notify(cinfo->handle, xfer->hdr.protocol_id, xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); @@ -694,6 +705,12 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, smp_store_mb(xfer->priv, priv); info->desc->ops->fetch_response(cinfo, xfer); + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, + xfer->hdr.type == MSG_TYPE_DELAYED_RESP ? + "DLYD" : "RESP", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); + trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, xfer->hdr.type); @@ -827,6 +844,12 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, xfer->state = SCMI_XFER_RESP_OK; } spin_unlock_irqrestore(&xfer->lock, flags); + + /* Trace polled replies. */ + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, + "RESP", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); } } else { /* And we wait for the response. 
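 * The interrupt-driven case: scmi_handle_response() seen earlier completes
 * the xfer from the transport's reply path, with the wait bounded by the
 * transport's RX timeout.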
*/ @@ -903,6 +926,10 @@ static int do_xfer(const struct scmi_protocol_handle *ph, return ret; } + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND", + xfer->hdr.seq, xfer->hdr.status, + xfer->tx.buf, xfer->tx.len); + ret = scmi_wait_for_message_response(cinfo, xfer); if (!ret && xfer->hdr.status) ret = scmi_to_linux_errno(xfer->hdr.status); @@ -1223,6 +1250,7 @@ static int scmi_iterator_run(void *iter) if (ret) break; + st->rx_len = i->t->rx.len; ret = iops->update_state(st, i->resp, i->priv); if (ret) break; @@ -1258,10 +1286,174 @@ out: return ret; } +struct scmi_msg_get_fc_info { + __le32 domain; + __le32 message_id; +}; + +struct scmi_msg_resp_desc_fc { + __le32 attr; +#define SUPPORTS_DOORBELL(x) ((x) & BIT(0)) +#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x)) + __le32 rate_limit; + __le32 chan_addr_low; + __le32 chan_addr_high; + __le32 chan_size; + __le32 db_addr_low; + __le32 db_addr_high; + __le32 db_set_lmask; + __le32 db_set_hmask; + __le32 db_preserve_lmask; + __le32 db_preserve_hmask; +}; + +static void +scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, + u8 describe_id, u32 message_id, u32 valid_size, + u32 domain, void __iomem **p_addr, + struct scmi_fc_db_info **p_db) +{ + int ret; + u32 flags; + u64 phys_addr; + u8 size; + void __iomem *addr; + struct scmi_xfer *t; + struct scmi_fc_db_info *db = NULL; + struct scmi_msg_get_fc_info *info; + struct scmi_msg_resp_desc_fc *resp; + const struct scmi_protocol_instance *pi = ph_to_pi(ph); + + if (!p_addr) { + ret = -EINVAL; + goto err_out; + } + + ret = ph->xops->xfer_get_init(ph, describe_id, + sizeof(*info), sizeof(*resp), &t); + if (ret) + goto err_out; + + info = t->tx.buf; + info->domain = cpu_to_le32(domain); + info->message_id = cpu_to_le32(message_id); + + /* + * Bail out on error leaving fc_info addresses zeroed; this includes + * the case in which the requested domain/message_id does NOT support + * fastchannels at all. + */ + ret = ph->xops->do_xfer(ph, t); + if (ret) + goto err_xfer; + + resp = t->rx.buf; + flags = le32_to_cpu(resp->attr); + size = le32_to_cpu(resp->chan_size); + if (size != valid_size) { + ret = -EINVAL; + goto err_xfer; + } + + phys_addr = le32_to_cpu(resp->chan_addr_low); + phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32; + addr = devm_ioremap(ph->dev, phys_addr, size); + if (!addr) { + ret = -EADDRNOTAVAIL; + goto err_xfer; + } + + *p_addr = addr; + + if (p_db && SUPPORTS_DOORBELL(flags)) { + db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL); + if (!db) { + ret = -ENOMEM; + goto err_db; + } + + size = 1 << DOORBELL_REG_WIDTH(flags); + phys_addr = le32_to_cpu(resp->db_addr_low); + phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; + addr = devm_ioremap(ph->dev, phys_addr, size); + if (!addr) { + ret = -EADDRNOTAVAIL; + goto err_db_mem; + } + + db->addr = addr; + db->width = size; + db->set = le32_to_cpu(resp->db_set_lmask); + db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; + db->mask = le32_to_cpu(resp->db_preserve_lmask); + db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; + + *p_db = db; + } + + ph->xops->xfer_put(ph, t); + + dev_dbg(ph->dev, + "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n", + pi->proto->id, message_id, domain); + + return; + +err_db_mem: + devm_kfree(ph->dev, db); + +err_db: + *p_addr = NULL; + +err_xfer: + ph->xops->xfer_put(ph, t); + +err_out: + dev_warn(ph->dev, + "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. 
Using regular messaging.\n", + pi->proto->id, message_id, domain, ret); +} + +#define SCMI_PROTO_FC_RING_DB(w) \ +do { \ + u##w val = 0; \ + \ + if (db->mask) \ + val = ioread##w(db->addr) & db->mask; \ + iowrite##w((u##w)db->set | val, db->addr); \ +} while (0) + +static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db) +{ + if (!db || !db->addr) + return; + + if (db->width == 1) + SCMI_PROTO_FC_RING_DB(8); + else if (db->width == 2) + SCMI_PROTO_FC_RING_DB(16); + else if (db->width == 4) + SCMI_PROTO_FC_RING_DB(32); + else /* db->width == 8 */ +#ifdef CONFIG_64BIT + SCMI_PROTO_FC_RING_DB(64); +#else + { + u64 val = 0; + + if (db->mask) + val = ioread64_hi_lo(db->addr) & db->mask; + iowrite64_hi_lo(db->set | val, db->addr); + } +#endif +} + static const struct scmi_proto_helpers_ops helpers_ops = { .extended_name_get = scmi_common_extended_name_get, .iter_response_init = scmi_iterator_init, .iter_response_run = scmi_iterator_run, + .fastchannel_init = scmi_common_fastchannel_init, + .fastchannel_db_ring = scmi_common_fastchannel_db_ring, }; /** @@ -1496,6 +1688,30 @@ static void scmi_devm_release_protocol(struct device *dev, void *res) scmi_protocol_release(dres->handle, dres->protocol_id); } +static struct scmi_protocol_instance __must_check * +scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id) +{ + struct scmi_protocol_instance *pi; + struct scmi_protocol_devres *dres; + + dres = devres_alloc(scmi_devm_release_protocol, + sizeof(*dres), GFP_KERNEL); + if (!dres) + return ERR_PTR(-ENOMEM); + + pi = scmi_get_protocol_instance(sdev->handle, protocol_id); + if (IS_ERR(pi)) { + devres_free(dres); + return pi; + } + + dres->handle = sdev->handle; + dres->protocol_id = protocol_id; + devres_add(&sdev->dev, dres); + + return pi; +} + /** * scmi_devm_protocol_get - Devres managed get protocol operations and handle * @sdev: A reference to an scmi_device whose embedded struct device is to @@ -1519,32 +1735,47 @@ scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id, struct scmi_protocol_handle **ph) { struct scmi_protocol_instance *pi; - struct scmi_protocol_devres *dres; - struct scmi_handle *handle = sdev->handle; if (!ph) return ERR_PTR(-EINVAL); - dres = devres_alloc(scmi_devm_release_protocol, - sizeof(*dres), GFP_KERNEL); - if (!dres) - return ERR_PTR(-ENOMEM); - - pi = scmi_get_protocol_instance(handle, protocol_id); - if (IS_ERR(pi)) { - devres_free(dres); + pi = scmi_devres_protocol_instance_get(sdev, protocol_id); + if (IS_ERR(pi)) return pi; - } - - dres->handle = handle; - dres->protocol_id = protocol_id; - devres_add(&sdev->dev, dres); *ph = &pi->ph; return pi->proto->ops; } +/** + * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol + * @sdev: A reference to an scmi_device whose embedded struct device is to + * be used for devres accounting. + * @protocol_id: The protocol being requested. + * + * Get hold of a protocol accounting for its usage, possibly triggering its + * initialization but without getting access to its protocol specific operations + * and handle. + * + * Being a devres based managed method, protocol hold will be automatically + * released, and possibly de-initialized on last user, once the SCMI driver + * owning the scmi_device is unbound from it. 
+ * + * Return: 0 on SUCCESS + */ +static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev, + u8 protocol_id) +{ + struct scmi_protocol_instance *pi; + + pi = scmi_devres_protocol_instance_get(sdev, protocol_id); + if (IS_ERR(pi)) + return PTR_ERR(pi); + + return 0; +} + static int scmi_devm_protocol_match(struct device *dev, void *res, void *data) { struct scmi_protocol_devres *dres = res; @@ -1848,21 +2079,39 @@ scmi_get_protocol_device(struct device_node *np, struct scmi_info *info, if (sdev) return sdev; + mutex_lock(&scmi_syspower_mtx); + if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) { + dev_warn(info->dev, + "SCMI SystemPower protocol device must be unique !\n"); + mutex_unlock(&scmi_syspower_mtx); + + return NULL; + } + pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id); sdev = scmi_device_create(np, info->dev, prot_id, name); if (!sdev) { dev_err(info->dev, "failed to create %d protocol device\n", prot_id); + mutex_unlock(&scmi_syspower_mtx); + return NULL; } if (scmi_txrx_setup(info, &sdev->dev, prot_id)) { dev_err(&sdev->dev, "failed to setup transport\n"); scmi_device_destroy(sdev); + mutex_unlock(&scmi_syspower_mtx); + return NULL; } + if (prot_id == SCMI_PROTOCOL_SYSTEM) + scmi_syspower_registered = true; + + mutex_unlock(&scmi_syspower_mtx); + return sdev; } @@ -2131,6 +2380,7 @@ static int scmi_probe(struct platform_device *pdev) handle = &info->handle; handle->dev = info->dev; handle->version = &info->version; + handle->devm_protocol_acquire = scmi_devm_protocol_acquire; handle->devm_protocol_get = scmi_devm_protocol_get; handle->devm_protocol_put = scmi_devm_protocol_put; @@ -2400,6 +2650,7 @@ static int __init scmi_driver_init(void) scmi_sensors_register(); scmi_voltage_register(); scmi_system_register(); + scmi_powercap_register(); return platform_driver_register(&scmi_driver); } @@ -2416,6 +2667,7 @@ static void __exit scmi_driver_exit(void) scmi_sensors_unregister(); scmi_voltage_unregister(); scmi_system_unregister(); + scmi_powercap_unregister(); scmi_bus_exit(); diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index b503c22cfd32..8abace56b958 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -117,6 +117,7 @@ struct scmi_optee_channel { u32 channel_id; u32 tee_session; u32 caps; + u32 rx_len; struct mutex mu; struct scmi_chan_info *cinfo; union { @@ -302,6 +303,9 @@ static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t return -EIO; } + /* Save response size */ + channel->rx_len = param[2].u.memref.size; + return 0; } @@ -353,6 +357,7 @@ static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *ch shbuf = tee_shm_get_va(channel->tee_shm, 0); memset(shbuf, 0, msg_size); channel->req.msg = shbuf; + channel->rx_len = msg_size; return 0; } @@ -508,7 +513,7 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, struct scmi_optee_channel *channel = cinfo->transport_info; if (channel->tee_shm) - msg_fetch_response(channel->req.msg, SCMI_OPTEE_MAX_MSG_SIZE, xfer); + msg_fetch_response(channel->req.msg, channel->rx_len, xfer); else shmem_fetch_response(channel->req.shmem, xfer); } diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index bbb0331801ff..64ea2d2f2875 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -10,13 +10,14 @@ #include <linux/bits.h> #include <linux/of.h> #include <linux/io.h> -#include 
<linux/io-64-nonatomic-hi-lo.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_opp.h> #include <linux/scmi_protocol.h> #include <linux/sort.h> +#include <trace/events/scmi.h> + #include "protocols.h" #include "notify.h" @@ -35,6 +36,12 @@ enum scmi_performance_protocol_cmd { PERF_DOMAIN_NAME_GET = 0xc, }; +enum { + PERF_FC_LEVEL, + PERF_FC_LIMIT, + PERF_FC_MAX, +}; + struct scmi_opp { u32 perf; u32 power; @@ -115,43 +122,6 @@ struct scmi_msg_resp_perf_describe_levels { } opp[]; }; -struct scmi_perf_get_fc_info { - __le32 domain; - __le32 message_id; -}; - -struct scmi_msg_resp_perf_desc_fc { - __le32 attr; -#define SUPPORTS_DOORBELL(x) ((x) & BIT(0)) -#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x)) - __le32 rate_limit; - __le32 chan_addr_low; - __le32 chan_addr_high; - __le32 chan_size; - __le32 db_addr_low; - __le32 db_addr_high; - __le32 db_set_lmask; - __le32 db_set_hmask; - __le32 db_preserve_lmask; - __le32 db_preserve_hmask; -}; - -struct scmi_fc_db_info { - int width; - u64 set; - u64 mask; - void __iomem *addr; -}; - -struct scmi_fc_info { - void __iomem *level_set_addr; - void __iomem *limit_set_addr; - void __iomem *level_get_addr; - void __iomem *limit_get_addr; - struct scmi_fc_db_info *level_set_db; - struct scmi_fc_db_info *limit_set_db; -}; - struct perf_dom_info { bool set_limits; bool set_perf; @@ -360,40 +330,6 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, return ret; } -#define SCMI_PERF_FC_RING_DB(w) \ -do { \ - u##w val = 0; \ - \ - if (db->mask) \ - val = ioread##w(db->addr) & db->mask; \ - iowrite##w((u##w)db->set | val, db->addr); \ -} while (0) - -static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db) -{ - if (!db || !db->addr) - return; - - if (db->width == 1) - SCMI_PERF_FC_RING_DB(8); - else if (db->width == 2) - SCMI_PERF_FC_RING_DB(16); - else if (db->width == 4) - SCMI_PERF_FC_RING_DB(32); - else /* db->width == 8 */ -#ifdef CONFIG_64BIT - SCMI_PERF_FC_RING_DB(64); -#else - { - u64 val = 0; - - if (db->mask) - val = ioread64_hi_lo(db->addr) & db->mask; - iowrite64_hi_lo(db->set | val, db->addr); - } -#endif -} - static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph, u32 domain, u32 max_perf, u32 min_perf) { @@ -426,10 +362,14 @@ static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph, if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf) return -EINVAL; - if (dom->fc_info && dom->fc_info->limit_set_addr) { - iowrite32(max_perf, dom->fc_info->limit_set_addr); - iowrite32(min_perf, dom->fc_info->limit_set_addr + 4); - scmi_perf_fc_ring_db(dom->fc_info->limit_set_db); + if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT]; + + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET, + domain, min_perf, max_perf); + iowrite32(max_perf, fci->set_addr); + iowrite32(min_perf, fci->set_addr + 4); + ph->hops->fastchannel_db_ring(fci->set_db); return 0; } @@ -468,9 +408,13 @@ static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph, struct scmi_perf_info *pi = ph->get_priv(ph); struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info->limit_get_addr) { - *max_perf = ioread32(dom->fc_info->limit_get_addr); - *min_perf = ioread32(dom->fc_info->limit_get_addr + 4); + if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT]; + + *max_perf = ioread32(fci->get_addr); + 
*min_perf = ioread32(fci->get_addr + 4); + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET, + domain, *min_perf, *max_perf); return 0; } @@ -505,9 +449,13 @@ static int scmi_perf_level_set(const struct scmi_protocol_handle *ph, struct scmi_perf_info *pi = ph->get_priv(ph); struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info->level_set_addr) { - iowrite32(level, dom->fc_info->level_set_addr); - scmi_perf_fc_ring_db(dom->fc_info->level_set_db); + if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL]; + + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET, + domain, level, 0); + iowrite32(level, fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); return 0; } @@ -542,8 +490,10 @@ static int scmi_perf_level_get(const struct scmi_protocol_handle *ph, struct scmi_perf_info *pi = ph->get_priv(ph); struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info->level_get_addr) { - *level = ioread32(dom->fc_info->level_get_addr); + if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) { + *level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET, + domain, *level, 0); return 0; } @@ -572,100 +522,33 @@ static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph, return ret; } -static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size) -{ - if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4) - return true; - if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8) - return true; - return false; -} - -static void -scmi_perf_domain_desc_fc(const struct scmi_protocol_handle *ph, u32 domain, - u32 message_id, void __iomem **p_addr, - struct scmi_fc_db_info **p_db) -{ - int ret; - u32 flags; - u64 phys_addr; - u8 size; - void __iomem *addr; - struct scmi_xfer *t; - struct scmi_fc_db_info *db; - struct scmi_perf_get_fc_info *info; - struct scmi_msg_resp_perf_desc_fc *resp; - - if (!p_addr) - return; - - ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_FASTCHANNEL, - sizeof(*info), sizeof(*resp), &t); - if (ret) - return; - - info = t->tx.buf; - info->domain = cpu_to_le32(domain); - info->message_id = cpu_to_le32(message_id); - - ret = ph->xops->do_xfer(ph, t); - if (ret) - goto err_xfer; - - resp = t->rx.buf; - flags = le32_to_cpu(resp->attr); - size = le32_to_cpu(resp->chan_size); - if (!scmi_perf_fc_size_is_valid(message_id, size)) - goto err_xfer; - - phys_addr = le32_to_cpu(resp->chan_addr_low); - phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32; - addr = devm_ioremap(ph->dev, phys_addr, size); - if (!addr) - goto err_xfer; - *p_addr = addr; - - if (p_db && SUPPORTS_DOORBELL(flags)) { - db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL); - if (!db) - goto err_xfer; - - size = 1 << DOORBELL_REG_WIDTH(flags); - phys_addr = le32_to_cpu(resp->db_addr_low); - phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; - addr = devm_ioremap(ph->dev, phys_addr, size); - if (!addr) - goto err_xfer; - - db->addr = addr; - db->width = size; - db->set = le32_to_cpu(resp->db_set_lmask); - db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; - db->mask = le32_to_cpu(resp->db_preserve_lmask); - db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; - *p_db = db; - } -err_xfer: - ph->xops->xfer_put(ph, t); -} - static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph, u32 domain, struct scmi_fc_info **p_fc) { struct scmi_fc_info *fc; - fc = 
devm_kzalloc(ph->dev, sizeof(*fc), GFP_KERNEL); + fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL); if (!fc) return; - scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_SET, - &fc->level_set_addr, &fc->level_set_db); - scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_GET, - &fc->level_get_addr, NULL); - scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_SET, - &fc->limit_set_addr, &fc->limit_set_db); - scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_GET, - &fc->limit_get_addr, NULL); + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LEVEL_SET, 4, domain, + &fc[PERF_FC_LEVEL].set_addr, + &fc[PERF_FC_LEVEL].set_db); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LEVEL_GET, 4, domain, + &fc[PERF_FC_LEVEL].get_addr, NULL); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LIMITS_SET, 8, domain, + &fc[PERF_FC_LIMIT].set_addr, + &fc[PERF_FC_LIMIT].set_db); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LIMITS_GET, 8, domain, + &fc[PERF_FC_LIMIT].get_addr, NULL); + *p_fc = fc; } @@ -789,7 +672,7 @@ static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph, dom = pi->dom_info + scmi_dev_domain_id(dev); - return dom->fc_info && dom->fc_info->level_set_addr; + return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr; } static bool scmi_power_scale_mw_get(const struct scmi_protocol_handle *ph) diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c new file mode 100644 index 000000000000..83b90bde755c --- /dev/null +++ b/drivers/firmware/arm_scmi/powercap.c @@ -0,0 +1,866 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Powercap Protocol + * + * Copyright (C) 2022 ARM Ltd. 
+ */ + +#define pr_fmt(fmt) "SCMI Notifications POWERCAP - " fmt + +#include <linux/bitfield.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/scmi_protocol.h> + +#include <trace/events/scmi.h> + +#include "protocols.h" +#include "notify.h" + +enum scmi_powercap_protocol_cmd { + POWERCAP_DOMAIN_ATTRIBUTES = 0x3, + POWERCAP_CAP_GET = 0x4, + POWERCAP_CAP_SET = 0x5, + POWERCAP_PAI_GET = 0x6, + POWERCAP_PAI_SET = 0x7, + POWERCAP_DOMAIN_NAME_GET = 0x8, + POWERCAP_MEASUREMENTS_GET = 0x9, + POWERCAP_CAP_NOTIFY = 0xa, + POWERCAP_MEASUREMENTS_NOTIFY = 0xb, + POWERCAP_DESCRIBE_FASTCHANNEL = 0xc, +}; + +enum { + POWERCAP_FC_CAP, + POWERCAP_FC_PAI, + POWERCAP_FC_MAX, +}; + +struct scmi_msg_resp_powercap_domain_attributes { + __le32 attributes; +#define SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(x) ((x) & BIT(31)) +#define SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(x) ((x) & BIT(30)) +#define SUPPORTS_ASYNC_POWERCAP_CAP_SET(x) ((x) & BIT(29)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(28)) +#define SUPPORTS_POWERCAP_CAP_CONFIGURATION(x) ((x) & BIT(27)) +#define SUPPORTS_POWERCAP_MONITORING(x) ((x) & BIT(26)) +#define SUPPORTS_POWERCAP_PAI_CONFIGURATION(x) ((x) & BIT(25)) +#define SUPPORTS_POWERCAP_FASTCHANNELS(x) ((x) & BIT(22)) +#define POWERCAP_POWER_UNIT(x) \ + (FIELD_GET(GENMASK(24, 23), (x))) +#define SUPPORTS_POWER_UNITS_MW(x) \ + (POWERCAP_POWER_UNIT(x) == 0x2) +#define SUPPORTS_POWER_UNITS_UW(x) \ + (POWERCAP_POWER_UNIT(x) == 0x1) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; + __le32 min_pai; + __le32 max_pai; + __le32 pai_step; + __le32 min_power_cap; + __le32 max_power_cap; + __le32 power_cap_step; + __le32 sustainable_power; + __le32 accuracy; + __le32 parent_id; +}; + +struct scmi_msg_powercap_set_cap_or_pai { + __le32 domain; + __le32 flags; +#define CAP_SET_ASYNC BIT(1) +#define CAP_SET_IGNORE_DRESP BIT(0) + __le32 value; +}; + +struct scmi_msg_resp_powercap_cap_set_complete { + __le32 domain; + __le32 power_cap; +}; + +struct scmi_msg_resp_powercap_meas_get { + __le32 power; + __le32 pai; +}; + +struct scmi_msg_powercap_notify_cap { + __le32 domain; + __le32 notify_enable; +}; + +struct scmi_msg_powercap_notify_thresh { + __le32 domain; + __le32 notify_enable; + __le32 power_thresh_low; + __le32 power_thresh_high; +}; + +struct scmi_powercap_cap_changed_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 power_cap; + __le32 pai; +}; + +struct scmi_powercap_meas_changed_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 power; +}; + +struct scmi_powercap_state { + bool meas_notif_enabled; + u64 thresholds; +#define THRESH_LOW(p, id) \ + (lower_32_bits((p)->states[(id)].thresholds)) +#define THRESH_HIGH(p, id) \ + (upper_32_bits((p)->states[(id)].thresholds)) +}; + +struct powercap_info { + u32 version; + int num_domains; + struct scmi_powercap_state *states; + struct scmi_powercap_info *powercaps; +}; + +static enum scmi_powercap_protocol_cmd evt_2_cmd[] = { + POWERCAP_CAP_NOTIFY, + POWERCAP_MEASUREMENTS_NOTIFY, +}; + +static int scmi_powercap_notify(const struct scmi_protocol_handle *ph, + u32 domain, int message_id, bool enable); + +static int +scmi_powercap_attributes_get(const struct scmi_protocol_handle *ph, + struct powercap_info *pi) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, + sizeof(u32), &t); + if (ret) + return ret; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + u32 attributes; + + attributes = get_unaligned_le32(t->rx.buf); + pi->num_domains = FIELD_GET(GENMASK(15, 0), attributes); + 
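The PROTOCOL_ATTRIBUTES decode just above uses the usual FIELD_GET()/GENMASK() bitfield idiom: the low 16 bits of the attributes dword carry the domain count. A standalone sketch of that decode follows; GENMASK32/FIELD_GET32 are simplified illustrative stand-ins for the kernel macros, not the real ones:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified 32-bit stand-ins for the kernel's GENMASK()/FIELD_GET(). */
    #define GENMASK32(h, l)   (((~0u) >> (31 - (h))) & ((~0u) << (l)))
    #define FIELD_GET32(m, v) (((v) & (m)) / ((m) & ~((m) << 1)))

    int main(void)
    {
        uint32_t attributes = 0x00000004;   /* example reply payload */

        /* Bits [15:0] of POWERCAP PROTOCOL_ATTRIBUTES: number of domains. */
        printf("num_domains = %u\n", FIELD_GET32(GENMASK32(15, 0), attributes));
        return 0;
    }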
} + + ph->xops->xfer_put(ph, t); + return ret; +} + +static inline int +scmi_powercap_validate(unsigned int min_val, unsigned int max_val, + unsigned int step_val, bool configurable) +{ + if (!min_val || !max_val) + return -EPROTO; + + if ((configurable && min_val == max_val) || + (!configurable && min_val != max_val)) + return -EPROTO; + + if (min_val != max_val && !step_val) + return -EPROTO; + + return 0; +} + +static int +scmi_powercap_domain_attributes_get(const struct scmi_protocol_handle *ph, + struct powercap_info *pinfo, u32 domain) +{ + int ret; + u32 flags; + struct scmi_xfer *t; + struct scmi_powercap_info *dom_info = pinfo->powercaps + domain; + struct scmi_msg_resp_powercap_domain_attributes *resp; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_DOMAIN_ATTRIBUTES, + sizeof(domain), sizeof(*resp), &t); + if (ret) + return ret; + + put_unaligned_le32(domain, t->tx.buf); + resp = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + flags = le32_to_cpu(resp->attributes); + + dom_info->id = domain; + dom_info->notify_powercap_cap_change = + SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags); + dom_info->notify_powercap_measurement_change = + SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags); + dom_info->async_powercap_cap_set = + SUPPORTS_ASYNC_POWERCAP_CAP_SET(flags); + dom_info->powercap_cap_config = + SUPPORTS_POWERCAP_CAP_CONFIGURATION(flags); + dom_info->powercap_monitoring = + SUPPORTS_POWERCAP_MONITORING(flags); + dom_info->powercap_pai_config = + SUPPORTS_POWERCAP_PAI_CONFIGURATION(flags); + dom_info->powercap_scale_mw = + SUPPORTS_POWER_UNITS_MW(flags); + dom_info->powercap_scale_uw = + SUPPORTS_POWER_UNITS_UW(flags); + dom_info->fastchannels = + SUPPORTS_POWERCAP_FASTCHANNELS(flags); + + strscpy(dom_info->name, resp->name, SCMI_SHORT_NAME_MAX_SIZE); + + dom_info->min_pai = le32_to_cpu(resp->min_pai); + dom_info->max_pai = le32_to_cpu(resp->max_pai); + dom_info->pai_step = le32_to_cpu(resp->pai_step); + ret = scmi_powercap_validate(dom_info->min_pai, + dom_info->max_pai, + dom_info->pai_step, + dom_info->powercap_pai_config); + if (ret) { + dev_err(ph->dev, + "Platform reported inconsistent PAI config for domain %d - %s\n", + dom_info->id, dom_info->name); + goto clean; + } + + dom_info->min_power_cap = le32_to_cpu(resp->min_power_cap); + dom_info->max_power_cap = le32_to_cpu(resp->max_power_cap); + dom_info->power_cap_step = le32_to_cpu(resp->power_cap_step); + ret = scmi_powercap_validate(dom_info->min_power_cap, + dom_info->max_power_cap, + dom_info->power_cap_step, + dom_info->powercap_cap_config); + if (ret) { + dev_err(ph->dev, + "Platform reported inconsistent CAP config for domain %d - %s\n", + dom_info->id, dom_info->name); + goto clean; + } + + dom_info->sustainable_power = + le32_to_cpu(resp->sustainable_power); + dom_info->accuracy = le32_to_cpu(resp->accuracy); + + dom_info->parent_id = le32_to_cpu(resp->parent_id); + if (dom_info->parent_id != SCMI_POWERCAP_ROOT_ZONE_ID && + (dom_info->parent_id >= pinfo->num_domains || + dom_info->parent_id == dom_info->id)) { + dev_err(ph->dev, + "Platform reported inconsistent parent ID for domain %d - %s\n", + dom_info->id, dom_info->name); + ret = -ENODEV; + } + } + +clean: + ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
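The consistency rules that scmi_powercap_validate() above enforces on each advertised range can be exercised in isolation. A minimal standalone sketch of the same checks, with -EPROTO replaced by a generic error code:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Same rules as scmi_powercap_validate(): both bounds must be non-zero,
     * a configurable value must span a real range, a non-configurable one
     * must be fixed (min == max), and any real range needs a non-zero step.
     */
    static int validate(unsigned int min_val, unsigned int max_val,
                        unsigned int step_val, bool configurable)
    {
        if (!min_val || !max_val)
            return -1;
        if ((configurable && min_val == max_val) ||
            (!configurable && min_val != max_val))
            return -1;
        if (min_val != max_val && !step_val)
            return -1;
        return 0;
    }

    int main(void)
    {
        /* Configurable cap, 100..1000 in steps of 50: accepted. */
        printf("%d\n", validate(100, 1000, 50, true));   /* 0  */
        /* Configurable but min == max: rejected as inconsistent. */
        printf("%d\n", validate(500, 500, 0, true));     /* -1 */
        return 0;
    }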
+ */ + if (!ret && SUPPORTS_EXTENDED_NAMES(flags)) + ph->hops->extended_name_get(ph, POWERCAP_DOMAIN_NAME_GET, + domain, dom_info->name, + SCMI_MAX_STR_SIZE); + + return ret; +} + +static int scmi_powercap_num_domains_get(const struct scmi_protocol_handle *ph) +{ + struct powercap_info *pi = ph->get_priv(ph); + + return pi->num_domains; +} + +static const struct scmi_powercap_info * +scmi_powercap_dom_info_get(const struct scmi_protocol_handle *ph, u32 domain_id) +{ + struct powercap_info *pi = ph->get_priv(ph); + + if (domain_id >= pi->num_domains) + return NULL; + + return pi->powercaps + domain_id; +} + +static int scmi_powercap_xfer_cap_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_cap) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_GET, sizeof(u32), + sizeof(u32), &t); + if (ret) + return ret; + + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *power_cap = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_powercap_cap_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_cap) +{ + struct scmi_powercap_info *dom; + struct powercap_info *pi = ph->get_priv(ph); + + if (!power_cap || domain_id >= pi->num_domains) + return -EINVAL; + + dom = pi->powercaps + domain_id; + if (dom->fc_info && dom->fc_info[POWERCAP_FC_CAP].get_addr) { + *power_cap = ioread32(dom->fc_info[POWERCAP_FC_CAP].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_GET, + domain_id, *power_cap, 0); + return 0; + } + + return scmi_powercap_xfer_cap_get(ph, domain_id, power_cap); +} + +static int scmi_powercap_xfer_cap_set(const struct scmi_protocol_handle *ph, + const struct scmi_powercap_info *pc, + u32 power_cap, bool ignore_dresp) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_powercap_set_cap_or_pai *msg; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_SET, + sizeof(*msg), 0, &t); + if (ret) + return ret; + + msg = t->tx.buf; + msg->domain = cpu_to_le32(pc->id); + msg->flags = + cpu_to_le32(FIELD_PREP(CAP_SET_ASYNC, !!pc->async_powercap_cap_set) | + FIELD_PREP(CAP_SET_IGNORE_DRESP, !!ignore_dresp)); + msg->value = cpu_to_le32(power_cap); + + if (!pc->async_powercap_cap_set || ignore_dresp) { + ret = ph->xops->do_xfer(ph, t); + } else { + ret = ph->xops->do_xfer_with_response(ph, t); + if (!ret) { + struct scmi_msg_resp_powercap_cap_set_complete *resp; + + resp = t->rx.buf; + if (le32_to_cpu(resp->domain) == pc->id) + dev_dbg(ph->dev, + "Powercap ID %d CAP set async to %u\n", + pc->id, + get_unaligned_le32(&resp->power_cap)); + else + ret = -EPROTO; + } + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_powercap_cap_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 power_cap, + bool ignore_dresp) +{ + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_cap_config || !power_cap || + power_cap < pc->min_power_cap || + power_cap > pc->max_power_cap) + return -EINVAL; + + if (pc->fc_info && pc->fc_info[POWERCAP_FC_CAP].set_addr) { + struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_CAP]; + + iowrite32(power_cap, fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_SET, + domain_id, power_cap, 0); + return 0; + } + + return scmi_powercap_xfer_cap_set(ph, pc, power_cap, ignore_dresp); +} + +static int scmi_powercap_xfer_pai_get(const struct 
scmi_protocol_handle *ph, + u32 domain_id, u32 *pai) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_GET, sizeof(u32), + sizeof(u32), &t); + if (ret) + return ret; + + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *pai = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_powercap_pai_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *pai) +{ + struct scmi_powercap_info *dom; + struct powercap_info *pi = ph->get_priv(ph); + + if (!pai || domain_id >= pi->num_domains) + return -EINVAL; + + dom = pi->powercaps + domain_id; + if (dom->fc_info && dom->fc_info[POWERCAP_FC_PAI].get_addr) { + *pai = ioread32(dom->fc_info[POWERCAP_FC_PAI].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_GET, + domain_id, *pai, 0); + return 0; + } + + return scmi_powercap_xfer_pai_get(ph, domain_id, pai); +} + +static int scmi_powercap_xfer_pai_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 pai) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_powercap_set_cap_or_pai *msg; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_SET, + sizeof(*msg), 0, &t); + if (ret) + return ret; + + msg = t->tx.buf; + msg->domain = cpu_to_le32(domain_id); + msg->flags = cpu_to_le32(0); + msg->value = cpu_to_le32(pai); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_powercap_pai_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 pai) +{ + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_pai_config || !pai || + pai < pc->min_pai || pai > pc->max_pai) + return -EINVAL; + + if (pc->fc_info && pc->fc_info[POWERCAP_FC_PAI].set_addr) { + struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_PAI]; + + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_SET, + domain_id, pai, 0); + iowrite32(pai, fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); + return 0; + } + + return scmi_powercap_xfer_pai_set(ph, domain_id, pai); +} + +static int scmi_powercap_measurements_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *average_power, + u32 *pai) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_powercap_meas_get *resp; + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_monitoring || !pai || !average_power) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_MEASUREMENTS_GET, + sizeof(u32), sizeof(*resp), &t); + if (ret) + return ret; + + resp = t->rx.buf; + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + *average_power = le32_to_cpu(resp->power); + *pai = le32_to_cpu(resp->pai); + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int +scmi_powercap_measurements_threshold_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_thresh_low, + u32 *power_thresh_high) +{ + struct powercap_info *pi = ph->get_priv(ph); + + if (!power_thresh_low || !power_thresh_high || + domain_id >= pi->num_domains) + return -EINVAL; + + *power_thresh_low = THRESH_LOW(pi, domain_id); + *power_thresh_high = THRESH_HIGH(pi, domain_id); + + return 0; +} + +static int +scmi_powercap_measurements_threshold_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 power_thresh_low, + u32 power_thresh_high) +{ + int ret = 0; + struct powercap_info *pi = ph->get_priv(ph); + 
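Every accessor in this file follows the same dispatch rule seen here for the CAP and PAI pairs: use the memory-mapped fastchannel when one was advertised and successfully mapped, otherwise fall back to a full SCMI message round-trip. A hedged sketch of that pattern with simplified types; msg_read_value() is a hypothetical stand-in for the transport path:

    #include <stdint.h>
    #include <stddef.h>

    struct fc_region {
        volatile uint32_t *get_addr;    /* NULL when no fastchannel exists */
    };

    /* Hypothetical slow path standing in for an SCMI request/response. */
    static int msg_read_value(uint32_t domain, uint32_t *val)
    {
        *val = 0;   /* a real implementation would query the platform */
        return 0;
    }

    static int read_value(const struct fc_region *fc, uint32_t domain,
                          uint32_t *val)
    {
        /* Fast path: one MMIO read; GET channels need no doorbell. */
        if (fc && fc->get_addr) {
            *val = *fc->get_addr;
            return 0;
        }
        /* Slow path: a regular message exchange over the transport. */
        return msg_read_value(domain, val);
    }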
+ if (domain_id >= pi->num_domains || + power_thresh_low > power_thresh_high) + return -EINVAL; + + /* Anything to do ? */ + if (THRESH_LOW(pi, domain_id) == power_thresh_low && + THRESH_HIGH(pi, domain_id) == power_thresh_high) + return ret; + + pi->states[domain_id].thresholds = + (FIELD_PREP(GENMASK_ULL(31, 0), power_thresh_low) | + FIELD_PREP(GENMASK_ULL(63, 32), power_thresh_high)); + + /* Update thresholds if notification already enabled */ + if (pi->states[domain_id].meas_notif_enabled) + ret = scmi_powercap_notify(ph, domain_id, + POWERCAP_MEASUREMENTS_NOTIFY, + true); + + return ret; +} + +static const struct scmi_powercap_proto_ops powercap_proto_ops = { + .num_domains_get = scmi_powercap_num_domains_get, + .info_get = scmi_powercap_dom_info_get, + .cap_get = scmi_powercap_cap_get, + .cap_set = scmi_powercap_cap_set, + .pai_get = scmi_powercap_pai_get, + .pai_set = scmi_powercap_pai_set, + .measurements_get = scmi_powercap_measurements_get, + .measurements_threshold_set = scmi_powercap_measurements_threshold_set, + .measurements_threshold_get = scmi_powercap_measurements_threshold_get, +}; + +static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph, + u32 domain, struct scmi_fc_info **p_fc) +{ + struct scmi_fc_info *fc; + + fc = devm_kcalloc(ph->dev, POWERCAP_FC_MAX, sizeof(*fc), GFP_KERNEL); + if (!fc) + return; + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_CAP_SET, 4, domain, + &fc[POWERCAP_FC_CAP].set_addr, + &fc[POWERCAP_FC_CAP].set_db); + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_CAP_GET, 4, domain, + &fc[POWERCAP_FC_CAP].get_addr, NULL); + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_PAI_SET, 4, domain, + &fc[POWERCAP_FC_PAI].set_addr, + &fc[POWERCAP_FC_PAI].set_db); + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_PAI_GET, 4, domain, + &fc[POWERCAP_FC_PAI].get_addr, NULL); + + *p_fc = fc; +} + +static int scmi_powercap_notify(const struct scmi_protocol_handle *ph, + u32 domain, int message_id, bool enable) +{ + int ret; + struct scmi_xfer *t; + + switch (message_id) { + case POWERCAP_CAP_NOTIFY: + { + struct scmi_msg_powercap_notify_cap *notify; + + ret = ph->xops->xfer_get_init(ph, message_id, + sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->domain = cpu_to_le32(domain); + notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0); + break; + } + case POWERCAP_MEASUREMENTS_NOTIFY: + { + u32 low, high; + struct scmi_msg_powercap_notify_thresh *notify; + + /* + * Note that we have to pick the most recently configured + * thresholds to build a proper POWERCAP_MEASUREMENTS_NOTIFY + * enable request and we fail, complaining, if no thresholds + * were ever set, since this is an indication the API has been + * used wrongly. + */ + ret = scmi_powercap_measurements_threshold_get(ph, domain, + &low, &high); + if (ret) + return ret; + + if (enable && !low && !high) { + dev_err(ph->dev, + "Invalid Measurements Notify thresholds: %u/%u\n", + low, high); + return -EINVAL; + } + + ret = ph->xops->xfer_get_init(ph, message_id, + sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->domain = cpu_to_le32(domain); + notify->notify_enable = cpu_to_le32(enable ? 
BIT(0) : 0); + notify->power_thresh_low = cpu_to_le32(low); + notify->power_thresh_high = cpu_to_le32(high); + break; + } + default: + return -EINVAL; + } + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int +scmi_powercap_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret, cmd_id; + struct powercap_info *pi = ph->get_priv(ph); + + if (evt_id >= ARRAY_SIZE(evt_2_cmd) || src_id >= pi->num_domains) + return -EINVAL; + + cmd_id = evt_2_cmd[evt_id]; + ret = scmi_powercap_notify(ph, src_id, cmd_id, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + else if (cmd_id == POWERCAP_MEASUREMENTS_NOTIFY) + /* + * On success save the current notification enabled state, so + * as to be able to properly update the notification thresholds + * when they are modified on a domain for which measurement + * notifications were currently enabled. + * + * This is needed because the SCMI Notification core machinery + * and API does not support passing per-notification custom + * arguments at callback registration time. + * + * Note that this can be done here with a simple flag since the + * SCMI core Notifications code takes care of keeping proper + * per-domain enables refcounting, so that this helper function + * will be called only once (for enables) when the first user + * registers a callback on this domain and once more (disable) + * when the last user de-registers its callback. + */ + pi->states[src_id].meas_notif_enabled = enable; + + return ret; +} + +static void * +scmi_powercap_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + void *rep = NULL; + + switch (evt_id) { + case SCMI_EVENT_POWERCAP_CAP_CHANGED: + { + const struct scmi_powercap_cap_changed_notify_payld *p = payld; + struct scmi_powercap_cap_changed_report *r = report; + + if (sizeof(*p) != payld_sz) + break; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->power_cap = le32_to_cpu(p->power_cap); + r->pai = le32_to_cpu(p->pai); + *src_id = r->domain_id; + rep = r; + break; + } + case SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED: + { + const struct scmi_powercap_meas_changed_notify_payld *p = payld; + struct scmi_powercap_meas_changed_report *r = report; + + if (sizeof(*p) != payld_sz) + break; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->power = le32_to_cpu(p->power); + *src_id = r->domain_id; + rep = r; + break; + } + default: + break; + } + + return rep; +} + +static int +scmi_powercap_get_num_sources(const struct scmi_protocol_handle *ph) +{ + struct powercap_info *pi = ph->get_priv(ph); + + if (!pi) + return -EINVAL; + + return pi->num_domains; +} + +static const struct scmi_event powercap_events[] = { + { + .id = SCMI_EVENT_POWERCAP_CAP_CHANGED, + .max_payld_sz = + sizeof(struct scmi_powercap_cap_changed_notify_payld), + .max_report_sz = + sizeof(struct scmi_powercap_cap_changed_report), + }, + { + .id = SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED, + .max_payld_sz = + sizeof(struct scmi_powercap_meas_changed_notify_payld), + .max_report_sz = + sizeof(struct scmi_powercap_meas_changed_report), + }, +}; + +static const struct scmi_event_ops powercap_event_ops = { + .get_num_sources = scmi_powercap_get_num_sources, + .set_notify_enabled = 
scmi_powercap_set_notify_enabled, + .fill_custom_report = scmi_powercap_fill_custom_report, +}; + +static const struct scmi_protocol_events powercap_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &powercap_event_ops, + .evts = powercap_events, + .num_events = ARRAY_SIZE(powercap_events), +}; + +static int +scmi_powercap_protocol_init(const struct scmi_protocol_handle *ph) +{ + int domain, ret; + u32 version; + struct powercap_info *pinfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_dbg(ph->dev, "Powercap Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + ret = scmi_powercap_attributes_get(ph, pinfo); + if (ret) + return ret; + + pinfo->powercaps = devm_kcalloc(ph->dev, pinfo->num_domains, + sizeof(*pinfo->powercaps), + GFP_KERNEL); + if (!pinfo->powercaps) + return -ENOMEM; + + /* + * Note that any failure in retrieving any domain attribute leads to + * the whole Powercap protocol initialization failure: this way the + * reported Powercap domains are all assured, when accessed, to be well + * formed and correlated by sane parent-child relationship (if any). + */ + for (domain = 0; domain < pinfo->num_domains; domain++) { + ret = scmi_powercap_domain_attributes_get(ph, pinfo, domain); + if (ret) + return ret; + + if (pinfo->powercaps[domain].fastchannels) + scmi_powercap_domain_init_fc(ph, domain, + &pinfo->powercaps[domain].fc_info); + } + + pinfo->states = devm_kcalloc(ph->dev, pinfo->num_domains, + sizeof(*pinfo->states), GFP_KERNEL); + if (!pinfo->states) + return -ENOMEM; + + pinfo->version = version; + + return ph->set_priv(ph, pinfo); +} + +static const struct scmi_protocol scmi_powercap = { + .id = SCMI_PROTOCOL_POWERCAP, + .owner = THIS_MODULE, + .instance_init = &scmi_powercap_protocol_init, + .ops = &powercap_proto_ops, + .events = &powercap_protocol_events, +}; + +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(powercap, scmi_powercap) diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h index c679f3fb8718..2f3bf691db7c 100644 --- a/drivers/firmware/arm_scmi/protocols.h +++ b/drivers/firmware/arm_scmi/protocols.h @@ -179,6 +179,8 @@ struct scmi_protocol_handle { * @max_resources: Maximum acceptable number of items, configured by the caller * depending on the underlying resources that it is querying. * @loop_idx: The iterator loop index in the current multi-part reply. + * @rx_len: Size in bytes of the currently processed message; it can be used by + * the user of the iterator to verify a reply size. * @priv: Optional pointer to some additional state-related private data setup * by the caller during the iterations. */ @@ -188,6 +190,7 @@ struct scmi_iterator_state { unsigned int num_remaining; unsigned int max_resources; unsigned int loop_idx; + size_t rx_len; void *priv; }; @@ -212,6 +215,19 @@ struct scmi_iterator_ops { struct scmi_iterator_state *st, void *priv); }; +struct scmi_fc_db_info { + int width; + u64 set; + u64 mask; + void __iomem *addr; +}; + +struct scmi_fc_info { + void __iomem *set_addr; + void __iomem *get_addr; + struct scmi_fc_db_info *set_db; +}; + /** * struct scmi_proto_helpers_ops - References to common protocol helpers * @extended_name_get: A common helper function to retrieve extended naming @@ -227,6 +243,9 @@ struct scmi_iterator_ops { * provided in @ops.
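The scmi_fc_db_info descriptor introduced here keeps, besides the mapped doorbell address and width, a set mask and a preserve mask: ringing the doorbell is a read-modify-write that keeps the preserved bits and ORs in the set bits, the same logic the removed perf-local SCMI_PERF_FC_RING_DB macro implemented and which now lives behind ph->hops->fastchannel_db_ring. A 32-bit-only illustrative sketch:

    #include <stdint.h>
    #include <stddef.h>

    struct fc_db {
        int width;                  /* doorbell register width in bytes */
        uint64_t set;               /* bits to OR into the register     */
        uint64_t mask;              /* bits to preserve across writes   */
        volatile uint32_t *addr;    /* 32-bit doorbell (width == 4)     */
    };

    static void ring_db32(const struct fc_db *db)
    {
        uint32_t val = 0;

        if (!db || !db->addr)
            return;

        if (db->mask)                            /* read-modify-write... */
            val = *db->addr & (uint32_t)db->mask;
        *db->addr = (uint32_t)db->set | val;     /* ...then ring         */
    }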
* @iter_response_run: A common helper to trigger the run of a previously * initialized iterator. + * @fastchannel_init: A common helper used to initialize FC descriptors by + * gathering FC descriptions from the SCMI platform server. + * @fastchannel_db_ring: A common helper to ring a FC doorbell. */ struct scmi_proto_helpers_ops { int (*extended_name_get)(const struct scmi_protocol_handle *ph, @@ -236,6 +255,12 @@ unsigned int max_resources, u8 msg_id, size_t tx_size, void *priv); int (*iter_response_run)(void *iter); + void (*fastchannel_init)(const struct scmi_protocol_handle *ph, + u8 describe_id, u32 message_id, + u32 valid_size, u32 domain, + void __iomem **p_addr, + struct scmi_fc_db_info **p_db); + void (*fastchannel_db_ring)(struct scmi_fc_db_info *db); }; /** @@ -312,5 +337,6 @@ DECLARE_SCMI_REGISTER_UNREGISTER(reset); DECLARE_SCMI_REGISTER_UNREGISTER(sensors); DECLARE_SCMI_REGISTER_UNREGISTER(voltage); DECLARE_SCMI_REGISTER_UNREGISTER(system); +DECLARE_SCMI_REGISTER_UNREGISTER(powercap); #endif /* _SCMI_PROTOCOLS_H */ diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c new file mode 100644 index 000000000000..6eb7d2a4b6b1 --- /dev/null +++ b/drivers/firmware/arm_scmi/scmi_power_control.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SCMI Generic SystemPower Control driver. + * + * Copyright (C) 2020-2022 ARM Ltd. + */ +/* + * In order to handle platform originated SCMI SystemPower requests (like + * shutdowns or cold/warm resets) we register an SCMI Notification notifier + * block to react when such SCMI SystemPower events are emitted by the platform. + * + * Once such a notification is received, we act accordingly to perform the + * required system transition depending on the kind of request. + * + * Graceful requests are routed to userspace through the same API methods + * (orderly_poweroff/reboot()) used by ACPI when handling ACPI Shutdown bus + * events. + * + * Direct forceful requests are not supported since they are not meant to be sent + * by the SCMI platform to an OSPM like Linux. + * + * Additionally, graceful request notifications can carry an optional timeout + * field stating the maximum amount of time allowed by the platform for + * completion after which they are converted to forceful ones: the assumption + * here is that even graceful requests can be upper-bound by a maximum final + * timeout strictly enforced by the platform itself which can ultimately cut + * the power off at will anytime; in order to avoid such an extreme scenario, we + * track progress of graceful requests through the means of a reboot notifier + * converting timed-out graceful requests to forceful ones, so at least we + * try to perform a clean sync and shutdown/restart before the power is cut. + * + * Given the peculiar nature of the SCMI SystemPower protocol, which is in + * charge of triggering system-wide shutdown/reboot events, there should be + * only one SCMI platform actively emitting SystemPower events. + * For this reason the SCMI core takes care to enforce the creation of one + * single unique device associated to the SCMI System Power protocol; no matter + * how many SCMI platforms are defined on the system, only one can be designated + * to support System Power: as a consequence this driver will never be probed + * more than once.
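The uniqueness guarantee described above is enforced in the core's scmi_get_protocol_device() (seen earlier in this patch) with a mutex-protected "already registered" flag. A minimal standalone sketch of that register-once guard, with a pthread mutex standing in for the kernel mutex:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t syspower_mtx = PTHREAD_MUTEX_INITIALIZER;
    static bool syspower_registered;

    /* Only the first caller may create the SystemPower device. */
    static bool try_register_syspower(void)
    {
        bool ok = false;

        pthread_mutex_lock(&syspower_mtx);
        if (!syspower_registered) {
            syspower_registered = true;    /* device created here */
            ok = true;
        }
        pthread_mutex_unlock(&syspower_mtx);

        if (!ok)
            fprintf(stderr, "SystemPower device must be unique\n");
        return ok;
    }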
+ * + * For similar reasons, as soon as the first valid SystemPower is received by + * this driver and the shutdown/reboot is started, any further notification + * possibly emitted by the platform will be ignored. + */ + +#include <linux/math.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/printk.h> +#include <linux/reboot.h> +#include <linux/scmi_protocol.h> +#include <linux/slab.h> +#include <linux/time64.h> +#include <linux/timer.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#ifndef MODULE +#include <linux/fs.h> +#endif + +enum scmi_syspower_state { + SCMI_SYSPOWER_IDLE, + SCMI_SYSPOWER_IN_PROGRESS, + SCMI_SYSPOWER_REBOOTING +}; + +/** + * struct scmi_syspower_conf - Common configuration + * + * @dev: A reference device + * @state: Current SystemPower state + * @state_mtx: @state related mutex + * @required_transition: The requested transition as described in the received + * SCMI SystemPower notification + * @userspace_nb: The notifier_block registered against the SCMI SystemPower + * notification to start the needed userspace interactions. + * @reboot_nb: A notifier_block optionally used to track reboot progress + * @forceful_work: A worker used to trigger a forceful transition once a + * graceful has timed out. + */ +struct scmi_syspower_conf { + struct device *dev; + enum scmi_syspower_state state; + /* Protect access to state */ + struct mutex state_mtx; + enum scmi_system_events required_transition; + + struct notifier_block userspace_nb; + struct notifier_block reboot_nb; + + struct delayed_work forceful_work; +}; + +#define userspace_nb_to_sconf(x) \ + container_of(x, struct scmi_syspower_conf, userspace_nb) + +#define reboot_nb_to_sconf(x) \ + container_of(x, struct scmi_syspower_conf, reboot_nb) + +#define dwork_to_sconf(x) \ + container_of(x, struct scmi_syspower_conf, forceful_work) + +/** + * scmi_reboot_notifier - A reboot notifier to catch an ongoing successful + * system transition + * @nb: Reference to the related notifier block + * @reason: The reason for the ongoing reboot + * @__unused: The cmd being executed on a restart request (unused) + * + * When an ongoing system transition is detected, compatible with the one + * requested by SCMI, cancel the delayed work. + * + * Return: NOTIFY_OK in any case + */ +static int scmi_reboot_notifier(struct notifier_block *nb, + unsigned long reason, void *__unused) +{ + struct scmi_syspower_conf *sc = reboot_nb_to_sconf(nb); + + mutex_lock(&sc->state_mtx); + switch (reason) { + case SYS_HALT: + case SYS_POWER_OFF: + if (sc->required_transition == SCMI_SYSTEM_SHUTDOWN) + sc->state = SCMI_SYSPOWER_REBOOTING; + break; + case SYS_RESTART: + if (sc->required_transition == SCMI_SYSTEM_COLDRESET || + sc->required_transition == SCMI_SYSTEM_WARMRESET) + sc->state = SCMI_SYSPOWER_REBOOTING; + break; + default: + break; + } + + if (sc->state == SCMI_SYSPOWER_REBOOTING) { + dev_dbg(sc->dev, "Reboot in progress...cancel delayed work.\n"); + cancel_delayed_work_sync(&sc->forceful_work); + } + mutex_unlock(&sc->state_mtx); + + return NOTIFY_OK; +} + +/** + * scmi_request_forceful_transition - Request forceful SystemPower transition + * @sc: A reference to the configuration data + * + * Initiates the required SystemPower transition without involving userspace: + * just trigger the action at the kernel level after issuing an emergency + * sync 
(if possible at all). + */ +static inline void +scmi_request_forceful_transition(struct scmi_syspower_conf *sc) +{ + dev_dbg(sc->dev, "Serving forceful request:%d\n", + sc->required_transition); + +#ifndef MODULE + emergency_sync(); +#endif + switch (sc->required_transition) { + case SCMI_SYSTEM_SHUTDOWN: + kernel_power_off(); + break; + case SCMI_SYSTEM_COLDRESET: + case SCMI_SYSTEM_WARMRESET: + kernel_restart(NULL); + break; + default: + break; + } +} + +static void scmi_forceful_work_func(struct work_struct *work) +{ + struct scmi_syspower_conf *sc; + struct delayed_work *dwork; + + if (system_state > SYSTEM_RUNNING) + return; + + dwork = to_delayed_work(work); + sc = dwork_to_sconf(dwork); + + dev_dbg(sc->dev, "Graceful request timed out...forcing !\n"); + mutex_lock(&sc->state_mtx); + /* avoid deadlock by unregistering reboot notifier first */ + unregister_reboot_notifier(&sc->reboot_nb); + if (sc->state == SCMI_SYSPOWER_IN_PROGRESS) + scmi_request_forceful_transition(sc); + mutex_unlock(&sc->state_mtx); +} + +/** + * scmi_request_graceful_transition - Request graceful SystemPower transition + * @sc: A reference to the configuration data + * @timeout_ms: The desired timeout to wait for the shutdown to complete before the + * system is forcibly shut down. + * + * Initiates the required SystemPower transition, requesting userspace + * co-operation: it uses the same orderly_ methods used by ACPI Shutdown event + * processing. + * + * It also takes care to register a reboot notifier and to schedule a delayed work + * in order to detect if userspace actions are taking too long and in such a + * case to trigger a forceful transition. + */ +static void scmi_request_graceful_transition(struct scmi_syspower_conf *sc, + unsigned int timeout_ms) +{ + unsigned int adj_timeout_ms = 0; + + if (timeout_ms) { + int ret; + + sc->reboot_nb.notifier_call = &scmi_reboot_notifier; + ret = register_reboot_notifier(&sc->reboot_nb); + if (!ret) { + /* Wait only up to 75% of the advertised timeout */ + adj_timeout_ms = mult_frac(timeout_ms, 3, 4); + INIT_DELAYED_WORK(&sc->forceful_work, + scmi_forceful_work_func); + schedule_delayed_work(&sc->forceful_work, + msecs_to_jiffies(adj_timeout_ms)); + } else { + /* Carry on best effort even without a reboot notifier */ + dev_warn(sc->dev, + "Cannot register reboot notifier !\n"); + } + } + + dev_dbg(sc->dev, + "Serving graceful req:%d (timeout_ms:%u adj_timeout_ms:%u)\n", + sc->required_transition, timeout_ms, adj_timeout_ms); + + switch (sc->required_transition) { + case SCMI_SYSTEM_SHUTDOWN: + /* + * When triggered early at boot-time the 'orderly' call will + * partially fail due to the lack of userspace itself, but + * the force=true argument will anyway start a successful + * forced shutdown. + */ + orderly_poweroff(true); + break; + case SCMI_SYSTEM_COLDRESET: + case SCMI_SYSTEM_WARMRESET: + orderly_reboot(); + break; + default: + break; + } +} + +/** + * scmi_userspace_notifier - Notifier callback to act on SystemPower + * Notifications + * @nb: Reference to the related notifier block + * @event: The SystemPower notification event id + * @data: The SystemPower event report + * + * This callback is in charge of decoding the received SystemPower report + * and of acting accordingly, triggering a graceful or forceful system transition. + * + * Note that once a valid SCMI SystemPower event starts being served, any + * other following SystemPower notification received from the same SCMI + * instance (handle) will be ignored. 
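The escalation policy implemented by scmi_request_graceful_transition() above (arm a watchdog for 75% of the platform's advertised budget and force the transition if the graceful path has not completed by then) can be sketched in plain C with a thread standing in for the delayed work; everything below is illustrative, not driver code:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool transition_done;

    static void *forceful_watchdog(void *arg)
    {
        unsigned int budget_ms = *(unsigned int *)arg;

        /* Wait only up to 75% of the advertised timeout, as the driver does. */
        usleep(budget_ms * 750);

        pthread_mutex_lock(&lock);
        if (!transition_done)
            printf("graceful request timed out: forcing transition\n");
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        unsigned int timeout_ms = 100;    /* example platform budget */
        pthread_t tid;

        pthread_create(&tid, NULL, forceful_watchdog, &timeout_ms);
        /* ...the graceful path (orderly_poweroff() analogue) runs here... */
        pthread_join(tid, NULL);
        return 0;
    }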
+ * + * Return: NOTIFY_OK once a valid SystemPower event has been successfully + * processed. + */ +static int scmi_userspace_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct scmi_system_power_state_notifier_report *er = data; + struct scmi_syspower_conf *sc = userspace_nb_to_sconf(nb); + + if (er->system_state >= SCMI_SYSTEM_POWERUP) { + dev_err(sc->dev, "Ignoring unsupported system_state: 0x%X\n", + er->system_state); + return NOTIFY_DONE; + } + + if (!SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(er->flags)) { + dev_err(sc->dev, "Ignoring forceful notification.\n"); + return NOTIFY_DONE; + } + + /* + * Bail out if the system is already shutting down or an SCMI SystemPower + * request is already being served. + */ + if (system_state > SYSTEM_RUNNING) + return NOTIFY_DONE; + mutex_lock(&sc->state_mtx); + if (sc->state != SCMI_SYSPOWER_IDLE) { + dev_dbg(sc->dev, + "Transition already in progress...ignore.\n"); + mutex_unlock(&sc->state_mtx); + return NOTIFY_DONE; + } + sc->state = SCMI_SYSPOWER_IN_PROGRESS; + mutex_unlock(&sc->state_mtx); + + sc->required_transition = er->system_state; + + /* Leaving a trace in logs of who triggered the shutdown/reboot. */ + dev_info(sc->dev, "Serving shutdown/reboot request: %d\n", + sc->required_transition); + + scmi_request_graceful_transition(sc, er->timeout); + + return NOTIFY_OK; +} + +static int scmi_syspower_probe(struct scmi_device *sdev) +{ + int ret; + struct scmi_syspower_conf *sc; + struct scmi_handle *handle = sdev->handle; + + if (!handle) + return -ENODEV; + + ret = handle->devm_protocol_acquire(sdev, SCMI_PROTOCOL_SYSTEM); + if (ret) + return ret; + + sc = devm_kzalloc(&sdev->dev, sizeof(*sc), GFP_KERNEL); + if (!sc) + return -ENOMEM; + + sc->state = SCMI_SYSPOWER_IDLE; + mutex_init(&sc->state_mtx); + sc->required_transition = SCMI_SYSTEM_MAX; + sc->userspace_nb.notifier_call = &scmi_userspace_notifier; + sc->dev = &sdev->dev; + + return handle->notify_ops->devm_event_notifier_register(sdev, + SCMI_PROTOCOL_SYSTEM, + SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER, + NULL, &sc->userspace_nb); +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_SYSTEM, "syspower" }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_system_power_driver = { + .name = "scmi-system-power", + .probe = scmi_syspower_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_system_power_driver); + +MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>"); +MODULE_DESCRIPTION("ARM SCMI SystemPower Control driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c index 220e399118ad..9383d7584539 100644 --- a/drivers/firmware/arm_scmi/system.c +++ b/drivers/firmware/arm_scmi/system.c @@ -27,10 +27,12 @@ struct scmi_system_power_state_notifier_payld { __le32 agent_id; __le32 flags; __le32 system_state; + __le32 timeout; }; struct scmi_system_info { u32 version; + bool graceful_timeout_supported; }; static int scmi_system_request_notify(const struct scmi_protocol_handle *ph, @@ -72,17 +74,27 @@ scmi_system_fill_custom_report(const struct scmi_protocol_handle *ph, const void *payld, size_t payld_sz, void *report, u32 *src_id) { + size_t expected_sz; const struct scmi_system_power_state_notifier_payld *p = payld; struct scmi_system_power_state_notifier_report *r = report; + struct scmi_system_info *pinfo = ph->get_priv(ph); + expected_sz = pinfo->graceful_timeout_supported ? 
+ sizeof(*p) : sizeof(*p) - sizeof(__le32); if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER || - sizeof(*p) != payld_sz) + payld_sz != expected_sz) return NULL; r->timestamp = timestamp; r->agent_id = le32_to_cpu(p->agent_id); r->flags = le32_to_cpu(p->flags); r->system_state = le32_to_cpu(p->system_state); + if (pinfo->graceful_timeout_supported && + r->system_state == SCMI_SYSTEM_SHUTDOWN && + SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(r->flags)) + r->timeout = le32_to_cpu(p->timeout); + else + r->timeout = 0x00; *src_id = 0; return r; @@ -129,6 +141,9 @@ static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph) return -ENOMEM; pinfo->version = version; + if (PROTOCOL_REV_MAJOR(pinfo->version) >= 0x2) + pinfo->graceful_timeout_supported = true; + return ph->set_priv(ph, pinfo); } diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index ddf0b9ff9e15..435d0e2658a4 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -815,7 +815,7 @@ static int scpi_init_versions(struct scpi_drvinfo *info) info->firmware_version = le32_to_cpu(caps.platform_version); } /* Ignore error if not implemented */ - if (scpi_info->is_legacy && ret == -EOPNOTSUPP) + if (info->is_legacy && ret == -EOPNOTSUPP) return 0; return ret; @@ -913,13 +913,14 @@ static int scpi_probe(struct platform_device *pdev) struct resource res; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; + struct scpi_drvinfo *scpi_drvinfo; - scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL); - if (!scpi_info) + scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL); + if (!scpi_drvinfo) return -ENOMEM; if (of_match_device(legacy_scpi_of_match, &pdev->dev)) - scpi_info->is_legacy = true; + scpi_drvinfo->is_legacy = true; count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); if (count < 0) { @@ -927,19 +928,19 @@ static int scpi_probe(struct platform_device *pdev) return -ENODEV; } - scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan), - GFP_KERNEL); - if (!scpi_info->channels) + scpi_drvinfo->channels = + devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL); + if (!scpi_drvinfo->channels) return -ENOMEM; - ret = devm_add_action(dev, scpi_free_channels, scpi_info); + ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo); if (ret) return ret; - for (; scpi_info->num_chans < count; scpi_info->num_chans++) { + for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) { resource_size_t size; - int idx = scpi_info->num_chans; - struct scpi_chan *pchan = scpi_info->channels + idx; + int idx = scpi_drvinfo->num_chans; + struct scpi_chan *pchan = scpi_drvinfo->channels + idx; struct mbox_client *cl = &pchan->cl; struct device_node *shmem = of_parse_phandle(np, "shmem", idx); @@ -986,45 +987,53 @@ static int scpi_probe(struct platform_device *pdev) return ret; } - scpi_info->commands = scpi_std_commands; + scpi_drvinfo->commands = scpi_std_commands; - platform_set_drvdata(pdev, scpi_info); + platform_set_drvdata(pdev, scpi_drvinfo); - if (scpi_info->is_legacy) { + if (scpi_drvinfo->is_legacy) { /* Replace with legacy variants */ scpi_ops.clk_set_val = legacy_scpi_clk_set_val; - scpi_info->commands = scpi_legacy_commands; + scpi_drvinfo->commands = scpi_legacy_commands; /* Fill priority bitmap */ for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++) set_bit(legacy_hpriority_cmds[idx], - scpi_info->cmd_priority); + scpi_drvinfo->cmd_priority); } - ret = scpi_init_versions(scpi_info); + 
scpi_info = scpi_drvinfo; + + ret = scpi_init_versions(scpi_drvinfo); if (ret) { dev_err(dev, "incorrect or no SCP firmware found\n"); + scpi_info = NULL; return ret; } - if (scpi_info->is_legacy && !scpi_info->protocol_version && - !scpi_info->firmware_version) + if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version && + !scpi_drvinfo->firmware_version) dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n"); else dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n", FIELD_GET(PROTO_REV_MAJOR_MASK, - scpi_info->protocol_version), + scpi_drvinfo->protocol_version), FIELD_GET(PROTO_REV_MINOR_MASK, - scpi_info->protocol_version), + scpi_drvinfo->protocol_version), FIELD_GET(FW_REV_MAJOR_MASK, - scpi_info->firmware_version), + scpi_drvinfo->firmware_version), FIELD_GET(FW_REV_MINOR_MASK, - scpi_info->firmware_version), + scpi_drvinfo->firmware_version), FIELD_GET(FW_REV_PATCH_MASK, - scpi_info->firmware_version)); - scpi_info->scpi_ops = &scpi_ops; + scpi_drvinfo->firmware_version)); + + scpi_drvinfo->scpi_ops = &scpi_ops; - return devm_of_platform_populate(dev); + ret = devm_of_platform_populate(dev); + if (ret) + scpi_info = NULL; + + return ret; } static const struct of_device_id scpi_of_match[] = { diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c index 73089a24f04b..ceae84c19d22 100644 --- a/drivers/firmware/efi/reboot.c +++ b/drivers/firmware/efi/reboot.c @@ -6,7 +6,7 @@ #include <linux/efi.h> #include <linux/reboot.h> -static void (*orig_pm_power_off)(void); +static struct sys_off_handler *efi_sys_off_handler; int efi_reboot_quirk_mode = -1; @@ -51,15 +51,11 @@ bool __weak efi_poweroff_required(void) return false; } -static void efi_power_off(void) +static int efi_power_off(struct sys_off_data *data) { efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); - /* - * The above call should not return, if it does fall back to - * the original power off method (typically ACPI poweroff). - */ - if (orig_pm_power_off) - orig_pm_power_off(); + + return NOTIFY_DONE; } static int __init efi_shutdown_init(void) @@ -68,8 +64,13 @@ static int __init efi_shutdown_init(void) return -ENODEV; if (efi_poweroff_required()) { - orig_pm_power_off = pm_power_off; - pm_power_off = efi_power_off; + /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */ + efi_sys_off_handler = + register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, + SYS_OFF_PRIO_FIRMWARE + 1, + efi_power_off, NULL); + if (IS_ERR(efi_sys_off_handler)) + return PTR_ERR(efi_sys_off_handler); } return 0; diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c index 1829ba220576..9f918b9e6f8f 100644 --- a/drivers/firmware/qcom_scm-legacy.c +++ b/drivers/firmware/qcom_scm-legacy.c @@ -120,6 +120,9 @@ static void __scm_legacy_do(const struct arm_smccc_args *smc, /** * scm_legacy_call() - Sends a command to the SCM and waits for the command to * finish processing. 
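A side note on the arm_scpi rework above: the module-wide scpi_info is now published only once the local structure is usable by the helpers that consume it, and is retracted again on every failure path, so no reader is left holding a pointer to a half-initialized or stale structure. The pattern, reduced to its bones in a standalone sketch (all names hypothetical):

    #include <stddef.h>

    struct drvinfo {
        int is_legacy;
        /* ... */
    };

    static struct drvinfo *g_info;    /* read by helpers and child devices */

    /* Hypothetical init step that relies on the published global. */
    static int init_versions(struct drvinfo *info)
    {
        return info ? 0 : -1;    /* stand-in for talking to the SCP */
    }

    static int probe(struct drvinfo *local)
    {
        g_info = local;              /* publish for the init helpers */

        if (init_versions(local)) {
            g_info = NULL;           /* retract on failure */
            return -1;
        }
        return 0;
    }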
+ * @dev: device + * @desc: descriptor structure containing arguments and return values + * @res: results from SMC call * * A note on cache maintenance: * Note that any buffers that are expected to be accessed by the secure world @@ -211,6 +214,7 @@ out: /** * scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments * and 3 return values + * @unused: device, legacy argument, not used, can be NULL * @desc: SCM call descriptor containing arguments * @res: SCM call return values * diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 3163660fa8e2..cdbfe54c8146 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -7,6 +7,7 @@ #include <linux/cpumask.h> #include <linux/export.h> #include <linux/dma-mapping.h> +#include <linux/interconnect.h> #include <linux/module.h> #include <linux/types.h> #include <linux/qcom_scm.h> @@ -31,8 +32,13 @@ struct qcom_scm { struct clk *core_clk; struct clk *iface_clk; struct clk *bus_clk; + struct icc_path *path; struct reset_controller_dev reset; + /* control access to the interconnect path */ + struct mutex scm_bw_lock; + int scm_vote_count; + u64 dload_mode_addr; }; @@ -99,6 +105,42 @@ static void qcom_scm_clk_disable(void) clk_disable_unprepare(__scm->bus_clk); } +static int qcom_scm_bw_enable(void) +{ + int ret = 0; + + if (!__scm->path) + return 0; + + if (IS_ERR(__scm->path)) + return -EINVAL; + + mutex_lock(&__scm->scm_bw_lock); + if (!__scm->scm_vote_count) { + ret = icc_set_bw(__scm->path, 0, UINT_MAX); + if (ret < 0) { + dev_err(__scm->dev, "failed to set bandwidth request\n"); + goto err_bw; + } + } + __scm->scm_vote_count++; +err_bw: + mutex_unlock(&__scm->scm_bw_lock); + + return ret; +} + +static void qcom_scm_bw_disable(void) +{ + if (IS_ERR_OR_NULL(__scm->path)) + return; + + mutex_lock(&__scm->scm_bw_lock); + if (__scm->scm_vote_count-- == 1) + icc_set_bw(__scm->path, 0, 0); + mutex_unlock(&__scm->scm_bw_lock); +} + enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN; static DEFINE_SPINLOCK(scm_query_lock); @@ -444,10 +486,15 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, if (ret) goto out; + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + desc.args[1] = mdata_phys; ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); qcom_scm_clk_disable(); out: @@ -507,7 +554,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) if (ret) return ret; + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); qcom_scm_clk_disable(); return ret ? : res.result[0]; @@ -537,7 +589,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral) if (ret) return ret; + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); qcom_scm_clk_disable(); return ret ? : res.result[0]; @@ -566,8 +623,13 @@ int qcom_scm_pas_shutdown(u32 peripheral) if (ret) return ret; + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); qcom_scm_clk_disable(); return ret ? 
: res.result[0]; @@ -1277,8 +1339,15 @@ static int qcom_scm_probe(struct platform_device *pdev) if (ret < 0) return ret; + mutex_init(&scm->scm_bw_lock); + clks = (unsigned long)of_device_get_match_data(&pdev->dev); + scm->path = devm_of_icc_get(&pdev->dev, NULL); + if (IS_ERR(scm->path)) + return dev_err_probe(&pdev->dev, PTR_ERR(scm->path), + "failed to acquire interconnect path\n"); + scm->core_clk = devm_clk_get(&pdev->dev, "core"); if (IS_ERR(scm->core_clk)) { if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER) @@ -1337,7 +1406,7 @@ static int qcom_scm_probe(struct platform_device *pdev) /* * If requested enable "download mode", from this point on warmboot - * will cause the the boot stages to enter download mode, unless + * will cause the boot stages to enter download mode, unless * disabled below by a clean shutdown/reboot. */ if (download_mode) diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c index 2bfbb05f7d89..1f276f108cc9 100644 --- a/drivers/firmware/sysfb.c +++ b/drivers/firmware/sysfb.c @@ -34,21 +34,59 @@ #include <linux/screen_info.h> #include <linux/sysfb.h> +static struct platform_device *pd; +static DEFINE_MUTEX(disable_lock); +static bool disabled; + +static bool sysfb_unregister(void) +{ + if (IS_ERR_OR_NULL(pd)) + return false; + + platform_device_unregister(pd); + pd = NULL; + + return true; +} + +/** + * sysfb_disable() - disable the Generic System Framebuffers support + * + * This disables the registration of system framebuffer devices that match the + * generic drivers that make use of the system framebuffer set up by firmware. + * + * It also unregisters a device if this was already registered by sysfb_init(). + * + * Context: The function can sleep. A @disable_lock mutex is acquired to serialize + * against sysfb_init(), that registers a system framebuffer device. 
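The disable_lock/disabled pair added to sysfb above lets a late caller veto, and tear down, the generic framebuffer device even when it races with sysfb_init(). A hedged sketch of that handshake, with a pthread mutex in place of the kernel mutex:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t disable_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool disabled;
    static bool device_registered;

    /* Analogue of sysfb_disable(): unregister and forbid re-registration. */
    void fb_disable(void)
    {
        pthread_mutex_lock(&disable_lock);
        device_registered = false;
        disabled = true;
        pthread_mutex_unlock(&disable_lock);
    }

    /* Analogue of sysfb_init(): registration is skipped once disabled. */
    int fb_init(void)
    {
        pthread_mutex_lock(&disable_lock);
        if (!disabled)
            device_registered = true;
        pthread_mutex_unlock(&disable_lock);
        return 0;
    }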
+ */ +void sysfb_disable(void) +{ + mutex_lock(&disable_lock); + sysfb_unregister(); + disabled = true; + mutex_unlock(&disable_lock); +} +EXPORT_SYMBOL_GPL(sysfb_disable); + static __init int sysfb_init(void) { struct screen_info *si = &screen_info; struct simplefb_platform_data mode; - struct platform_device *pd; const char *name; bool compatible; - int ret; + int ret = 0; + + mutex_lock(&disable_lock); + if (disabled) + goto unlock_mutex; /* try to create a simple-framebuffer device */ compatible = sysfb_parse_mode(si, &mode); if (compatible) { - ret = sysfb_create_simplefb(si, &mode); - if (!ret) - return 0; + pd = sysfb_create_simplefb(si, &mode); + if (!IS_ERR(pd)) + goto unlock_mutex; } /* if the FB is incompatible, create a legacy framebuffer device */ @@ -60,8 +98,10 @@ static __init int sysfb_init(void) name = "platform-framebuffer"; pd = platform_device_alloc(name, 0); - if (!pd) - return -ENOMEM; + if (!pd) { + ret = -ENOMEM; + goto unlock_mutex; + } sysfb_apply_efi_quirks(pd); @@ -73,9 +113,11 @@ static __init int sysfb_init(void) if (ret) goto err; - return 0; + goto unlock_mutex; err: platform_device_put(pd); +unlock_mutex: + mutex_unlock(&disable_lock); return ret; } diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c index bda8712bfd8c..a353e27f83f5 100644 --- a/drivers/firmware/sysfb_simplefb.c +++ b/drivers/firmware/sysfb_simplefb.c @@ -57,8 +57,8 @@ __init bool sysfb_parse_mode(const struct screen_info *si, return false; } -__init int sysfb_create_simplefb(const struct screen_info *si, - const struct simplefb_platform_data *mode) +__init struct platform_device *sysfb_create_simplefb(const struct screen_info *si, + const struct simplefb_platform_data *mode) { struct platform_device *pd; struct resource res; @@ -76,7 +76,7 @@ __init int sysfb_create_simplefb(const struct screen_info *si, base |= (u64)si->ext_lfb_base << 32; if (!base || (u64)(resource_size_t)base != base) { printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n"); - return -EINVAL; + return ERR_PTR(-EINVAL); } /* @@ -93,7 +93,7 @@ __init int sysfb_create_simplefb(const struct screen_info *si, length = mode->height * mode->stride; if (length > size) { printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); - return -EINVAL; + return ERR_PTR(-EINVAL); } length = PAGE_ALIGN(length); @@ -104,11 +104,11 @@ __init int sysfb_create_simplefb(const struct screen_info *si, res.start = base; res.end = res.start + length - 1; if (res.end <= res.start) - return -EINVAL; + return ERR_PTR(-EINVAL); pd = platform_device_alloc("simple-framebuffer", 0); if (!pd) - return -ENOMEM; + return ERR_PTR(-ENOMEM); sysfb_apply_efi_quirks(pd); @@ -124,10 +124,10 @@ __init int sysfb_create_simplefb(const struct screen_info *si, if (ret) goto err_put_device; - return 0; + return pd; err_put_device: platform_device_put(pd); - return ret; + return ERR_PTR(ret); } diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c index fd89899aeeed..0c440afd5224 100644 --- a/drivers/firmware/tegra/bpmp-debugfs.c +++ b/drivers/firmware/tegra/bpmp-debugfs.c @@ -474,7 +474,7 @@ static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp, mode |= attrs & DEBUGFS_S_IWUSR ? 
0200 : 0; dentry = debugfs_create_file(name, mode, parent, bpmp, &bpmp_debug_fops); - if (!dentry) { + if (IS_ERR(dentry)) { err = -ENOMEM; goto out; } @@ -725,7 +725,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf, if (t & DEBUGFS_S_ISDIR) { dentry = debugfs_create_dir(name, parent); - if (!dentry) + if (IS_ERR(dentry)) return -ENOMEM; err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1); if (err < 0) @@ -738,7 +738,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf, dentry = debugfs_create_file(name, mode, parent, bpmp, &debugfs_fops); - if (!dentry) + if (IS_ERR(dentry)) return -ENOMEM; } } @@ -788,11 +788,11 @@ int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp) return 0; root = debugfs_create_dir("bpmp", NULL); - if (!root) + if (IS_ERR(root)) return -ENOMEM; bpmp->debugfs_mirror = debugfs_create_dir("debug", root); - if (!bpmp->debugfs_mirror) { + if (IS_ERR(bpmp->debugfs_mirror)) { err = -ENOMEM; goto out; } diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index 5654c5e9862b..037db21de510 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c @@ -201,7 +201,7 @@ static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel, int err; if (data && size > 0) - memcpy(data, channel->ib->data, size); + memcpy_fromio(data, channel->ib->data, size); err = tegra_bpmp_ack_response(channel); if (err < 0) @@ -245,7 +245,7 @@ static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel, channel->ob->flags = flags; if (data && size > 0) - memcpy(channel->ob->data, data, size); + memcpy_toio(channel->ob->data, data, size); return tegra_bpmp_post_request(channel); } @@ -420,7 +420,7 @@ void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code, channel->ob->code = code; if (data && size > 0) - memcpy(channel->ob->data, data, size); + memcpy_toio(channel->ob->data, data, size); err = tegra_bpmp_post_response(channel); if (WARN_ON(err < 0)) diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 7977a494a651..d1f652802181 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -2,7 +2,7 @@ /* * Xilinx Zynq MPSoC Firmware layer * - * Copyright (C) 2014-2021 Xilinx, Inc. + * Copyright (C) 2014-2022 Xilinx, Inc. * * Michal Simek <michal.simek@xilinx.com> * Davorin Mista <davorin.mista@aggios.com> @@ -340,6 +340,20 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1, static u32 pm_api_version; static u32 pm_tz_version; +int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset) +{ + int ret; + + ret = zynqmp_pm_invoke_fn(TF_A_PM_REGISTER_SGI, sgi_num, reset, 0, 0, + NULL); + if (!ret) + return ret; + + /* try old implementation as fallback strategy if above fails */ + return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num, + reset, NULL); +} + /** * zynqmp_pm_get_api_version() - Get version number of PMU PM firmware * @version: Returned version value diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c index b2c90bdd39d0..52d7b8d99170 100644 --- a/drivers/gpio/gpio-msc313.c +++ b/drivers/gpio/gpio-msc313.c @@ -550,15 +550,12 @@ static struct irq_chip msc313_gpio_irqchip = { * so we need to provide the fwspec. Essentially gpiochip_populate_parent_fwspec_twocell * that puts GIC_SPI into the first cell. 
*/ -static void *msc313_gpio_populate_parent_fwspec(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type) +static int msc313_gpio_populate_parent_fwspec(struct gpio_chip *gc, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { - struct irq_fwspec *fwspec; - - fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL); - if (!fwspec) - return NULL; + struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = gc->irq.parent_domain->fwnode; fwspec->param_count = 3; @@ -566,7 +563,7 @@ static void *msc313_gpio_populate_parent_fwspec(struct gpio_chip *gc, fwspec->param[1] = parent_hwirq; fwspec->param[2] = parent_type; - return fwspec; + return 0; } static int msc313e_gpio_child_to_parent_hwirq(struct gpio_chip *chip, diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 08bc52c3cdcb..ecd7d169470b 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -351,6 +351,9 @@ static const struct regmap_config pca953x_i2c_regmap = { .reg_bits = 8, .val_bits = 8, + .use_single_read = true, + .use_single_write = true, + .readable_reg = pca953x_readable_register, .writeable_reg = pca953x_writeable_register, .volatile_reg = pca953x_volatile_register, @@ -906,15 +909,18 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert) { DECLARE_BITMAP(val, MAX_LINE); + u8 regaddr; int ret; - ret = regcache_sync_region(chip->regmap, chip->regs->output, - chip->regs->output + NBANK(chip)); + regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0); + ret = regcache_sync_region(chip->regmap, regaddr, + regaddr + NBANK(chip) - 1); if (ret) goto out; - ret = regcache_sync_region(chip->regmap, chip->regs->direction, - chip->regs->direction + NBANK(chip)); + regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0); + ret = regcache_sync_region(chip->regmap, regaddr, + regaddr + NBANK(chip) - 1); if (ret) goto out; @@ -1127,14 +1133,14 @@ static int pca953x_regcache_sync(struct device *dev) * sync these registers first and only then sync the rest. 
*/ regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0); - ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip)); + ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1); if (ret) { dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret); return ret; } regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0); - ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip)); + ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1); if (ret) { dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret); return ret; @@ -1144,7 +1150,7 @@ static int pca953x_regcache_sync(struct device *dev) if (chip->driver_data & PCA_PCAL) { regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0); ret = regcache_sync_region(chip->regmap, regaddr, - regaddr + NBANK(chip)); + regaddr + NBANK(chip) - 1); if (ret) { dev_err(dev, "Failed to sync INT latch registers: %d\n", ret); @@ -1153,7 +1159,7 @@ static int pca953x_regcache_sync(struct device *dev) regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0); ret = regcache_sync_region(chip->regmap, regaddr, - regaddr + NBANK(chip)); + regaddr + NBANK(chip) - 1); if (ret) { dev_err(dev, "Failed to sync INT mask registers: %d\n", ret); diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c index 98109839102f..1020c2feb249 100644 --- a/drivers/gpio/gpio-sim.c +++ b/drivers/gpio/gpio-sim.c @@ -991,28 +991,22 @@ static struct configfs_attribute *gpio_sim_device_config_attrs[] = { }; struct gpio_sim_chip_name_ctx { - struct gpio_sim_device *dev; + struct fwnode_handle *swnode; char *page; }; static int gpio_sim_emit_chip_name(struct device *dev, void *data) { struct gpio_sim_chip_name_ctx *ctx = data; - struct fwnode_handle *swnode; - struct gpio_sim_bank *bank; /* This would be the sysfs device exported in /sys/class/gpio. 
*/ if (dev->class) return 0; - swnode = dev_fwnode(dev); + if (device_match_fwnode(dev, ctx->swnode)) + return sprintf(ctx->page, "%s\n", dev_name(dev)); - list_for_each_entry(bank, &ctx->dev->bank_list, siblings) { - if (bank->swnode == swnode) - return sprintf(ctx->page, "%s\n", dev_name(dev)); - } - - return -ENODATA; + return 0; } static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item, @@ -1020,7 +1014,7 @@ static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item, { struct gpio_sim_bank *bank = to_gpio_sim_bank(item); struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank); - struct gpio_sim_chip_name_ctx ctx = { dev, page }; + struct gpio_sim_chip_name_ctx ctx = { bank->swnode, page }; int ret; mutex_lock(&dev->lock); diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index ff2d2a1f9c73..e4fb4cb38a0f 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -443,15 +443,12 @@ static int tegra_gpio_child_to_parent_hwirq(struct gpio_chip *chip, return 0; } -static void *tegra_gpio_populate_parent_fwspec(struct gpio_chip *chip, - unsigned int parent_hwirq, - unsigned int parent_type) +static int tegra_gpio_populate_parent_fwspec(struct gpio_chip *chip, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { - struct irq_fwspec *fwspec; - - fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL); - if (!fwspec) - return NULL; + struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = chip->irq.parent_domain->fwnode; fwspec->param_count = 3; @@ -459,7 +456,7 @@ static void *tegra_gpio_populate_parent_fwspec(struct gpio_chip *chip, fwspec->param[1] = parent_hwirq; fwspec->param[2] = parent_type; - return fwspec; + return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c index de28a68daea0..54d9fa7da9c1 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -621,16 +621,13 @@ static int tegra186_gpio_irq_domain_translate(struct irq_domain *domain, return 0; } -static void *tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip, - unsigned int parent_hwirq, - unsigned int parent_type) +static int tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { struct tegra_gpio *gpio = gpiochip_get_data(chip); - struct irq_fwspec *fwspec; - - fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL); - if (!fwspec) - return NULL; + struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = chip->irq.parent_domain->fwnode; fwspec->param_count = 3; @@ -638,7 +635,7 @@ static void *tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip, fwspec->param[1] = parent_hwirq; fwspec->param[2] = parent_type; - return fwspec; + return 0; } static int tegra186_gpio_child_to_parent_hwirq(struct gpio_chip *chip, diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c index 9f66deab46ea..cc62c6e64103 100644 --- a/drivers/gpio/gpio-thunderx.c +++ b/drivers/gpio/gpio-thunderx.c @@ -15,8 +15,6 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/spinlock.h> -#include <asm-generic/msi.h> - #define GPIO_RX_DAT 0x0 #define GPIO_TX_SET 0x8 @@ -408,18 +406,15 @@ static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc, return 0; } -static void *thunderx_gpio_populate_parent_alloc_info(struct gpio_chip *chip, - unsigned int parent_hwirq, - unsigned int parent_type) +static int 
thunderx_gpio_populate_parent_alloc_info(struct gpio_chip *chip, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { - msi_alloc_info_t *info; - - info = kmalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return NULL; + msi_alloc_info_t *info = &gfwspec->msiinfo; info->hwirq = parent_hwirq; - return info; + return 0; } static int thunderx_gpio_probe(struct pci_dev *pdev, diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 23cddb265a0d..9db42f6a2043 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -19,6 +19,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_irq.h> +#include <linux/pinctrl/consumer.h> #define VF610_GPIO_PER_PORT 32 diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c index e6534ea1eaa7..5e108ba9956a 100644 --- a/drivers/gpio/gpio-visconti.c +++ b/drivers/gpio/gpio-visconti.c @@ -103,15 +103,12 @@ static int visconti_gpio_child_to_parent_hwirq(struct gpio_chip *gc, return -EINVAL; } -static void *visconti_gpio_populate_parent_fwspec(struct gpio_chip *chip, - unsigned int parent_hwirq, - unsigned int parent_type) +static int visconti_gpio_populate_parent_fwspec(struct gpio_chip *chip, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { - struct irq_fwspec *fwspec; - - fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL); - if (!fwspec) - return NULL; + struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = chip->irq.parent_domain->fwnode; fwspec->param_count = 3; @@ -119,7 +116,7 @@ static void *visconti_gpio_populate_parent_fwspec(struct gpio_chip *chip, fwspec->param[1] = parent_hwirq; fwspec->param[2] = parent_type; - return fwspec; + return 0; } static int visconti_gpio_probe(struct platform_device *pdev) diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c index b6d3a57e27ed..7f8e2fed2988 100644 --- a/drivers/gpio/gpio-xilinx.c +++ b/drivers/gpio/gpio-xilinx.c @@ -99,7 +99,7 @@ static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v) const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5); map[index] &= ~(0xFFFFFFFFul << offset); - map[index] |= v << offset; + map[index] |= (unsigned long)v << offset; } static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch) diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index f5aa5f93342a..b26e64338376 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -421,6 +421,10 @@ out_free_lh: * @work: the worker that implements software debouncing * @sw_debounced: flag indicating if the software debouncer is active * @level: the current debounced physical level of the line + * @hdesc: the Hardware Timestamp Engine (HTE) descriptor + * @raw_level: the line level at the time of event + * @total_discard_seq: the running counter of the discarded events + * @last_seqno: the last sequence number before debounce period expires */ struct line { struct gpio_desc *desc; @@ -1460,11 +1464,12 @@ static ssize_t linereq_read(struct file *file, static void linereq_free(struct linereq *lr) { unsigned int i; - bool hte; + bool hte = false; for (i = 0; i < lr->num_lines; i++) { - hte = !!test_bit(FLAG_EVENT_CLOCK_HTE, - &lr->lines[i].desc->flags); + if (lr->lines[i].desc) + hte = !!test_bit(FLAG_EVENT_CLOCK_HTE, + &lr->lines[i].desc->flags); edge_detector_stop(&lr->lines[i], hte); if (lr->lines[i].desc) gpiod_free(lr->lines[i].desc); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c 
index 9535f48e18d1..68d9f95d7799 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1107,7 +1107,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d, irq_hw_number_t hwirq; unsigned int type = IRQ_TYPE_NONE; struct irq_fwspec *fwspec = data; - void *parent_arg; + union gpio_irq_fwspec gpio_parent_fwspec = {}; unsigned int parent_hwirq; unsigned int parent_type; struct gpio_irq_chip *girq = &gc->irq; @@ -1147,14 +1147,15 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d, irq_set_probe(irq); /* This parent only handles asserted level IRQs */ - parent_arg = girq->populate_parent_alloc_arg(gc, parent_hwirq, parent_type); - if (!parent_arg) - return -ENOMEM; + ret = girq->populate_parent_alloc_arg(gc, &gpio_parent_fwspec, + parent_hwirq, parent_type); + if (ret) + return ret; chip_dbg(gc, "alloc_irqs_parent for %d parent hwirq %d\n", irq, parent_hwirq); irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key); - ret = irq_domain_alloc_irqs_parent(d, irq, 1, parent_arg); + ret = irq_domain_alloc_irqs_parent(d, irq, 1, &gpio_parent_fwspec); /* * If the parent irqdomain is msi, the interrupts have already * been allocated, so the EEXIST is good. @@ -1166,7 +1167,6 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d, "failed to allocate parent hwirq %d for hwirq %lu\n", parent_hwirq, hwirq); - kfree(parent_arg); return ret; } @@ -1181,15 +1181,18 @@ static void gpiochip_hierarchy_setup_domain_ops(struct irq_domain_ops *ops) ops->activate = gpiochip_irq_domain_activate; ops->deactivate = gpiochip_irq_domain_deactivate; ops->alloc = gpiochip_hierarchy_irq_domain_alloc; - ops->free = irq_domain_free_irqs_common; /* - * We only allow overriding the translate() function for + * We only allow overriding the translate() and free() functions for * hierarchical chips, and this should only be done if the user - * really need something other than 1:1 translation. + * really needs something other than 1:1 translation for the translate() + * callback, and free() if the user wants to release any resources that + * were allocated during the callbacks, for example in populate_parent_alloc_arg.
*/ if (!ops->translate) ops->translate = gpiochip_hierarchy_irq_domain_translate; + if (!ops->free) + ops->free = irq_domain_free_irqs_common; } static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc) @@ -1230,34 +1233,28 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc) return !!gc->irq.parent_domain; } -void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type) +int gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { - struct irq_fwspec *fwspec; - - fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL); - if (!fwspec) - return NULL; + struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = gc->irq.parent_domain->fwnode; fwspec->param_count = 2; fwspec->param[0] = parent_hwirq; fwspec->param[1] = parent_type; - return fwspec; + return 0; } EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_twocell); -void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, - unsigned int parent_hwirq, - unsigned int parent_type) +int gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { - struct irq_fwspec *fwspec; - - fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL); - if (!fwspec) - return NULL; + struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = gc->irq.parent_domain->fwnode; fwspec->param_count = 4; @@ -1266,7 +1263,7 @@ void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, fwspec->param[2] = 0; fwspec->param[3] = parent_type; - return fwspec; + return 0; } EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_fourcell); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index e88c497fa010..f65656df3619 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -256,7 +256,6 @@ config DRM_AMDGPU select HWMON select BACKLIGHT_CLASS_DEVICE select INTERVAL_TREE - select DRM_BUDDY help Choose this option if you have a recent AMD Radeon graphics card. 
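For reference, the populate_parent_alloc_arg conversion running through the gpiolib and GPIO driver hunks above changes the callback contract from "return a kmalloc()ed pointer, NULL on failure" to "fill a caller-provided union gpio_irq_fwspec and return 0 or a negative errno", which is why the kfree(parent_arg) in the error path disappears. A minimal sketch of a driver callback under the new contract, mirroring the three-cell case from the msc313/tegra hunks (the my_gpio_* name and the leading cell value 0, standing in for GIC_SPI, are illustrative only, not part of this patch):

static int my_gpio_populate_parent_fwspec(struct gpio_chip *gc,
					  union gpio_irq_fwspec *gfwspec,
					  unsigned int parent_hwirq,
					  unsigned int parent_type)
{
	/* Fill the caller-provided union in place; no allocation needed. */
	struct irq_fwspec *fwspec = &gfwspec->fwspec;

	fwspec->fwnode = gc->irq.parent_domain->fwnode;
	fwspec->param_count = 3;
	fwspec->param[0] = 0;	/* first cell, e.g. GIC_SPI for a GIC parent */
	fwspec->param[1] = parent_hwirq;
	fwspec->param[2] = parent_type;

	return 0;
}

Since the union now lives on the stack of gpiochip_hierarchy_irq_domain_alloc(), irq_domain_free_irqs_common remains a valid default free() callback; a driver only overrides it if the callback really allocates extra resources.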
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 1f8161cd507f..3b1c675aba34 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -714,7 +714,8 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev, { bool all_hub = false; - if (adev->family == AMDGPU_FAMILY_AI) + if (adev->family == AMDGPU_FAMILY_AI || + adev->family == AMDGPU_FAMILY_RV) all_hub = true; return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 6b6d46e29e6e..4608599ba6bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1364,16 +1364,10 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct amdkfd_process_info *process_info = vm->process_info; - struct amdgpu_bo *pd = vm->root.bo; if (!process_info) return; - /* Release eviction fence from PD */ - amdgpu_bo_reserve(pd, false); - amdgpu_bo_fence(pd, NULL, false); - amdgpu_bo_unreserve(pd); - /* Update process info */ mutex_lock(&process_info->lock); process_info->n_vms--; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 714178f1b6c6..2168163aad2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -40,7 +40,7 @@ static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu) { struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list, rhead); - + mutex_destroy(&list->bo_list_mutex); kvfree(list); } @@ -136,6 +136,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, trace_amdgpu_cs_bo_status(list->num_entries, total_size); + mutex_init(&list->bo_list_mutex); *result = list; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index 529d52a204cf..9caea1688fc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -47,6 +47,10 @@ struct amdgpu_bo_list { struct amdgpu_bo *oa_obj; unsigned first_userptr; unsigned num_entries; + + /* Protect access during command submission. 
+ */ + struct mutex bo_list_mutex; }; int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b28af04b0c3e..d8f1335bc68f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -519,6 +519,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, return r; } + mutex_lock(&p->bo_list->bo_list_mutex); + /* One for TTM and one for the CS job */ amdgpu_bo_list_for_each_entry(e, p->bo_list) e->tv.num_shared = 2; @@ -651,6 +653,7 @@ out_free_user_pages: kvfree(e->user_pages); e->user_pages = NULL; } + mutex_unlock(&p->bo_list->bo_list_mutex); } return r; } @@ -690,9 +693,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, { unsigned i; - if (error && backoff) + if (error && backoff) { ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); + mutex_unlock(&parser->bo_list->bo_list_mutex); + } for (i = 0; i < parser->num_post_deps; i++) { drm_syncobj_put(parser->post_deps[i].syncobj); @@ -832,12 +837,16 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) continue; r = amdgpu_vm_bo_update(adev, bo_va, false); - if (r) + if (r) { + mutex_unlock(&p->bo_list->bo_list_mutex); return r; + } r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update); - if (r) + if (r) { + mutex_unlock(&p->bo_list->bo_list_mutex); return r; + } } r = amdgpu_vm_handle_moved(adev, vm); @@ -1278,6 +1287,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); mutex_unlock(&p->adev->notifier_lock); + mutex_unlock(&p->bo_list->bo_list_mutex); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 625424f3082b..58df107e3beb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5164,7 +5164,7 @@ int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev, */ amdgpu_unregister_gpu_instance(tmp_adev); - drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); + drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true); /* disable ras on ALL IPs */ if (!need_emergency_restart && diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 17c9bbe0cbc5..4dfd6724b3ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1528,6 +1528,21 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc, stime, etime, mode); } +static bool +amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) +{ + struct drm_device *dev = adev_to_drm(adev); + struct drm_fb_helper *fb_helper = dev->fb_helper; + + if (!fb_helper || !fb_helper->buffer) + return false; + + if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj) + return false; + + return true; +} + int amdgpu_display_suspend_helper(struct amdgpu_device *adev) { struct drm_device *dev = adev_to_drm(adev); @@ -1563,10 +1578,12 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev) continue; } robj = gem_to_amdgpu_bo(fb->obj[0]); - r = amdgpu_bo_reserve(robj, true); - if (r == 0) { - amdgpu_bo_unpin(robj); - amdgpu_bo_unreserve(robj); + if (!amdgpu_display_robj_is_fb(adev, robj)) { + r = amdgpu_bo_reserve(robj, true); + if (r == 0) { + amdgpu_bo_unpin(robj); + amdgpu_bo_unreserve(robj); + } } } return 0; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index b4cf8717f554..89011bae7588 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -320,6 +320,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev) if (!amdgpu_device_has_dc_support(adev)) { if (!adev->enable_virtual_display) /* Disable vblank IRQs aggressively for power-saving */ + /* XXX: can this be enabled for DC? */ adev_to_drm(adev)->vblank_disable_immediate = true; r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h index 6546552e596c..acfa207cf970 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -30,15 +30,12 @@ #include <drm/ttm/ttm_resource.h> #include <drm/ttm/ttm_range_manager.h> -#include "amdgpu_vram_mgr.h" - /* state back for walking over vram_mgr and gtt_mgr allocations */ struct amdgpu_res_cursor { uint64_t start; uint64_t size; uint64_t remaining; - void *node; - uint32_t mem_type; + struct drm_mm_node *node; }; /** @@ -55,63 +52,27 @@ static inline void amdgpu_res_first(struct ttm_resource *res, uint64_t start, uint64_t size, struct amdgpu_res_cursor *cur) { - struct drm_buddy_block *block; - struct list_head *head, *next; struct drm_mm_node *node; - if (!res) - goto fallback; - - BUG_ON(start + size > res->num_pages << PAGE_SHIFT); - - cur->mem_type = res->mem_type; - - switch (cur->mem_type) { - case TTM_PL_VRAM: - head = &to_amdgpu_vram_mgr_resource(res)->blocks; - - block = list_first_entry_or_null(head, - struct drm_buddy_block, - link); - if (!block) - goto fallback; - - while (start >= amdgpu_vram_mgr_block_size(block)) { - start -= amdgpu_vram_mgr_block_size(block); - - next = block->link.next; - if (next != head) - block = list_entry(next, struct drm_buddy_block, link); - } - - cur->start = amdgpu_vram_mgr_block_start(block) + start; - cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size); - cur->remaining = size; - cur->node = block; - break; - case TTM_PL_TT: - node = to_ttm_range_mgr_node(res)->mm_nodes; - while (start >= node->size << PAGE_SHIFT) - start -= node++->size << PAGE_SHIFT; - - cur->start = (node->start << PAGE_SHIFT) + start; - cur->size = min((node->size << PAGE_SHIFT) - start, size); + if (!res || res->mem_type == TTM_PL_SYSTEM) { + cur->start = start; + cur->size = size; cur->remaining = size; - cur->node = node; - break; - default: - goto fallback; + cur->node = NULL; + WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT); + return; } - return; + BUG_ON(start + size > res->num_pages << PAGE_SHIFT); -fallback: - cur->start = start; - cur->size = size; + node = to_ttm_range_mgr_node(res)->mm_nodes; + while (start >= node->size << PAGE_SHIFT) + start -= node++->size << PAGE_SHIFT; + + cur->start = (node->start << PAGE_SHIFT) + start; + cur->size = min((node->size << PAGE_SHIFT) - start, size); cur->remaining = size; - cur->node = NULL; - WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT); - return; + cur->node = node; } /** @@ -124,9 +85,7 @@ fallback: */ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size) { - struct drm_buddy_block *block; - struct drm_mm_node *node; - struct list_head *next; + struct drm_mm_node *node = cur->node; BUG_ON(size > cur->remaining); @@ -140,27 +99,9 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size) return; 
} - switch (cur->mem_type) { - case TTM_PL_VRAM: - block = cur->node; - - next = block->link.next; - block = list_entry(next, struct drm_buddy_block, link); - - cur->node = block; - cur->start = amdgpu_vram_mgr_block_start(block); - cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining); - break; - case TTM_PL_TT: - node = cur->node; - - cur->node = ++node; - cur->start = node->start << PAGE_SHIFT; - cur->size = min(node->size << PAGE_SHIFT, cur->remaining); - break; - default: - return; - } + cur->node = ++node; + cur->start = node->start << PAGE_SHIFT; + cur->size = min(node->size << PAGE_SHIFT, cur->remaining); } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 6a70818039dd..9120ae80ef52 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -26,7 +26,6 @@ #include <linux/dma-direction.h> #include <drm/gpu_scheduler.h> -#include "amdgpu_vram_mgr.h" #include "amdgpu.h" #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) @@ -39,6 +38,15 @@ #define AMDGPU_POISON 0xd0bed0be +struct amdgpu_vram_mgr { + struct ttm_resource_manager manager; + struct drm_mm mm; + spinlock_t lock; + struct list_head reservations_pending; + struct list_head reserved_pages; + atomic64_t vis_usage; +}; + struct amdgpu_gtt_mgr { struct ttm_resource_manager manager; struct drm_mm mm; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index 576849e95296..108e8e8a1a36 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -496,7 +496,8 @@ static int amdgpu_vkms_sw_init(void *handle) adev_to_drm(adev)->mode_config.max_height = YRES_MAX; adev_to_drm(adev)->mode_config.preferred_depth = 24; - adev_to_drm(adev)->mode_config.prefer_shadow = 1; + /* disable prefer shadow for now due to hibernation issues */ + adev_to_drm(adev)->mode_config.prefer_shadow = 0; adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 49e4092f447f..0a7611648573 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -32,10 +32,8 @@ #include "atom.h" struct amdgpu_vram_reservation { - u64 start; - u64 size; - struct list_head allocated; - struct list_head blocks; + struct list_head node; + struct drm_mm_node mm_node; }; static inline struct amdgpu_vram_mgr * @@ -188,18 +186,18 @@ const struct attribute_group amdgpu_vram_mgr_attr_group = { }; /** - * amdgpu_vram_mgr_vis_size - Calculate visible block size + * amdgpu_vram_mgr_vis_size - Calculate visible node size * * @adev: amdgpu_device pointer - * @block: DRM BUDDY block structure + * @node: MM node structure * - * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM + * Calculate how many bytes of the MM node are inside visible VRAM */ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, - struct drm_buddy_block *block) + struct drm_mm_node *node) { - u64 start = amdgpu_vram_mgr_block_start(block); - u64 end = start + amdgpu_vram_mgr_block_size(block); + uint64_t start = node->start << PAGE_SHIFT; + uint64_t end = (node->size + node->start) << PAGE_SHIFT; if (start >= adev->gmc.visible_vram_size) return 0; @@ -220,9 +218,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct ttm_resource *res = bo->tbo.resource; - struct 
amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res); - struct drm_buddy_block *block; - u64 usage = 0; + unsigned pages = res->num_pages; + struct drm_mm_node *mm; + u64 usage; if (amdgpu_gmc_vram_full_visible(&adev->gmc)) return amdgpu_bo_size(bo); @@ -230,8 +228,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT) return 0; - list_for_each_entry(block, &vres->blocks, link) - usage += amdgpu_vram_mgr_vis_size(adev, block); + mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0]; + for (usage = 0; pages; pages -= mm->size, mm++) + usage += amdgpu_vram_mgr_vis_size(adev, mm); return usage; } @@ -241,30 +240,23 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); - struct drm_buddy *mm = &mgr->mm; + struct drm_mm *mm = &mgr->mm; struct amdgpu_vram_reservation *rsv, *temp; - struct drm_buddy_block *block; uint64_t vis_usage; - list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) { - if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size, - rsv->size, mm->chunk_size, &rsv->allocated, - DRM_BUDDY_RANGE_ALLOCATION)) - continue; - - block = amdgpu_vram_mgr_first_block(&rsv->allocated); - if (!block) + list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) { + if (drm_mm_reserve_node(mm, &rsv->mm_node)) continue; dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n", - rsv->start, rsv->size); + rsv->mm_node.start, rsv->mm_node.size); - vis_usage = amdgpu_vram_mgr_vis_size(adev, block); + vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node); atomic64_add(vis_usage, &mgr->vis_usage); spin_lock(&man->bdev->lru_lock); - man->usage += rsv->size; + man->usage += rsv->mm_node.size << PAGE_SHIFT; spin_unlock(&man->bdev->lru_lock); - list_move(&rsv->blocks, &mgr->reserved_pages); + list_move(&rsv->node, &mgr->reserved_pages); } } @@ -286,16 +278,14 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, if (!rsv) return -ENOMEM; - INIT_LIST_HEAD(&rsv->allocated); - INIT_LIST_HEAD(&rsv->blocks); + INIT_LIST_HEAD(&rsv->node); + rsv->mm_node.start = start >> PAGE_SHIFT; + rsv->mm_node.size = size >> PAGE_SHIFT; - rsv->start = start; - rsv->size = size; - - mutex_lock(&mgr->lock); - list_add_tail(&rsv->blocks, &mgr->reservations_pending); + spin_lock(&mgr->lock); + list_add_tail(&rsv->node, &mgr->reservations_pending); amdgpu_vram_mgr_do_reserve(&mgr->manager); - mutex_unlock(&mgr->lock); + spin_unlock(&mgr->lock); return 0; } @@ -317,19 +307,19 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, struct amdgpu_vram_reservation *rsv; int ret; - mutex_lock(&mgr->lock); + spin_lock(&mgr->lock); - list_for_each_entry(rsv, &mgr->reservations_pending, blocks) { - if (rsv->start <= start && - (start < (rsv->start + rsv->size))) { + list_for_each_entry(rsv, &mgr->reservations_pending, node) { + if ((rsv->mm_node.start <= start) && + (start < (rsv->mm_node.start + rsv->mm_node.size))) { ret = -EBUSY; goto out; } } - list_for_each_entry(rsv, &mgr->reserved_pages, blocks) { - if (rsv->start <= start && - (start < (rsv->start + rsv->size))) { + list_for_each_entry(rsv, &mgr->reserved_pages, node) { + if ((rsv->mm_node.start <= start) && + (start < (rsv->mm_node.start + rsv->mm_node.size))) { ret = 0; goto out; } @@ -337,11 +327,33 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, ret = -ENOENT; 
out: - mutex_unlock(&mgr->lock); + spin_unlock(&mgr->lock); return ret; } /** + * amdgpu_vram_mgr_virt_start - update virtual start address + * + * @mem: ttm_resource to update + * @node: just allocated node + * + * Calculate a virtual BO start address to easily check if everything is CPU + * accessible. + */ +static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem, + struct drm_mm_node *node) +{ + unsigned long start; + + start = node->start + node->size; + if (start > mem->num_pages) + start -= mem->num_pages; + else + start = 0; + mem->start = max(mem->start, start); +} + +/** * amdgpu_vram_mgr_new - allocate new ranges * * @man: TTM memory type manager @@ -356,44 +368,46 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, const struct ttm_place *place, struct ttm_resource **res) { - u64 vis_usage = 0, max_bytes, cur_size, min_block_size; + unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages; struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); - struct amdgpu_vram_mgr_resource *vres; - u64 size, remaining_size, lpfn, fpfn; - struct drm_buddy *mm = &mgr->mm; - struct drm_buddy_block *block; - unsigned long pages_per_block; + uint64_t vis_usage = 0, mem_bytes, max_bytes; + struct ttm_range_mgr_node *node; + struct drm_mm *mm = &mgr->mm; + enum drm_mm_insert_mode mode; + unsigned i; int r; - lpfn = place->lpfn << PAGE_SHIFT; + lpfn = place->lpfn; if (!lpfn) - lpfn = man->size; - - fpfn = place->fpfn << PAGE_SHIFT; + lpfn = man->size >> PAGE_SHIFT; max_bytes = adev->gmc.mc_vram_size; if (tbo->type != ttm_bo_type_kernel) max_bytes -= AMDGPU_VM_RESERVED_VRAM; + mem_bytes = tbo->base.size; if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { - pages_per_block = ~0ul; + pages_per_node = ~0ul; + num_nodes = 1; } else { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - pages_per_block = HPAGE_PMD_NR; + pages_per_node = HPAGE_PMD_NR; #else /* default to 2MB */ - pages_per_block = 2UL << (20UL - PAGE_SHIFT); + pages_per_node = 2UL << (20UL - PAGE_SHIFT); #endif - pages_per_block = max_t(uint32_t, pages_per_block, - tbo->page_alignment); + pages_per_node = max_t(uint32_t, pages_per_node, + tbo->page_alignment); + num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node); } - vres = kzalloc(sizeof(*vres), GFP_KERNEL); - if (!vres) + node = kvmalloc(struct_size(node, mm_nodes, num_nodes), + GFP_KERNEL | __GFP_ZERO); + if (!node) return -ENOMEM; - ttm_resource_init(tbo, place, &vres->base); + ttm_resource_init(tbo, place, &node->base); /* bail out quickly if there's likely not enough VRAM for this BO */ if (ttm_resource_manager_usage(man) > max_bytes) { @@ -401,130 +415,66 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, goto error_fini; } - INIT_LIST_HEAD(&vres->blocks); - + mode = DRM_MM_INSERT_BEST; if (place->flags & TTM_PL_FLAG_TOPDOWN) - vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; - - if (fpfn || lpfn != man->size) - /* Allocate blocks in desired range */ - vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; - - remaining_size = vres->base.num_pages << PAGE_SHIFT; - - mutex_lock(&mgr->lock); - while (remaining_size) { - if (tbo->page_alignment) - min_block_size = tbo->page_alignment << PAGE_SHIFT; - else - min_block_size = mgr->default_page_size; - - BUG_ON(min_block_size < mm->chunk_size); - - /* Limit maximum size to 2GiB due to SG table limitations */ - size = min(remaining_size, 2ULL << 30); - - if (size >= pages_per_block << PAGE_SHIFT) - min_block_size = pages_per_block << PAGE_SHIFT; - - cur_size = size; - - 
if (fpfn + size != place->lpfn << PAGE_SHIFT) { - /* - * Except for actual range allocation, modify the size and - * min_block_size conforming to continuous flag enablement - */ - if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { - size = roundup_pow_of_two(size); - min_block_size = size; - /* - * Modify the size value if size is not - * aligned with min_block_size - */ - } else if (!IS_ALIGNED(size, min_block_size)) { - size = round_up(size, min_block_size); + mode = DRM_MM_INSERT_HIGH; + + pages_left = node->base.num_pages; + + /* Limit maximum size to 2GB due to SG table limitations */ + pages = min(pages_left, 2UL << (30 - PAGE_SHIFT)); + + i = 0; + spin_lock(&mgr->lock); + while (pages_left) { + uint32_t alignment = tbo->page_alignment; + + if (pages >= pages_per_node) + alignment = pages_per_node; + + r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages, + alignment, 0, place->fpfn, + lpfn, mode); + if (unlikely(r)) { + if (pages > pages_per_node) { + if (is_power_of_2(pages)) + pages = pages / 2; + else + pages = rounddown_pow_of_two(pages); + continue; } + goto error_free; } - r = drm_buddy_alloc_blocks(mm, fpfn, - lpfn, - size, - min_block_size, - &vres->blocks, - vres->flags); - if (unlikely(r)) - goto error_free_blocks; - - if (size > remaining_size) - remaining_size = 0; - else - remaining_size -= size; - } - mutex_unlock(&mgr->lock); - - if (cur_size != size) { - struct drm_buddy_block *block; - struct list_head *trim_list; - u64 original_size; - LIST_HEAD(temp); - - trim_list = &vres->blocks; - original_size = vres->base.num_pages << PAGE_SHIFT; - - /* - * If size value is rounded up to min_block_size, trim the last - * block to the required size - */ - if (!list_is_singular(&vres->blocks)) { - block = list_last_entry(&vres->blocks, typeof(*block), link); - list_move_tail(&block->link, &temp); - trim_list = &temp; - /* - * Compute the original_size value by subtracting the - * last block size with (aligned size - original size) - */ - original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size); - } + vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]); + amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]); + pages_left -= pages; + ++i; - mutex_lock(&mgr->lock); - drm_buddy_block_trim(mm, - original_size, - trim_list); - mutex_unlock(&mgr->lock); - - if (!list_empty(&temp)) - list_splice_tail(trim_list, &vres->blocks); - } - - list_for_each_entry(block, &vres->blocks, link) - vis_usage += amdgpu_vram_mgr_vis_size(adev, block); - - block = amdgpu_vram_mgr_first_block(&vres->blocks); - if (!block) { - r = -EINVAL; - goto error_fini; + if (pages > pages_left) + pages = pages_left; } + spin_unlock(&mgr->lock); - vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT; - - if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks)) - vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS; + if (i == 1) + node->base.placement |= TTM_PL_FLAG_CONTIGUOUS; if (adev->gmc.xgmi.connected_to_cpu) - vres->base.bus.caching = ttm_cached; + node->base.bus.caching = ttm_cached; else - vres->base.bus.caching = ttm_write_combined; + node->base.bus.caching = ttm_write_combined; atomic64_add(vis_usage, &mgr->vis_usage); - *res = &vres->base; + *res = &node->base; return 0; -error_free_blocks: - drm_buddy_free_list(mm, &vres->blocks); - mutex_unlock(&mgr->lock); +error_free: + while (i--) + drm_mm_remove_node(&node->mm_nodes[i]); + spin_unlock(&mgr->lock); error_fini: - ttm_resource_fini(man, &vres->base); - kfree(vres); + ttm_resource_fini(man, &node->base); + 
kvfree(node); return r; } @@ -540,26 +490,27 @@ error_fini: static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, struct ttm_resource *res) { - struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res); + struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res); struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); - struct drm_buddy *mm = &mgr->mm; - struct drm_buddy_block *block; uint64_t vis_usage = 0; + unsigned i, pages; - mutex_lock(&mgr->lock); - list_for_each_entry(block, &vres->blocks, link) - vis_usage += amdgpu_vram_mgr_vis_size(adev, block); + spin_lock(&mgr->lock); + for (i = 0, pages = res->num_pages; pages; + pages -= node->mm_nodes[i].size, ++i) { + struct drm_mm_node *mm = &node->mm_nodes[i]; + drm_mm_remove_node(mm); + vis_usage += amdgpu_vram_mgr_vis_size(adev, mm); + } amdgpu_vram_mgr_do_reserve(man); - - drm_buddy_free_list(mm, &vres->blocks); - mutex_unlock(&mgr->lock); + spin_unlock(&mgr->lock); atomic64_sub(vis_usage, &mgr->vis_usage); ttm_resource_fini(man, res); - kfree(vres); + kvfree(node); } /** @@ -591,7 +542,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, if (!*sgt) return -ENOMEM; - /* Determine the number of DRM_BUDDY blocks to export */ + /* Determine the number of DRM_MM nodes to export */ amdgpu_res_first(res, offset, length, &cursor); while (cursor.remaining) { num_entries++; @@ -607,10 +558,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, sg->length = 0; /* - * Walk down DRM_BUDDY blocks to populate scatterlist nodes - * @note: Use iterator api to get first the DRM_BUDDY block + * Walk down DRM_MM nodes to populate scatterlist nodes + * @note: Use iterator api to get first the DRM_MM node * and the number of bytes from it. 
Access the following - * DRM_BUDDY block(s) if more buffer needs to exported + * DRM_MM node(s) if more buffer needs to be exported */ amdgpu_res_first(res, offset, length, &cursor); for_each_sgtable_sg((*sgt), sg, i) { @@ -697,22 +648,13 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); - struct drm_buddy *mm = &mgr->mm; - struct drm_buddy_block *block; drm_printf(printer, " vis usage:%llu\n", amdgpu_vram_mgr_vis_usage(mgr)); - mutex_lock(&mgr->lock); - drm_printf(printer, "default_page_size: %lluKiB\n", - mgr->default_page_size >> 10); - - drm_buddy_print(mm, printer); - - drm_printf(printer, "reserved:\n"); - list_for_each_entry(block, &mgr->reserved_pages, link) - drm_buddy_block_print(mm, block, printer); - mutex_unlock(&mgr->lock); + spin_lock(&mgr->lock); + drm_mm_print(&mgr->mm, printer); + spin_unlock(&mgr->lock); } static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { @@ -732,21 +674,16 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) { struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; struct ttm_resource_manager *man = &mgr->manager; - int err; ttm_resource_manager_init(man, &adev->mman.bdev, adev->gmc.real_vram_size); man->func = &amdgpu_vram_mgr_func; - err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); - if (err) - return err; - - mutex_init(&mgr->lock); + drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT); + spin_lock_init(&mgr->lock); INIT_LIST_HEAD(&mgr->reservations_pending); INIT_LIST_HEAD(&mgr->reserved_pages); - mgr->default_page_size = PAGE_SIZE; ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager); ttm_resource_manager_set_used(man, true); @@ -774,16 +711,16 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) if (ret) return; - mutex_lock(&mgr->lock); - list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) + spin_lock(&mgr->lock); + list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) kfree(rsv); - list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) { - drm_buddy_free_list(&mgr->mm, &rsv->blocks); + list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) { + drm_mm_remove_node(&rsv->mm_node); kfree(rsv); } - drm_buddy_fini(&mgr->mm); - mutex_unlock(&mgr->lock); + drm_mm_takedown(&mgr->mm); + spin_unlock(&mgr->lock); ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h deleted file mode 100644 index 9a2db87186c7..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h +++ /dev/null @@ -1,89 +0,0 @@ -/* SPDX-License-Identifier: MIT - * Copyright 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software.
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef __AMDGPU_VRAM_MGR_H__ -#define __AMDGPU_VRAM_MGR_H__ - -#include <drm/drm_buddy.h> - -struct amdgpu_vram_mgr { - struct ttm_resource_manager manager; - struct drm_buddy mm; - /* protects access to buffer objects */ - struct mutex lock; - struct list_head reservations_pending; - struct list_head reserved_pages; - atomic64_t vis_usage; - u64 default_page_size; -}; - -struct amdgpu_vram_mgr_resource { - struct ttm_resource base; - struct list_head blocks; - unsigned long flags; -}; - -static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block) -{ - return drm_buddy_block_offset(block); -} - -static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block) -{ - return PAGE_SIZE << drm_buddy_block_order(block); -} - -static inline struct drm_buddy_block * -amdgpu_vram_mgr_first_block(struct list_head *list) -{ - return list_first_entry_or_null(list, struct drm_buddy_block, link); -} - -static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head) -{ - struct drm_buddy_block *block; - u64 start, size; - - block = amdgpu_vram_mgr_first_block(head); - if (!block) - return false; - - while (head != block->link.next) { - start = amdgpu_vram_mgr_block_start(block); - size = amdgpu_vram_mgr_block_size(block); - - block = list_entry(block->link.next, struct drm_buddy_block, link); - if (start + size != amdgpu_vram_mgr_block_start(block)) - return false; - } - - return true; -} - -static inline struct amdgpu_vram_mgr_resource * -to_amdgpu_vram_mgr_resource(struct ttm_resource *res) -{ - return container_of(res, struct amdgpu_vram_mgr_resource, base); -} - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 288fce7dc0ed..9c964cd3b5d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2796,7 +2796,8 @@ static int dce_v10_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.max_height = 16384; adev_to_drm(adev)->mode_config.preferred_depth = 24; - adev_to_drm(adev)->mode_config.prefer_shadow = 1; + /* disable prefer shadow for now due to hibernation issues */ + adev_to_drm(adev)->mode_config.prefer_shadow = 0; adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index cbe5250b31cb..e0ad9f27dc3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2914,7 +2914,8 @@ static int dce_v11_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.max_height = 16384; adev_to_drm(adev)->mode_config.preferred_depth = 24; - adev_to_drm(adev)->mode_config.prefer_shadow = 1; + /* disable prefer shadow for now due to hibernation issues */ + adev_to_drm(adev)->mode_config.prefer_shadow = 0; adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 982855e6cf52..3caf6f386042 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2673,7 +2673,8 @@ static int dce_v6_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.max_width = 16384; adev_to_drm(adev)->mode_config.max_height = 16384; adev_to_drm(adev)->mode_config.preferred_depth = 24; - adev_to_drm(adev)->mode_config.prefer_shadow = 1; + /* disable prefer shadow for now due to hibernation issues */ + adev_to_drm(adev)->mode_config.prefer_shadow = 0; adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 84440741c60b..7c75df5bffed 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2693,7 +2693,8 @@ static int dce_v8_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.max_height = 16384; adev_to_drm(adev)->mode_config.preferred_depth = 24; - adev_to_drm(adev)->mode_config.prefer_shadow = 1; + /* disable prefer shadow for now due to hibernation issues */ + adev_to_drm(adev)->mode_config.prefer_shadow = 0; adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index bf4200457772..a08769c5e94b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -184,6 +184,8 @@ static void kfd_device_info_init(struct kfd_dev *kfd, /* Navi2x+, Navi1x+ */ if (gc_version == IP_VERSION(10, 3, 6)) kfd->device_info.no_atomic_fw_version = 14; + else if (gc_version == IP_VERSION(10, 3, 7)) + kfd->device_info.no_atomic_fw_version = 3; else if (gc_version >= IP_VERSION(10, 3, 0)) kfd->device_info.no_atomic_fw_version = 92; else if (gc_version >= IP_VERSION(10, 1, 1)) diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index b4029c0d5d8c..ec6771e87e73 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -6,7 +6,7 @@ config DRM_AMD_DC bool "AMD DC - Enable new display engine" default y select SND_HDA_COMPONENT if SND_HDA_CORE - select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) + select DRM_AMD_DC_DCN if (X86 || PPC_LONG_DOUBLE_128) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) help Choose this option if you want to use the new display engine support for AMDGPU. 
This adds required support for Vega and diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 39b425d83bb1..3087dd1a1856 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -72,6 +72,7 @@ #include <linux/pci.h> #include <linux/firmware.h> #include <linux/component.h> +#include <linux/dmi.h> #include <drm/display/drm_dp_mst_helper.h> #include <drm/display/drm_hdmi_helper.h> @@ -462,6 +463,26 @@ static void dm_pflip_high_irq(void *interrupt_params) vrr_active, (int) !e); } +static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc) +{ + struct drm_crtc *crtc = &acrtc->base; + struct drm_device *dev = crtc->dev; + unsigned long flags; + + drm_crtc_handle_vblank(crtc); + + spin_lock_irqsave(&dev->event_lock, flags); + + /* Send completion event for cursor-only commits */ + if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { + drm_crtc_send_vblank_event(crtc, acrtc->event); + drm_crtc_vblank_put(crtc); + acrtc->event = NULL; + } + + spin_unlock_irqrestore(&dev->event_lock, flags); +} + static void dm_vupdate_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; @@ -500,7 +521,7 @@ static void dm_vupdate_high_irq(void *interrupt_params) * if a pageflip happened inside front-porch. */ if (vrr_active) { - drm_crtc_handle_vblank(&acrtc->base); + dm_crtc_handle_vblank(acrtc); /* BTR processing for pre-DCE12 ASICs */ if (acrtc->dm_irq_params.stream && @@ -552,7 +573,7 @@ static void dm_crtc_high_irq(void *interrupt_params) * to dm_vupdate_high_irq after end of front-porch. */ if (!vrr_active) - drm_crtc_handle_vblank(&acrtc->base); + dm_crtc_handle_vblank(acrtc); /** * Following stuff must happen at start of vblank, for crc @@ -1382,6 +1403,41 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev) return false; } +static const struct dmi_system_id hpd_disconnect_quirk_table[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), + }, + }, + {} +}; + +static void retrieve_dmi_info(struct amdgpu_display_manager *dm) +{ + const struct dmi_system_id *dmi_id; + + dm->aux_hpd_discon_quirk = false; + + dmi_id = dmi_first_match(hpd_disconnect_quirk_table); + if (dmi_id) { + dm->aux_hpd_discon_quirk = true; + DRM_INFO("aux_hpd_discon_quirk attached\n"); + } +} + static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; @@ -1508,6 +1564,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) } INIT_LIST_HEAD(&adev->dm.da_list); + + retrieve_dmi_info(&adev->dm); + /* Display Core create. 
*/ adev->dm.dc = dc_create(&init_data); @@ -1594,7 +1653,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work(); #endif - if (dc_enable_dmub_notifications(adev->dm.dc)) { + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { init_completion(&adev->dm.dmub_aux_transfer_done); adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); if (!adev->dm.dmub_notify) { @@ -1630,6 +1689,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } + /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. + * It is expected that DMUB will resend any pending notifications at this point, for + * example HPD from DPIA. + */ + if (dc_is_dmub_outbox_supported(adev->dm.dc)) + dc_enable_dmub_outbox(adev->dm.dc); + /* create fake encoders for MST */ dm_dp_create_fake_mst_encoders(adev); @@ -2619,9 +2685,6 @@ static int dm_resume(void *handle) */ link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); - if (dc_enable_dmub_notifications(adev->dm.dc)) - amdgpu_dm_outbox_init(adev); - r = dm_dmub_hw_init(adev); if (r) DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); @@ -2639,6 +2702,11 @@ static int dm_resume(void *handle) } } + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + amdgpu_dm_outbox_init(adev); + dc_enable_dmub_outbox(adev->dm.dc); + } + WARN_ON(!dc_commit_state(dm->dc, dc_state)); dm_gpureset_commit_state(dm->cached_dc_state, dm); @@ -2660,13 +2728,15 @@ static int dm_resume(void *handle) /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ dc_resource_state_construct(dm->dc, dm_state->context); - /* Re-enable outbox interrupts for DPIA. */ - if (dc_enable_dmub_notifications(adev->dm.dc)) - amdgpu_dm_outbox_init(adev); - /* Before powering on DC we need to re-initialize DMUB. */ dm_dmub_hw_resume(adev); + /* Re-enable outbox interrupts for DPIA. */ + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + amdgpu_dm_outbox_init(adev); + dc_enable_dmub_outbox(adev->dm.dc); + } + /* power on hardware */ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); @@ -3822,7 +3892,8 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) adev_to_drm(adev)->mode_config.max_height = 16384; adev_to_drm(adev)->mode_config.preferred_depth = 24; - adev_to_drm(adev)->mode_config.prefer_shadow = 1; + /* disable prefer shadow for now due to hibernation issues */ + adev_to_drm(adev)->mode_config.prefer_shadow = 0; /* indicates support for immediate flip */ adev_to_drm(adev)->mode_config.async_page_flip = true; @@ -4259,9 +4330,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } } - /* Disable vblank IRQs aggressively for power-saving. 
*/ - adev_to_drm(adev)->vblank_disable_immediate = true; - /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; @@ -5409,7 +5477,7 @@ fill_blending_from_plane_state(const struct drm_plane_state *plane_state, } } - if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) + if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) *pre_multiplied_alpha = false; } @@ -9137,6 +9205,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct amdgpu_bo *abo; uint32_t target_vblank, last_flip_vblank; bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); + bool cursor_update = false; bool pflip_present = false; struct { struct dc_surface_update surface_updates[MAX_SURFACES]; @@ -9172,8 +9241,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); /* Cursor plane is handled after stream updates */ - if (plane->type == DRM_PLANE_TYPE_CURSOR) + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + if ((fb && crtc == pcrtc) || + (old_plane_state->fb && old_plane_state->crtc == pcrtc)) + cursor_update = true; + continue; + } if (!fb || !crtc || pcrtc != crtc) continue; @@ -9336,6 +9410,17 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->stream_update.vrr_infopacket = &acrtc_state->stream->vrr_infopacket; } + } else if (cursor_update && acrtc_state->active_planes > 0 && + !acrtc_state->force_dpms_off && + acrtc_attach->base.state->event) { + drm_crtc_vblank_get(pcrtc); + + spin_lock_irqsave(&pcrtc->dev->event_lock, flags); + + acrtc_attach->event = acrtc_attach->base.state->event; + acrtc_attach->base.state->event = NULL; + + spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } /* Update the planes if changed or disable if we don't have any. */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index aa34c0068f41..e80ef93f6550 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -540,6 +540,14 @@ struct amdgpu_display_manager { * last successfully applied backlight values. */ u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP]; + + /** + * @aux_hpd_discon_quirk: + * + * quirk for hpd discon while aux is on-going. 
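The one-character fill_blending_from_plane_state() fix above is easy to miss: per_pixel_alpha is a bool out-parameter, so the old test checked the pointer, which is always non-NULL, instead of the value just computed. A reduced sketch with illustrative names:

#include <linux/types.h>

static void example_blend_flags(bool format_has_alpha, bool coverage_blend,
				bool *per_pixel_alpha,
				bool *pre_multiplied_alpha)
{
	*per_pixel_alpha = format_has_alpha;
	*pre_multiplied_alpha = true;

	/* Buggy form: "if (per_pixel_alpha && coverage_blend)" tests the
	 * pointer and always passes. Fixed form dereferences it. */
	if (*per_pixel_alpha && coverage_blend)
		*pre_multiplied_alpha = false;
}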
+ * seen on certain Intel platforms */ + bool aux_hpd_discon_quirk; }; enum dsc_clock_force_state { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 9221b6690a4a..2b9b095e5f03 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -56,6 +56,8 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, ssize_t result = 0; struct aux_payload payload; enum aux_return_code_type operation_result; + struct amdgpu_device *adev; + struct ddc_service *ddc; if (WARN_ON(msg->size > 16)) return -E2BIG; @@ -74,6 +76,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, &operation_result); + /* + * Workaround for certain Intel platforms where HPD unexpectedly pulls low + * during the first sideband message transaction, causing the transfer to + * return AUX_RET_ERROR_HPD_DISCON. The AUX transaction actually succeeds + * in that case, so bypass the error. + */ + ddc = TO_DM_AUX(aux)->ddc_service; + adev = ddc->ctx->driver_context; + if (adev->dm.aux_hpd_discon_quirk) { + if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE && + operation_result == AUX_RET_ERROR_HPD_DISCON) { + result = 0; + operation_result = AUX_RET_SUCCESS; + } + } + if (payload.write && result >= 0) result = msg->size; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 6774dd8bb53e..3fe3fbac1e63 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1117,12 +1117,13 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) * on certain displays, such as the Sharp 4k. 36bpp is needed * to support SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 and * SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 with actual > 10 bpc - * precision on at least DCN display engines. However, at least - * Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth, - * so use only 30 bpp on DCE_VERSION_11_0. Testing with DCE 11.2 and 8.3 - * did not show such problems, so this seems to be the exception. + * precision on DCN display engines, but apparently not for DCE, as + * far as testing on DCE-11.2 and DCE-8 showed. Various DCE parts have + * problems: Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth, + * and neither does DCE-8 at 4k resolution, nor DCE-11.2 (broken identity + * pixel passthrough). Therefore only use 36 bpp on DCN, where it is actually needed.
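The comment above says only DCN should use 36 bpp, and the code change just below implements that with "dce_version > DCE_VERSION_MAX". That works because, in DC's dce_version enumeration, the DCN values come after DCE_VERSION_MAX. An illustrative sketch of the idiom (not the real header):

enum example_display_version {
	EX_DCE_VERSION_8_0,
	EX_DCE_VERSION_11_0,
	EX_DCE_VERSION_11_2,
	EX_DCE_VERSION_MAX,	/* every DCE part sorts below this marker */
	EX_DCN_VERSION_1_0,	/* DCN parts sort above it */
	EX_DCN_VERSION_3_1,
};

static unsigned int example_lb_depth_bpp(enum example_display_version v)
{
	/* 36bpp linebuffers only on DCN, where they are needed and known good */
	return v > EX_DCE_VERSION_MAX ? 36 : 30;
}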
*/ - if (plane_state->ctx->dce_version > DCE_VERSION_11_0) + if (plane_state->ctx->dce_version > DCE_VERSION_MAX) pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP; else pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 5f8809f6990d..2fbd2926a531 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1228,6 +1228,8 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t crystal_clock_freq = 2500; uint32_t tach_period; + if (speed == 0) + return -EINVAL; /* * To prevent from possible overheat, some ASICs may have requirement * for minimum fan speed: diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c index b2675c769a55..4b503c544256 100644 --- a/drivers/gpu/drm/bridge/fsl-ldb.c +++ b/drivers/gpu/drm/bridge/fsl-ldb.c @@ -74,22 +74,6 @@ static int fsl_ldb_attach(struct drm_bridge *bridge, bridge, flags); } -static int fsl_ldb_atomic_check(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - /* Invert DE signal polarity. */ - bridge_state->input_bus_cfg.flags &= ~(DRM_BUS_FLAG_DE_LOW | - DRM_BUS_FLAG_DE_HIGH); - if (bridge_state->output_bus_cfg.flags & DRM_BUS_FLAG_DE_LOW) - bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH; - else if (bridge_state->output_bus_cfg.flags & DRM_BUS_FLAG_DE_HIGH) - bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_LOW; - - return 0; -} - static void fsl_ldb_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { @@ -153,7 +137,7 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge, reg = LDB_CTRL_CH0_ENABLE; if (fsl_ldb->lvds_dual_link) - reg |= LDB_CTRL_CH1_ENABLE; + reg |= LDB_CTRL_CH1_ENABLE | LDB_CTRL_SPLIT_MODE; if (lvds_format_24bpp) { reg |= LDB_CTRL_CH0_DATA_WIDTH; @@ -233,7 +217,7 @@ fsl_ldb_mode_valid(struct drm_bridge *bridge, { struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge); - if (mode->clock > (fsl_ldb->lvds_dual_link ? 80000 : 160000)) + if (mode->clock > (fsl_ldb->lvds_dual_link ? 160000 : 80000)) return MODE_CLOCK_HIGH; return MODE_OK; @@ -241,7 +225,6 @@ fsl_ldb_mode_valid(struct drm_bridge *bridge, static const struct drm_bridge_funcs funcs = { .attach = fsl_ldb_attach, - .atomic_check = fsl_ldb_atomic_check, .atomic_enable = fsl_ldb_atomic_enable, .atomic_disable = fsl_ldb_atomic_disable, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, diff --git a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c index 74bd4a76b253..059fd71424f6 100644 --- a/drivers/gpu/drm/drm_aperture.c +++ b/drivers/gpu/drm/drm_aperture.c @@ -329,7 +329,20 @@ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const struct drm_driver *req_driver) { resource_size_t base, size; - int bar, ret = 0; + int bar, ret; + + /* + * WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. 
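In the fsl-ldb hunk above, note the mode_valid fix: the old bound rejected dual-link modes above 80 MHz even though dual-link LVDS carries alternating pixels on two channels and therefore supports twice the single-link pixel clock. A sketch of the corrected check (limits taken from the diff):

static int example_ldb_mode_valid(bool dual_link, int pixel_clock_khz)
{
	/* one LVDS link tops out at 80 MHz; dual link doubles the budget */
	const int max_khz = dual_link ? 160000 : 80000;

	/* mirrors MODE_CLOCK_HIGH vs MODE_OK in a drm_bridge mode_valid hook */
	return pixel_clock_khz > max_khz ? -EINVAL : 0;
}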
+ */ +#if IS_REACHABLE(CONFIG_FB) + ret = remove_conflicting_pci_framebuffers(pdev, req_driver->name); + if (ret) + return ret; +#endif + ret = vga_remove_vgacon(pdev); + if (ret) + return ret; for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) { if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) @@ -339,15 +352,6 @@ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, drm_aperture_detach_drivers(base, size); } - /* - * WARNING: Apparently we must kick fbdev drivers before vgacon, - * otherwise the vga fbdev driver falls over. - */ -#if IS_REACHABLE(CONFIG_FB) - ret = remove_conflicting_pci_framebuffers(pdev, req_driver->name); -#endif - if (ret == 0) - ret = vga_remove_vgacon(pdev); - return ret; + return 0; } EXPORT_SYMBOL(drm_aperture_remove_conflicting_pci_framebuffers); diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c index d5962a34c01d..e5fc875990c4 100644 --- a/drivers/gpu/drm/drm_gem_ttm_helper.c +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c @@ -64,8 +64,13 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem, struct iosys_map *map) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); + int ret; + + dma_resv_lock(gem->resv, NULL); + ret = ttm_bo_vmap(bo, map); + dma_resv_unlock(gem->resv); - return ttm_bo_vmap(bo, map); + return ret; } EXPORT_SYMBOL(drm_gem_ttm_vmap); @@ -82,7 +87,9 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem, { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); + dma_resv_lock(gem->resv, NULL); ttm_bo_vunmap(bo, map); + dma_resv_unlock(gem->resv); } EXPORT_SYMBOL(drm_gem_ttm_vunmap); diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index df87ba99a87c..d4e0f2e85548 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -286,6 +286,21 @@ static const struct dmi_system_id orientation_data[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* Lenovo Yoga Tablet 2 830F / 830L */ + .matches = { + /* + * Note this also matches the Lenovo Yoga Tablet 2 1050F/L + * since that uses the same mainboard. The resolution match + * will limit this to only matching on the 830F/L. Neither has + * any external video outputs so those are not a concern. 
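With the drm_gem_ttm_helper change above, drm_gem_ttm_vmap() and drm_gem_ttm_vunmap() take the object's reservation lock themselves, so callers stop wrapping them in dma_resv_lock(). A minimal usage sketch under that assumption:

#include <drm/drm_gem.h>
#include <drm/drm_gem_ttm_helper.h>
#include <linux/iosys-map.h>
#include <linux/string.h>

static int example_fill_bo(struct drm_gem_object *gem, const void *src,
			   size_t len)
{
	struct iosys_map map;
	int ret;

	ret = drm_gem_ttm_vmap(gem, &map);	/* locks gem->resv internally */
	if (ret)
		return ret;

	if (!map.is_iomem)			/* plain system-memory mapping */
		memcpy(map.vaddr, src, len);

	drm_gem_ttm_vunmap(gem, &map);		/* takes the lock again */

	return 0;
}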
+ */ + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."), + DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"), + DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"), + /* Partial match on beginning of BIOS version */ + DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* OneGX1 Pro */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"), diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 061b277e5ce7..14d2a64193b2 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -839,6 +839,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); if (ret) { + drm_dp_mst_put_port_malloc(port); intel_connector_free(intel_connector); return NULL; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index ab4c5ab28e4d..321af109d484 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -933,8 +933,9 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, case I915_CONTEXT_PARAM_PERSISTENCE: if (args->size) ret = -EINVAL; - ret = proto_context_set_persistence(fpriv->dev_priv, pc, - args->value); + else + ret = proto_context_set_persistence(fpriv->dev_priv, pc, + args->value); break; case I915_CONTEXT_PARAM_PROTECTED_CONTENT: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 3e5d6057b3ef..1674b0c5802b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -35,12 +35,12 @@ bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj) if (obj->cache_dirty) return false; - if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) - return true; - if (IS_DGFX(i915)) return false; + if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) + return true; + /* Currently in use by HW (display engine)? Keep flushed. */ return i915_gem_object_is_framebuffer(obj); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c index f46ee16a323a..a4fb577eceb4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -60,6 +60,8 @@ __i915_gem_object_create_region(struct intel_memory_region *mem, if (page_size) default_page_size = page_size; + /* We should be able to fit a page within an sg entry */ + GEM_BUG_ON(overflows_type(default_page_size, u32)); GEM_BUG_ON(!is_power_of_2_u64(default_page_size)); GEM_BUG_ON(default_page_size < PAGE_SIZE); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 4c25d9b2f138..8f1bb6a4b7d1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -620,10 +620,15 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, struct ttm_resource *res) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + u32 page_alignment; if (!i915_ttm_gtt_binds_lmem(res)) return i915_ttm_tt_get_st(bo->ttm); + page_alignment = bo->page_alignment << PAGE_SHIFT; + if (!page_alignment) + page_alignment = obj->mm.region->min_page_size; + /* * If CPU mapping differs, we need to add the ttm_tt pages to * the resulting st. Might make sense for GGTT. 
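The set_proto_ctx_param() hunk above is a classic dropped-else: the -EINVAL assigned for a malformed size was immediately clobbered by the unconditional assignment that followed, so bad requests were applied anyway. Reduced to its shape, with a hypothetical helper:

#include <linux/errno.h>

static int example_apply(unsigned long value);	/* hypothetical setter */

static int example_set_param(unsigned int size, unsigned long value)
{
	int ret;

	if (size)			/* this parameter carries no payload */
		ret = -EINVAL;
	else				/* the restored else is the whole fix */
		ret = example_apply(value);

	return ret;
}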
@@ -634,7 +639,8 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, struct i915_refct_sgt *rsgt; rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region, - res); + res, + page_alignment); if (IS_ERR(rsgt)) return rsgt; @@ -643,7 +649,8 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, return i915_refct_sgt_get(obj->ttm.cached_io_rsgt); } - return intel_region_ttm_resource_to_rsgt(obj->mm.region, res); + return intel_region_ttm_resource_to_rsgt(obj->mm.region, res, + page_alignment); } static int i915_ttm_truncate(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index 319936f91ac5..e6e01c2a74a6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -9,6 +9,7 @@ #include <linux/jiffies.h> #include "gt/intel_engine.h" +#include "gt/intel_rps.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" @@ -31,6 +32,37 @@ i915_gem_object_wait_fence(struct dma_fence *fence, timeout); } +static void +i915_gem_object_boost(struct dma_resv *resv, unsigned int flags) +{ + struct dma_resv_iter cursor; + struct dma_fence *fence; + + /* + * Prescan all fences for potential boosting before we begin waiting. + * + * When we wait, we wait on outstanding fences serially. If the + * dma-resv contains a sequence such as 1:1, 1:2 instead of a reduced + * form 1:2, then as we look at each wait in turn we see that each + * request is currently executing and not worthy of boosting. But if + * we only happen to look at the final fence in the sequence (because + * of request coalescing or splitting between read/write arrays by + * the iterator), then we would boost. As such our decision to boost + * or not is delicately balanced on the order we wait on fences. + * + * So instead of looking for boosts sequentially, look for all boosts + * upfront and then wait on the outstanding fences. 
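The boost prescan described above walks the dma_resv fences with the unlocked iterator (the calls follow just below). For reference, a simplified consumer of the same API that merely counts unsignaled fences; a sketch, not the i915 code:

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

static unsigned int example_count_unsignaled(struct dma_resv *resv, bool write)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, resv, dma_resv_usage_rw(write));
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (!dma_fence_is_signaled(fence))
			count++;
	}
	dma_resv_iter_end(&cursor);

	return count;
}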
+ */ + + dma_resv_iter_begin(&cursor, resv, + dma_resv_usage_rw(flags & I915_WAIT_ALL)); + dma_resv_for_each_fence_unlocked(&cursor, fence) + if (dma_fence_is_i915(fence) && + !i915_request_started(to_request(fence))) + intel_rps_boost(to_request(fence)); + dma_resv_iter_end(&cursor); +} + static long i915_gem_object_wait_reservation(struct dma_resv *resv, unsigned int flags, @@ -40,6 +72,8 @@ i915_gem_object_wait_reservation(struct dma_resv *resv, struct dma_fence *fence; long ret = timeout ?: 1; + i915_gem_object_boost(resv, flags); + dma_resv_iter_begin(&cursor, resv, dma_resv_usage_rw(flags & I915_WAIT_ALL)); dma_resv_for_each_fence_unlocked(&cursor, fence) { diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 09f82545789f..44e7339e7a4a 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -273,10 +273,17 @@ struct intel_context { u8 child_index; /** @guc: GuC specific members for parallel submission */ struct { - /** @wqi_head: head pointer in work queue */ + /** @wqi_head: cached head pointer in work queue */ u16 wqi_head; - /** @wqi_tail: tail pointer in work queue */ + /** @wqi_tail: cached tail pointer in work queue */ u16 wqi_tail; + /** @wq_head: pointer to the actual head in work queue */ + u32 *wq_head; + /** @wq_tail: pointer to the actual head in work queue */ + u32 *wq_tail; + /** @wq_status: pointer to the status in work queue */ + u32 *wq_status; + /** * @parent_page: page in context state (ce->state) used * by parent for work queue, process descriptor diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 1431f1e9dbee..04e435bce79b 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -201,6 +201,8 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine); int intel_engine_stop_cs(struct intel_engine_cs *engine); void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine); +void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine); + void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask); u64 intel_engine_get_active_head(const struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 14c6ddbbfde8..5b6ce10cb158 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1282,10 +1282,10 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine, intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING)); /* - * Wa_22011802037 : gen12, Prior to doing a reset, ensure CS is + * Wa_22011802037 : gen11, gen12, Prior to doing a reset, ensure CS is * stopped, set ring stop bit and prefetch disable bit to halt CS */ - if (GRAPHICS_VER(engine->i915) == 12) + if (IS_GRAPHICS_VER(engine->i915, 11, 12)) intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base), _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE)); @@ -1308,6 +1308,18 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine) return -ENODEV; ENGINE_TRACE(engine, "\n"); + /* + * TODO: Find out why occasionally stopping the CS times out. Seen + * especially with gem_eio tests. + * + * Occasionally trying to stop the cs times out, but does not adversely + * affect functionality. The timeout is set as a config parameter that + * defaults to 100ms. 
In most cases the follow up operation is to wait + * for pending MI_FORCE_WAKES. The assumption is that this timeout is + * sufficient for any pending MI_FORCEWAKEs to complete. Once root + * caused, the caller must check and handle the return from this + * function. + */ if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) { ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n", @@ -1334,6 +1346,78 @@ void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine) ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); } +static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine) +{ + static const i915_reg_t _reg[I915_NUM_ENGINES] = { + [RCS0] = MSG_IDLE_CS, + [BCS0] = MSG_IDLE_BCS, + [VCS0] = MSG_IDLE_VCS0, + [VCS1] = MSG_IDLE_VCS1, + [VCS2] = MSG_IDLE_VCS2, + [VCS3] = MSG_IDLE_VCS3, + [VCS4] = MSG_IDLE_VCS4, + [VCS5] = MSG_IDLE_VCS5, + [VCS6] = MSG_IDLE_VCS6, + [VCS7] = MSG_IDLE_VCS7, + [VECS0] = MSG_IDLE_VECS0, + [VECS1] = MSG_IDLE_VECS1, + [VECS2] = MSG_IDLE_VECS2, + [VECS3] = MSG_IDLE_VECS3, + [CCS0] = MSG_IDLE_CS, + [CCS1] = MSG_IDLE_CS, + [CCS2] = MSG_IDLE_CS, + [CCS3] = MSG_IDLE_CS, + }; + u32 val; + + if (!_reg[engine->id].reg) { + drm_err(&engine->i915->drm, + "MSG IDLE undefined for engine id %u\n", engine->id); + return 0; + } + + val = intel_uncore_read(engine->uncore, _reg[engine->id]); + + /* bits[29:25] & bits[13:9] >> shift */ + return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT; +} + +static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask) +{ + int ret; + + /* Ensure GPM receives fw up/down after CS is stopped */ + udelay(1); + + /* Wait for forcewake request to complete in GPM */ + ret = __intel_wait_for_register_fw(gt->uncore, + GEN9_PWRGT_DOMAIN_STATUS, + fw_mask, fw_mask, 5000, 0, NULL); + + /* Ensure CS receives fw ack from GPM */ + udelay(1); + + if (ret) + GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret); +} + +/* + * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any + * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The + * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the + * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we + * are concerned only with the gt reset here, we use a logical OR of pending + * forcewakeups from all reset domains and then wait for them to complete by + * querying PWRGT_DOMAIN_STATUS. 
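The mask arithmetic in __cs_pending_mi_force_wakes() above is dense: per the comment, bits[29:25] of MSG_IDLE validate the status field in bits[13:9]. A worked sketch of the decode, with the mask and shift written out as assumed from the i915 register definitions:

#include <linux/types.h>

static u32 example_pending_fw(u32 val)
{
	const u32 fw_mask = 0x3e00;	/* bits[13:9], cf. MSG_IDLE_FW_MASK */
	const u32 fw_shift = 9;		/* cf. MSG_IDLE_FW_SHIFT */

	/* val >> 16 moves the validity mask in bits[29:25] down onto the
	 * status field in bits[13:9]; ANDing keeps only valid status bits. */
	return (val & (val >> 16) & fw_mask) >> fw_shift;
}

/* Example: val = (0x1f << 25) | (0x0a << 9) decodes to 0x0a, i.e. all
 * status bits valid and two domains still waiting on a forcewake ack. */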
+ */ +void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine) +{ + u32 fw_pending = __cs_pending_mi_force_wakes(engine); + + if (fw_pending) + __gpm_wait_for_fw_complete(engine->gt, fw_pending); +} + static u32 read_subslice_reg(const struct intel_engine_cs *engine, int slice, int subslice, i915_reg_t reg) diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index 86f7a9ac1c39..0627fa10d2dc 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -661,6 +661,16 @@ static inline void execlists_schedule_out(struct i915_request *rq) i915_request_put(rq); } +static u32 map_i915_prio_to_lrc_desc_prio(int prio) +{ + if (prio > I915_PRIORITY_NORMAL) + return GEN12_CTX_PRIORITY_HIGH; + else if (prio < I915_PRIORITY_NORMAL) + return GEN12_CTX_PRIORITY_LOW; + else + return GEN12_CTX_PRIORITY_NORMAL; +} + static u64 execlists_update_context(struct i915_request *rq) { struct intel_context *ce = rq->context; @@ -669,7 +679,7 @@ static u64 execlists_update_context(struct i915_request *rq) desc = ce->lrc.desc; if (rq->engine->flags & I915_ENGINE_HAS_EU_PRIORITY) - desc |= lrc_desc_priority(rq_prio(rq)); + desc |= map_i915_prio_to_lrc_desc_prio(rq_prio(rq)); /* * WaIdleLiteRestore:bdw,skl @@ -2958,6 +2968,13 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine) ring_set_paused(engine, 1); intel_engine_stop_cs(engine); + /* + * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need + * to wait for any pending mi force wakeups + */ + if (IS_GRAPHICS_VER(engine->i915, 11, 12)) + intel_engine_wait_for_pending_mi_fw(engine); + engine->execlists.reset_ccid = active_ccid(engine); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 51a0fe60c050..531af6ad7007 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -1209,6 +1209,20 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt) mutex_lock(>->tlb_invalidate_lock); intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */ + + for_each_engine(engine, gt, id) { + struct reg_and_bit rb; + + rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num); + if (!i915_mmio_reg_offset(rb.reg)) + continue; + + intel_uncore_write_fw(uncore, rb.reg, rb.bit); + } + + spin_unlock_irq(&uncore->lock); + for_each_engine(engine, gt, id) { /* * HW architecture suggest typical invalidation time at 40us, @@ -1223,7 +1237,6 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt) if (!i915_mmio_reg_offset(rb.reg)) continue; - intel_uncore_write_fw(uncore, rb.reg, rb.bit); if (__intel_wait_for_register_fw(uncore, rb.reg, rb.bit, 0, timeout_us, timeout_ms, diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h index 31be734010db..a390f0813c8b 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc.h @@ -111,16 +111,6 @@ enum { #define XEHP_SW_COUNTER_SHIFT 58 #define XEHP_SW_COUNTER_WIDTH 6 -static inline u32 lrc_desc_priority(int prio) -{ - if (prio > I915_PRIORITY_NORMAL) - return GEN12_CTX_PRIORITY_HIGH; - else if (prio < I915_PRIORITY_NORMAL) - return GEN12_CTX_PRIORITY_LOW; - else - return GEN12_CTX_PRIORITY_NORMAL; -} - static inline void lrc_runtime_start(struct intel_context *ce) { struct intel_context_stats *stats = &ce->stats; diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c 
b/drivers/gpu/drm/i915/gt/intel_reset.c index a5338c3fde7a..c68d36fb5bbd 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -300,9 +300,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask) return err; } -static int gen6_reset_engines(struct intel_gt *gt, - intel_engine_mask_t engine_mask, - unsigned int retry) +static int __gen6_reset_engines(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned int retry) { struct intel_engine_cs *engine; u32 hw_mask; @@ -321,6 +321,20 @@ static int gen6_reset_engines(struct intel_gt *gt, return gen6_hw_domain_reset(gt, hw_mask); } +static int gen6_reset_engines(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned int retry) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(>->uncore->lock, flags); + ret = __gen6_reset_engines(gt, engine_mask, retry); + spin_unlock_irqrestore(>->uncore->lock, flags); + + return ret; +} + static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine) { int vecs_id; @@ -487,9 +501,9 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine) rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit); } -static int gen11_reset_engines(struct intel_gt *gt, - intel_engine_mask_t engine_mask, - unsigned int retry) +static int __gen11_reset_engines(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned int retry) { struct intel_engine_cs *engine; intel_engine_mask_t tmp; @@ -583,8 +597,11 @@ static int gen8_reset_engines(struct intel_gt *gt, struct intel_engine_cs *engine; const bool reset_non_ready = retry >= 1; intel_engine_mask_t tmp; + unsigned long flags; int ret; + spin_lock_irqsave(>->uncore->lock, flags); + for_each_engine_masked(engine, gt, engine_mask, tmp) { ret = gen8_engine_reset_prepare(engine); if (ret && !reset_non_ready) @@ -612,17 +629,19 @@ static int gen8_reset_engines(struct intel_gt *gt, * This is best effort, so ignore any error from the initial reset. 
*/ if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES) - gen11_reset_engines(gt, gt->info.engine_mask, 0); + __gen11_reset_engines(gt, gt->info.engine_mask, 0); if (GRAPHICS_VER(gt->i915) >= 11) - ret = gen11_reset_engines(gt, engine_mask, retry); + ret = __gen11_reset_engines(gt, engine_mask, retry); else - ret = gen6_reset_engines(gt, engine_mask, retry); + ret = __gen6_reset_engines(gt, engine_mask, retry); skip_reset: for_each_engine_masked(engine, gt, engine_mask, tmp) gen8_engine_reset_cancel(engine); + spin_unlock_irqrestore(>->uncore->lock, flags); + return ret; } diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 8b2c11dbe354..1109088fe8f6 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -176,8 +176,8 @@ static int live_lrc_layout(void *arg) continue; hw = shmem_pin_map(engine->default_state); - if (IS_ERR(hw)) { - err = PTR_ERR(hw); + if (!hw) { + err = -ENOMEM; break; } hw += LRC_STATE_OFFSET / sizeof(*hw); @@ -365,8 +365,8 @@ static int live_lrc_fixed(void *arg) continue; hw = shmem_pin_map(engine->default_state); - if (IS_ERR(hw)) { - err = PTR_ERR(hw); + if (!hw) { + err = -ENOMEM; break; } hw += LRC_STATE_OFFSET / sizeof(*hw); diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h index 4ef9990ed7f8..29ef8afc8c2e 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h @@ -122,6 +122,9 @@ enum intel_guc_action { INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002, INTEL_GUC_ACTION_SCHED_ENGINE_MODE_SET = 0x1003, INTEL_GUC_ACTION_SCHED_ENGINE_MODE_DONE = 0x1004, + INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY = 0x1005, + INTEL_GUC_ACTION_V69_SET_CONTEXT_EXECUTION_QUANTUM = 0x1006, + INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007, INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008, INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009, INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES = 0x100B, diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 2c4ad4a65089..8c6885f43d1a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -310,8 +310,8 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc) if (IS_DG2(gt->i915)) flags |= GUC_WA_DUAL_QUEUE; - /* Wa_22011802037: graphics version 12 */ - if (GRAPHICS_VER(gt->i915) == 12) + /* Wa_22011802037: graphics version 11/12 */ + if (IS_GRAPHICS_VER(gt->i915, 11, 12)) flags |= GUC_WA_PRE_PARSER; /* Wa_16011777198:dg2 */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 966e69a8b1c1..9feda105f913 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -170,6 +170,11 @@ struct intel_guc { /** @ads_engine_usage_size: size of engine usage in the ADS */ u32 ads_engine_usage_size; + /** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */ + struct i915_vma *lrc_desc_pool_v69; + /** @lrc_desc_pool_vaddr_v69: contents of the GuC LRC descriptor pool */ + void *lrc_desc_pool_vaddr_v69; + /** * @context_lookup: used to resolve intel_context from guc_id, if a * context is present in this structure it is registered with the GuC diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 42cb7a9a6199..89a7e5ec0614 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -203,6 +203,20 @@ struct guc_wq_item { u32 fence_id; } __packed; +struct guc_process_desc_v69 { + u32 stage_id; + u64 db_base_addr; + u32 head; + u32 tail; + u32 error_offset; + u64 wq_base_addr; + u32 wq_size_bytes; + u32 wq_status; + u32 engine_presence; + u32 priority; + u32 reserved[36]; +} __packed; + struct guc_sched_wq_desc { u32 head; u32 tail; @@ -227,6 +241,37 @@ struct guc_ctxt_registration_info { }; #define CONTEXT_REGISTRATION_FLAG_KMD BIT(0) +/* Preempt to idle on quantum expiry */ +#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69 BIT(0) + +/* + * GuC Context registration descriptor. + * FIXME: This is only required to exist during context registration. + * The current 1:1 between guc_lrc_desc and LRCs for the lifetime of the LRC + * is not required. + */ +struct guc_lrc_desc_v69 { + u32 hw_context_desc; + u32 slpm_perf_mode_hint; /* SPLC v1 only */ + u32 slpm_freq_hint; + u32 engine_submit_mask; /* In logical space */ + u8 engine_class; + u8 reserved0[3]; + u32 priority; + u32 process_desc; + u32 wq_addr; + u32 wq_size; + u32 context_flags; /* CONTEXT_REGISTRATION_* */ + /* Time for one workload to execute. (in micro seconds) */ + u32 execution_quantum; + /* Time to wait for a preemption request to complete before issuing a + * reset. (in micro seconds). + */ + u32 preemption_timeout; + u32 policy_flags; /* CONTEXT_POLICY_* */ + u32 reserved1[19]; +} __packed; + /* 32-bit KLV structure as used by policy updates and others */ struct guc_klv_generic_dw_t { u32 kl; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 1726f0f19901..2d9f5f1c79d3 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -414,12 +414,15 @@ struct sync_semaphore { }; struct parent_scratch { - struct guc_sched_wq_desc wq_desc; + union guc_descs { + struct guc_sched_wq_desc wq_desc; + struct guc_process_desc_v69 pdesc; + } descs; struct sync_semaphore go; struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1]; - u8 unused[WQ_OFFSET - sizeof(struct guc_sched_wq_desc) - + u8 unused[WQ_OFFSET - sizeof(union guc_descs) - sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)]; u32 wq[WQ_SIZE / sizeof(u32)]; @@ -456,17 +459,23 @@ __get_parent_scratch(struct intel_context *ce) LRC_STATE_OFFSET) / sizeof(u32))); } +static struct guc_process_desc_v69 * +__get_process_desc_v69(struct intel_context *ce) +{ + struct parent_scratch *ps = __get_parent_scratch(ce); + + return &ps->descs.pdesc; +} + static struct guc_sched_wq_desc * -__get_wq_desc(struct intel_context *ce) +__get_wq_desc_v70(struct intel_context *ce) { struct parent_scratch *ps = __get_parent_scratch(ce); - return &ps->wq_desc; + return &ps->descs.wq_desc; } -static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc, - struct intel_context *ce, - u32 wqi_size) +static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size) { /* * Check for space in work queue. 
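The get_wq_pointer() path here leans on the kernel's circular-buffer helpers: the driver produces at a cached tail, the GuC consumes at a head that lives in shared memory, and the cached head is only refreshed when space looks short. A reduced sketch of that pattern (names illustrative):

#include <linux/circ_buf.h>
#include <linux/compiler.h>
#include <linux/types.h>

#define EX_WQ_SIZE 0x1000	/* power of two, as CIRC_SPACE() requires */

static bool example_wq_has_space(u32 *cached_head, u32 tail,
				 const u32 *shared_head, u32 bytes)
{
	/* CIRC_SPACE(producer, consumer, size): free bytes for the producer */
	if (bytes > CIRC_SPACE(tail, *cached_head, EX_WQ_SIZE)) {
		/* stale cache? re-read the consumer index from shared memory */
		*cached_head = READ_ONCE(*shared_head);
		if (bytes > CIRC_SPACE(tail, *cached_head, EX_WQ_SIZE))
			return false;	/* genuinely full, caller backs off */
	}

	return true;
}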
Caching a value of head pointer in @@ -476,7 +485,7 @@ static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc, #define AVAILABLE_SPACE \ CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE) if (wqi_size > AVAILABLE_SPACE) { - ce->parallel.guc.wqi_head = READ_ONCE(wq_desc->head); + ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head); if (wqi_size > AVAILABLE_SPACE) return NULL; @@ -495,11 +504,55 @@ static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id) return ce; } +static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index) +{ + struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69; + + if (!base) + return NULL; + + GEM_BUG_ON(index >= GUC_MAX_CONTEXT_ID); + + return &base[index]; +} + +static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc) +{ + u32 size; + int ret; + + size = PAGE_ALIGN(sizeof(struct guc_lrc_desc_v69) * + GUC_MAX_CONTEXT_ID); + ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69, + (void **)&guc->lrc_desc_pool_vaddr_v69); + if (ret) + return ret; + + return 0; +} + +static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc) +{ + if (!guc->lrc_desc_pool_vaddr_v69) + return; + + guc->lrc_desc_pool_vaddr_v69 = NULL; + i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP); +} + static inline bool guc_submission_initialized(struct intel_guc *guc) { return guc->submission_initialized; } +static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id) +{ + struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id); + + if (desc) + memset(desc, 0, sizeof(*desc)); +} + static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id) { return __get_context(guc, id); @@ -526,6 +579,8 @@ static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id) if (unlikely(!guc_submission_initialized(guc))) return; + _reset_lrc_desc_v69(guc, id); + /* * xarray API doesn't have xa_erase_irqsave wrapper, so calling * the lower level functions directly. 
@@ -611,7 +666,7 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout) true, timeout); } -static int guc_context_policy_init(struct intel_context *ce, bool loop); +static int guc_context_policy_init_v70(struct intel_context *ce, bool loop); static int try_context_registration(struct intel_context *ce, bool loop); static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq) @@ -639,7 +694,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq) GEM_BUG_ON(context_guc_id_invalid(ce)); if (context_policy_required(ce)) { - err = guc_context_policy_init(ce, false); + err = guc_context_policy_init_v70(ce, false); if (err) return err; } @@ -737,9 +792,7 @@ static u32 wq_space_until_wrap(struct intel_context *ce) return (WQ_SIZE - ce->parallel.guc.wqi_tail); } -static void write_wqi(struct guc_sched_wq_desc *wq_desc, - struct intel_context *ce, - u32 wqi_size) +static void write_wqi(struct intel_context *ce, u32 wqi_size) { BUILD_BUG_ON(!is_power_of_2(WQ_SIZE)); @@ -750,13 +803,12 @@ static void write_wqi(struct guc_sched_wq_desc *wq_desc, ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) & (WQ_SIZE - 1); - WRITE_ONCE(wq_desc->tail, ce->parallel.guc.wqi_tail); + WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail); } static int guc_wq_noop_append(struct intel_context *ce) { - struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce); - u32 *wqi = get_wq_pointer(wq_desc, ce, wq_space_until_wrap(ce)); + u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce)); u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1; if (!wqi) @@ -775,7 +827,6 @@ static int __guc_wq_item_append(struct i915_request *rq) { struct intel_context *ce = request_to_scheduling_context(rq); struct intel_context *child; - struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce); unsigned int wqi_size = (ce->parallel.number_children + 4) * sizeof(u32); u32 *wqi; @@ -795,7 +846,7 @@ static int __guc_wq_item_append(struct i915_request *rq) return ret; } - wqi = get_wq_pointer(wq_desc, ce, wqi_size); + wqi = get_wq_pointer(ce, wqi_size); if (!wqi) return -EBUSY; @@ -810,7 +861,7 @@ static int __guc_wq_item_append(struct i915_request *rq) for_each_child(ce, child) *wqi++ = child->ring->tail / sizeof(u64); - write_wqi(wq_desc, ce, wqi_size); + write_wqi(ce, wqi_size); return 0; } @@ -1527,87 +1578,18 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub) lrc_update_regs(ce, engine, head); } -static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine) -{ - static const i915_reg_t _reg[I915_NUM_ENGINES] = { - [RCS0] = MSG_IDLE_CS, - [BCS0] = MSG_IDLE_BCS, - [VCS0] = MSG_IDLE_VCS0, - [VCS1] = MSG_IDLE_VCS1, - [VCS2] = MSG_IDLE_VCS2, - [VCS3] = MSG_IDLE_VCS3, - [VCS4] = MSG_IDLE_VCS4, - [VCS5] = MSG_IDLE_VCS5, - [VCS6] = MSG_IDLE_VCS6, - [VCS7] = MSG_IDLE_VCS7, - [VECS0] = MSG_IDLE_VECS0, - [VECS1] = MSG_IDLE_VECS1, - [VECS2] = MSG_IDLE_VECS2, - [VECS3] = MSG_IDLE_VECS3, - [CCS0] = MSG_IDLE_CS, - [CCS1] = MSG_IDLE_CS, - [CCS2] = MSG_IDLE_CS, - [CCS3] = MSG_IDLE_CS, - }; - u32 val; - - if (!_reg[engine->id].reg) - return 0; - - val = intel_uncore_read(engine->uncore, _reg[engine->id]); - - /* bits[29:25] & bits[13:9] >> shift */ - return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT; -} - -static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask) -{ - int ret; - - /* Ensure GPM receives fw up/down after CS is stopped */ - udelay(1); - - /* Wait for forcewake request to complete in GPM */ - ret = 
__intel_wait_for_register_fw(gt->uncore, - GEN9_PWRGT_DOMAIN_STATUS, - fw_mask, fw_mask, 5000, 0, NULL); - - /* Ensure CS receives fw ack from GPM */ - udelay(1); - - if (ret) - GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret); -} - -/* - * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any - * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The - * pending status is indicated by bits[13:9] (masked by bits[ 29:25]) in the - * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we - * are concerned only with the gt reset here, we use a logical OR of pending - * forcewakeups from all reset domains and then wait for them to complete by - * querying PWRGT_DOMAIN_STATUS. - */ static void guc_engine_reset_prepare(struct intel_engine_cs *engine) { - u32 fw_pending; - - if (GRAPHICS_VER(engine->i915) != 12) + if (!IS_GRAPHICS_VER(engine->i915, 11, 12)) return; - /* - * Wa_22011802037 - * TODO: Occasionally trying to stop the cs times out, but does not - * adversely affect functionality. The timeout is set as a config - * parameter that defaults to 100ms. Assuming that this timeout is - * sufficient for any pending MI_FORCEWAKEs to complete, ignore the - * timeout returned here until it is root caused. - */ intel_engine_stop_cs(engine); - fw_pending = __cs_pending_mi_force_wakes(engine); - if (fw_pending) - __gpm_wait_for_fw_complete(engine->gt, fw_pending); + /* + * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need + * to wait for any pending mi force wakeups + */ + intel_engine_wait_for_pending_mi_fw(engine); } static void guc_reset_nop(struct intel_engine_cs *engine) @@ -1868,20 +1850,34 @@ static void reset_fail_worker_func(struct work_struct *w); int intel_guc_submission_init(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); + int ret; if (guc->submission_initialized) return 0; + if (guc->fw.major_ver_found < 70) { + ret = guc_lrc_desc_pool_create_v69(guc); + if (ret) + return ret; + } + guc->submission_state.guc_ids_bitmap = bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL); - if (!guc->submission_state.guc_ids_bitmap) - return -ENOMEM; + if (!guc->submission_state.guc_ids_bitmap) { + ret = -ENOMEM; + goto destroy_pool; + } guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ; guc->timestamp.shift = gpm_timestamp_shift(gt); guc->submission_initialized = true; return 0; + +destroy_pool: + guc_lrc_desc_pool_destroy_v69(guc); + + return ret; } void intel_guc_submission_fini(struct intel_guc *guc) @@ -1890,6 +1886,7 @@ void intel_guc_submission_fini(struct intel_guc *guc) return; guc_flush_destroyed_contexts(guc); + guc_lrc_desc_pool_destroy_v69(guc); i915_sched_engine_put(guc->sched_engine); bitmap_free(guc->submission_state.guc_ids_bitmap); guc->submission_initialized = false; @@ -2147,10 +2144,34 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce) spin_unlock_irqrestore(&guc->submission_state.lock, flags); } -static int __guc_action_register_multi_lrc(struct intel_guc *guc, - struct intel_context *ce, - struct guc_ctxt_registration_info *info, - bool loop) +static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc, + struct intel_context *ce, + u32 guc_id, + u32 offset, + bool loop) +{ + struct intel_context *child; + u32 action[4 + MAX_ENGINE_INSTANCE]; + int len = 0; + + GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE); + + action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC; + action[len++] 
= guc_id; + action[len++] = ce->parallel.number_children + 1; + action[len++] = offset; + for_each_child(ce, child) { + offset += sizeof(struct guc_lrc_desc_v69); + action[len++] = offset; + } + + return guc_submission_send_busy_loop(guc, action, len, 0, loop); +} + +static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc, + struct intel_context *ce, + struct guc_ctxt_registration_info *info, + bool loop) { struct intel_context *child; u32 action[13 + (MAX_ENGINE_INSTANCE * 2)]; @@ -2190,9 +2211,24 @@ static int __guc_action_register_multi_lrc(struct intel_guc *guc, return guc_submission_send_busy_loop(guc, action, len, 0, loop); } -static int __guc_action_register_context(struct intel_guc *guc, - struct guc_ctxt_registration_info *info, - bool loop) +static int __guc_action_register_context_v69(struct intel_guc *guc, + u32 guc_id, + u32 offset, + bool loop) +{ + u32 action[] = { + INTEL_GUC_ACTION_REGISTER_CONTEXT, + guc_id, + offset, + }; + + return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), + 0, loop); +} + +static int __guc_action_register_context_v70(struct intel_guc *guc, + struct guc_ctxt_registration_info *info, + bool loop) { u32 action[] = { INTEL_GUC_ACTION_REGISTER_CONTEXT, @@ -2213,24 +2249,52 @@ static int __guc_action_register_context(struct intel_guc *guc, 0, loop); } -static void prepare_context_registration_info(struct intel_context *ce, - struct guc_ctxt_registration_info *info); +static void prepare_context_registration_info_v69(struct intel_context *ce); +static void prepare_context_registration_info_v70(struct intel_context *ce, + struct guc_ctxt_registration_info *info); -static int register_context(struct intel_context *ce, bool loop) +static int +register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop) +{ + u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) + + ce->guc_id.id * sizeof(struct guc_lrc_desc_v69); + + prepare_context_registration_info_v69(ce); + + if (intel_context_is_parent(ce)) + return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id, + offset, loop); + else + return __guc_action_register_context_v69(guc, ce->guc_id.id, + offset, loop); +} + +static int +register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop) { struct guc_ctxt_registration_info info; + + prepare_context_registration_info_v70(ce, &info); + + if (intel_context_is_parent(ce)) + return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop); + else + return __guc_action_register_context_v70(guc, &info, loop); +} + +static int register_context(struct intel_context *ce, bool loop) +{ struct intel_guc *guc = ce_to_guc(ce); int ret; GEM_BUG_ON(intel_context_is_child(ce)); trace_intel_context_register(ce); - prepare_context_registration_info(ce, &info); - - if (intel_context_is_parent(ce)) - ret = __guc_action_register_multi_lrc(guc, ce, &info, loop); + if (guc->fw.major_ver_found >= 70) + ret = register_context_v70(guc, ce, loop); else - ret = __guc_action_register_context(guc, &info, loop); + ret = register_context_v69(guc, ce, loop); + if (likely(!ret)) { unsigned long flags; @@ -2238,7 +2302,8 @@ static int register_context(struct intel_context *ce, bool loop) set_context_registered(ce); spin_unlock_irqrestore(&ce->guc_state.lock, flags); - guc_context_policy_init(ce, loop); + if (guc->fw.major_ver_found >= 70) + guc_context_policy_init_v70(ce, loop); } return ret; @@ -2335,7 +2400,7 @@ static int __guc_context_set_context_policies(struct intel_guc *guc, 0, loop); } -static int 
guc_context_policy_init(struct intel_context *ce, bool loop) +static int guc_context_policy_init_v70(struct intel_context *ce, bool loop) { struct intel_engine_cs *engine = ce->engine; struct intel_guc *guc = &engine->gt->uc.guc; @@ -2394,8 +2459,108 @@ static int guc_context_policy_init(struct intel_context *ce, bool loop) return ret; } -static void prepare_context_registration_info(struct intel_context *ce, - struct guc_ctxt_registration_info *info) +static void guc_context_policy_init_v69(struct intel_engine_cs *engine, + struct guc_lrc_desc_v69 *desc) +{ + desc->policy_flags = 0; + + if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION) + desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69; + + /* NB: For both of these, zero means disabled. */ + desc->execution_quantum = engine->props.timeslice_duration_ms * 1000; + desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000; +} + +static u32 map_guc_prio_to_lrc_desc_prio(u8 prio) +{ + /* + * This matches the mapping we do in map_i915_prio_to_guc_prio() + * (e.g. prio < I915_PRIORITY_NORMAL maps to GUC_CLIENT_PRIORITY_NORMAL) + */ + switch (prio) { + default: + MISSING_CASE(prio); + fallthrough; + case GUC_CLIENT_PRIORITY_KMD_NORMAL: + return GEN12_CTX_PRIORITY_NORMAL; + case GUC_CLIENT_PRIORITY_NORMAL: + return GEN12_CTX_PRIORITY_LOW; + case GUC_CLIENT_PRIORITY_HIGH: + case GUC_CLIENT_PRIORITY_KMD_HIGH: + return GEN12_CTX_PRIORITY_HIGH; + } +} + +static void prepare_context_registration_info_v69(struct intel_context *ce) +{ + struct intel_engine_cs *engine = ce->engine; + struct intel_guc *guc = &engine->gt->uc.guc; + u32 ctx_id = ce->guc_id.id; + struct guc_lrc_desc_v69 *desc; + struct intel_context *child; + + GEM_BUG_ON(!engine->mask); + + /* + * Ensure the LRC and CT vmas are in the same region, as the write barrier + * is done based on the CT vma region. + */ + GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) != + i915_gem_object_is_lmem(ce->ring->vma->obj)); + + desc = __get_lrc_desc_v69(guc, ctx_id); + desc->engine_class = engine_class_to_guc_class(engine->class); + desc->engine_submit_mask = engine->logical_mask; + desc->hw_context_desc = ce->lrc.lrca; + desc->priority = ce->guc_state.prio; + desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; + guc_context_policy_init_v69(engine, desc); + + /* + * If the context is a parent, we need to register a process descriptor + * describing a work queue and register all child contexts.
+ */ + if (intel_context_is_parent(ce)) { + struct guc_process_desc_v69 *pdesc; + + ce->parallel.guc.wqi_tail = 0; + ce->parallel.guc.wqi_head = 0; + + desc->process_desc = i915_ggtt_offset(ce->state) + + __get_parent_scratch_offset(ce); + desc->wq_addr = i915_ggtt_offset(ce->state) + + __get_wq_offset(ce); + desc->wq_size = WQ_SIZE; + + pdesc = __get_process_desc_v69(ce); + memset(pdesc, 0, sizeof(*(pdesc))); + pdesc->stage_id = ce->guc_id.id; + pdesc->wq_base_addr = desc->wq_addr; + pdesc->wq_size_bytes = desc->wq_size; + pdesc->wq_status = WQ_STATUS_ACTIVE; + + ce->parallel.guc.wq_head = &pdesc->head; + ce->parallel.guc.wq_tail = &pdesc->tail; + ce->parallel.guc.wq_status = &pdesc->wq_status; + + for_each_child(ce, child) { + desc = __get_lrc_desc_v69(guc, child->guc_id.id); + + desc->engine_class = + engine_class_to_guc_class(engine->class); + desc->hw_context_desc = child->lrc.lrca; + desc->priority = ce->guc_state.prio; + desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; + guc_context_policy_init_v69(engine, desc); + } + + clear_children_join_go_memory(ce); + } +} + +static void prepare_context_registration_info_v70(struct intel_context *ce, + struct guc_ctxt_registration_info *info) { struct intel_engine_cs *engine = ce->engine; struct intel_guc *guc = &engine->gt->uc.guc; @@ -2420,6 +2585,8 @@ static void prepare_context_registration_info(struct intel_context *ce, */ info->hwlrca_lo = lower_32_bits(ce->lrc.lrca); info->hwlrca_hi = upper_32_bits(ce->lrc.lrca); + if (engine->flags & I915_ENGINE_HAS_EU_PRIORITY) + info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio); info->flags = CONTEXT_REGISTRATION_FLAG_KMD; /* @@ -2443,10 +2610,14 @@ static void prepare_context_registration_info(struct intel_context *ce, info->wq_base_hi = upper_32_bits(wq_base_offset); info->wq_size = WQ_SIZE; - wq_desc = __get_wq_desc(ce); + wq_desc = __get_wq_desc_v70(ce); memset(wq_desc, 0, sizeof(*wq_desc)); wq_desc->wq_status = WQ_STATUS_ACTIVE; + ce->parallel.guc.wq_head = &wq_desc->head; + ce->parallel.guc.wq_tail = &wq_desc->tail; + ce->parallel.guc.wq_status = &wq_desc->wq_status; + clear_children_join_go_memory(ce); } } @@ -2761,11 +2932,21 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc, u16 guc_id, u32 preemption_timeout) { - struct context_policy policy; + if (guc->fw.major_ver_found >= 70) { + struct context_policy policy; - __guc_context_policy_start_klv(&policy, guc_id); - __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout); - __guc_context_set_context_policies(guc, &policy, true); + __guc_context_policy_start_klv(&policy, guc_id); + __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout); + __guc_context_set_context_policies(guc, &policy, true); + } else { + u32 action[] = { + INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT, + guc_id, + preemption_timeout + }; + + intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true); + } } static void guc_context_ban(struct intel_context *ce, struct i915_request *rq) @@ -3013,11 +3194,21 @@ static int guc_context_alloc(struct intel_context *ce) static void __guc_context_set_prio(struct intel_guc *guc, struct intel_context *ce) { - struct context_policy policy; + if (guc->fw.major_ver_found >= 70) { + struct context_policy policy; - __guc_context_policy_start_klv(&policy, ce->guc_id.id); - __guc_context_policy_add_priority(&policy, ce->guc_state.prio); - __guc_context_set_context_policies(guc, &policy, true); + __guc_context_policy_start_klv(&policy, 
ce->guc_id.id); + __guc_context_policy_add_priority(&policy, ce->guc_state.prio); + __guc_context_set_context_policies(guc, &policy, true); + } else { + u32 action[] = { + INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY, + ce->guc_id.id, + ce->guc_state.prio, + }; + + guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true); + } } static void guc_context_set_prio(struct intel_guc *guc, @@ -4527,17 +4718,19 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc, guc_log_context_priority(p, ce); if (intel_context_is_parent(ce)) { - struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce); struct intel_context *child; drm_printf(p, "\t\tNumber children: %u\n", ce->parallel.number_children); - drm_printf(p, "\t\tWQI Head: %u\n", - READ_ONCE(wq_desc->head)); - drm_printf(p, "\t\tWQI Tail: %u\n", - READ_ONCE(wq_desc->tail)); - drm_printf(p, "\t\tWQI Status: %u\n\n", - READ_ONCE(wq_desc->wq_status)); + + if (ce->parallel.guc.wq_status) { + drm_printf(p, "\t\tWQI Head: %u\n", + READ_ONCE(*ce->parallel.guc.wq_head)); + drm_printf(p, "\t\tWQI Tail: %u\n", + READ_ONCE(*ce->parallel.guc.wq_tail)); + drm_printf(p, "\t\tWQI Status: %u\n\n", + READ_ONCE(*ce->parallel.guc.wq_status)); + } if (ce->engine->emit_bb_start == emit_bb_start_parent_no_preempt_mid_batch) { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index f0d7b57b741e..703f42ba5ddd 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -70,6 +70,10 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, fw_def(BROXTON, 0, guc_def(bxt, 70, 1, 1)) \ fw_def(SKYLAKE, 0, guc_def(skl, 70, 1, 1)) +#define INTEL_GUC_FIRMWARE_DEFS_FALLBACK(fw_def, guc_def) \ + fw_def(ALDERLAKE_P, 0, guc_def(adlp, 69, 0, 3)) \ + fw_def(ALDERLAKE_S, 0, guc_def(tgl, 69, 0, 3)) + #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \ fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \ fw_def(ALDERLAKE_S, 0, huc_def(tgl, 7, 9, 3)) \ @@ -105,6 +109,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, MODULE_FIRMWARE(uc_); INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH) +INTEL_GUC_FIRMWARE_DEFS_FALLBACK(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH) INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH) /* The below structs and macros are used to iterate across the list of blobs */ @@ -149,6 +154,9 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) static const struct uc_fw_platform_requirement blobs_guc[] = { INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB) }; + static const struct uc_fw_platform_requirement blobs_guc_fallback[] = { + INTEL_GUC_FIRMWARE_DEFS_FALLBACK(MAKE_FW_LIST, GUC_FW_BLOB) + }; static const struct uc_fw_platform_requirement blobs_huc[] = { INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB) }; @@ -162,6 +170,15 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) u8 rev = INTEL_REVID(i915); int i; + /* + * The only difference between the ADL GuC FWs is the HWConfig support. + * ADL-N does not support HWConfig, so we should use the same binary as + * ADL-S, otherwise the GuC might attempt to fetch a config table that + * does not exist. 
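The fallback blob definitions above pair with the fetch logic further below, which tries the preferred firmware quietly and only then falls back with a notice. A minimal sketch of that flow, assuming only the request_firmware family (the real code also rewrites the wanted major/minor version):

#include <linux/device.h>
#include <linux/firmware.h>

static int example_fetch_fw(const struct firmware **fw, struct device *dev,
			    const char *path, const char *fallback_path)
{
	int err;

	/* _nowarn: a missing preferred blob should not splat into dmesg */
	err = firmware_request_nowarn(fw, path, dev);
	if (err && fallback_path) {
		err = firmware_request_nowarn(fw, fallback_path, dev);
		if (!err)
			dev_notice(dev, "%s is recommended, but only %s was found\n",
				   path, fallback_path);
	}

	return err;
}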
+ */ + if (IS_ADLP_N(i915)) + p = INTEL_ALDERLAKE_S; + GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all)); fw_blobs = blobs_all[uc_fw->type].blobs; fw_count = blobs_all[uc_fw->type].count; @@ -170,12 +187,29 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) { const struct uc_fw_blob *blob = &fw_blobs[i].blob; uc_fw->path = blob->path; + uc_fw->wanted_path = blob->path; uc_fw->major_ver_wanted = blob->major; uc_fw->minor_ver_wanted = blob->minor; break; } } + if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) { + const struct uc_fw_platform_requirement *blobs = blobs_guc_fallback; + u32 count = ARRAY_SIZE(blobs_guc_fallback); + + for (i = 0; i < count && p <= blobs[i].p; i++) { + if (p == blobs[i].p && rev >= blobs[i].rev) { + const struct uc_fw_blob *blob = &blobs[i].blob; + + uc_fw->fallback.path = blob->path; + uc_fw->fallback.major_ver = blob->major; + uc_fw->fallback.minor_ver = blob->minor; + break; + } + } + } + /* make sure the list is ordered as expected */ if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) { for (i = 1; i < fw_count; i++) { @@ -329,7 +363,24 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) __force_fw_fetch_failures(uc_fw, -EINVAL); __force_fw_fetch_failures(uc_fw, -ESTALE); - err = request_firmware(&fw, uc_fw->path, dev); + err = firmware_request_nowarn(&fw, uc_fw->path, dev); + if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) { + err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev); + if (!err) { + drm_notice(&i915->drm, + "%s firmware %s is recommended, but only %s was found\n", + intel_uc_fw_type_repr(uc_fw->type), + uc_fw->wanted_path, + uc_fw->fallback.path); + drm_info(&i915->drm, + "Consider updating your linux-firmware pkg or downloading from %s\n", + INTEL_UC_FIRMWARE_URL); + + uc_fw->path = uc_fw->fallback.path; + uc_fw->major_ver_wanted = uc_fw->fallback.major_ver; + uc_fw->minor_ver_wanted = uc_fw->fallback.minor_ver; + } + } if (err) goto fail; @@ -428,8 +479,8 @@ fail: INTEL_UC_FIRMWARE_MISSING : INTEL_UC_FIRMWARE_ERROR); - drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); + i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n", intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL); @@ -787,7 +838,13 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len) void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p) { drm_printf(p, "%s firmware: %s\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); + intel_uc_fw_type_repr(uc_fw->type), uc_fw->wanted_path); + if (uc_fw->fallback.path) { + drm_printf(p, "%s firmware fallback: %s\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->fallback.path); + drm_printf(p, "fallback selected: %s\n", + str_yes_no(uc_fw->path == uc_fw->fallback.path)); + } drm_printf(p, "\tstatus: %s\n", intel_uc_fw_status_repr(uc_fw->status)); drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n", diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index 3229018877d3..562acdf88adb 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -74,6 +74,7 @@ struct intel_uc_fw { const enum intel_uc_fw_status status; enum intel_uc_fw_status __status; /* no accidental overwrites */ }; + const 
char *wanted_path; const char *path; bool user_overridden; size_t size; @@ -98,6 +99,12 @@ struct intel_uc_fw { u16 major_ver_found; u16 minor_ver_found; + struct { + const char *path; + u16 major_ver; + u16 minor_ver; + } fallback; + u32 rsa_size; u32 ucode_size; diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index b9eb75a2b400..1c35a41620ae 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -3117,9 +3117,9 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu) continue; vaddr = shmem_pin_map(engine->default_state); - if (IS_ERR(vaddr)) { - gvt_err("failed to map %s->default state, err:%zd\n", - engine->name, PTR_ERR(vaddr)); + if (!vaddr) { + gvt_err("failed to map %s->default state\n", + engine->name); return; } diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 90b0ce5051af..1041b5340465 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -530,6 +530,7 @@ mask_err: static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *root_pdev; int ret; if (i915_inject_probe_failure(dev_priv)) @@ -641,6 +642,15 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) intel_bw_init_hw(dev_priv); + /* + * FIXME: Temporary hammer to avoid freezing the machine on our DGFX + * This should be totally removed when we handle the pci states properly + * on runtime PM and on s2idle cases. + */ + root_pdev = pcie_find_root_port(pdev); + if (root_pdev) + pci_d3cold_disable(root_pdev); + return 0; err_msi: @@ -664,11 +674,16 @@ err_perf: static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *root_pdev; i915_perf_fini(dev_priv); if (pdev->msi_enabled) pci_disable_msi(pdev); + + root_pdev = pcie_find_root_port(pdev); + if (root_pdev) + pci_d3cold_enable(root_pdev); } /** @@ -1193,14 +1208,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) goto out; } - /* - * FIXME: Temporary hammer to avoid freezing the machine on our DGFX - * This should be totally removed when we handle the pci states properly - * on runtime PM and on s2idle cases. - */ - if (suspend_to_idle(dev_priv)) - pci_d3cold_disable(pdev); - pci_disable_device(pdev); /* * During hibernation on some platforms the BIOS may try to access @@ -1365,8 +1372,6 @@ static int i915_drm_resume_early(struct drm_device *dev) pci_set_master(pdev); - pci_d3cold_enable(pdev); - disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); ret = vlv_resume_prepare(dev_priv, false); @@ -1543,7 +1548,6 @@ static int intel_runtime_suspend(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); int ret; if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) @@ -1589,12 +1593,6 @@ static int intel_runtime_suspend(struct device *kdev) drm_err(&dev_priv->drm, "Unclaimed access detected prior to suspending\n"); - /* - * FIXME: Temporary hammer to avoid freezing the machine on our DGFX - * This should be totally removed when we handle the pci states properly - * on runtime PM and on s2idle cases. 
- */ - pci_d3cold_disable(pdev); rpm->suspended = true; /* @@ -1633,7 +1631,6 @@ static int intel_runtime_resume(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); int ret; if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) @@ -1646,7 +1643,6 @@ static int intel_runtime_resume(struct device *kdev) intel_opregion_notify_adapter(dev_priv, PCI_D0); rpm->suspended = false; - pci_d3cold_enable(pdev); if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) drm_dbg(&dev_priv->drm, "Unclaimed access during suspend, bios?\n"); diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c index 159571b9bd24..dcc081874ec8 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.c +++ b/drivers/gpu/drm/i915/i915_scatterlist.c @@ -68,6 +68,7 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size) * drm_mm_node * @node: The drm_mm_node. * @region_start: An offset to add to the dma addresses of the sg list. + * @page_alignment: Required page alignment for each sg entry. Power of two. * * Create a struct sg_table, initializing it from a struct drm_mm_node, * taking a maximum segment length into account, splitting into segments @@ -77,22 +78,25 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size) * error code cast to an error pointer on failure. */ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node, - u64 region_start) + u64 region_start, + u32 page_alignment) { - const u64 max_segment = SZ_1G; /* Do we have a limit on this? */ - u64 segment_pages = max_segment >> PAGE_SHIFT; + const u32 max_segment = round_down(UINT_MAX, page_alignment); + const u32 segment_pages = max_segment >> PAGE_SHIFT; u64 block_size, offset, prev_end; struct i915_refct_sgt *rsgt; struct sg_table *st; struct scatterlist *sg; + GEM_BUG_ON(!max_segment); + rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL); if (!rsgt) return ERR_PTR(-ENOMEM); i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT); st = &rsgt->table; - if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages), + if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages), GFP_KERNEL)) { i915_refct_sgt_put(rsgt); return ERR_PTR(-ENOMEM); @@ -112,12 +116,14 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node, sg = __sg_next(sg); sg_dma_address(sg) = region_start + offset; + GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg), + page_alignment)); sg_dma_len(sg) = 0; sg->length = 0; st->nents++; } - len = min(block_size, max_segment - sg->length); + len = min_t(u64, block_size, max_segment - sg->length); sg->length += len; sg_dma_len(sg) += len; @@ -138,6 +144,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node, * i915_buddy_block list * @res: The struct i915_ttm_buddy_resource. * @region_start: An offset to add to the dma addresses of the sg list. + * @page_alignment: Required page alignment for each sg entry. Power of two. * * Create a struct sg_table, initializing it from struct i915_buddy_block list, * taking a maximum segment length into account, splitting into segments @@ -147,11 +154,12 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node, * error code cast to an error pointer on failure. 
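+ * For example, with page_alignment = SZ_64K the maximum segment becomes
+ * round_down(UINT_MAX, SZ_64K) = 0xffff0000 bytes, so every entry stays
+ * both under the u32 limit and 64K-aligned.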
*/ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, - u64 region_start) + u64 region_start, + u32 page_alignment) { struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); const u64 size = res->num_pages << PAGE_SHIFT; - const u64 max_segment = rounddown(UINT_MAX, PAGE_SIZE); + const u32 max_segment = round_down(UINT_MAX, page_alignment); struct drm_buddy *mm = bman_res->mm; struct list_head *blocks = &bman_res->blocks; struct drm_buddy_block *block; @@ -161,6 +169,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, resource_size_t prev_end; GEM_BUG_ON(list_empty(blocks)); + GEM_BUG_ON(!max_segment); rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL); if (!rsgt) @@ -191,12 +200,14 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, sg = __sg_next(sg); sg_dma_address(sg) = region_start + offset; + GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg), + page_alignment)); sg_dma_len(sg) = 0; sg->length = 0; st->nents++; } - len = min(block_size, max_segment - sg->length); + len = min_t(u64, block_size, max_segment - sg->length); sg->length += len; sg_dma_len(sg) += len; diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index 12c6a1684081..9ddb3e743a3e 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -213,9 +213,11 @@ static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt, void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size); struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node, - u64 region_start); + u64 region_start, + u32 page_alignment); struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, - u64 region_start); + u64 region_start, + u32 page_alignment); #endif diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 0bffb70b3c5f..04d12f278f57 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1637,10 +1637,10 @@ static void force_unbind(struct i915_vma *vma) GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); } -static void release_references(struct i915_vma *vma, bool vm_ddestroy) +static void release_references(struct i915_vma *vma, struct intel_gt *gt, + bool vm_ddestroy) { struct drm_i915_gem_object *obj = vma->obj; - struct intel_gt *gt = vma->vm->gt; GEM_BUG_ON(i915_vma_is_active(vma)); @@ -1695,11 +1695,12 @@ void i915_vma_destroy_locked(struct i915_vma *vma) force_unbind(vma); list_del_init(&vma->vm_link); - release_references(vma, false); + release_references(vma, vma->vm->gt, false); } void i915_vma_destroy(struct i915_vma *vma) { + struct intel_gt *gt; bool vm_ddestroy; mutex_lock(&vma->vm->mutex); @@ -1707,8 +1708,11 @@ void i915_vma_destroy(struct i915_vma *vma) list_del_init(&vma->vm_link); vm_ddestroy = vma->vm_ddestroy; vma->vm_ddestroy = false; + + /* vma->vm may be freed when releasing vma->vm->mutex. */ + gt = vma->vm->gt; mutex_unlock(&vma->vm->mutex); - release_references(vma, vm_ddestroy); + release_references(vma, gt, vm_ddestroy); } void i915_vma_parked(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c index 62ff77445b01..575d67bc6ffe 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.c +++ b/drivers/gpu/drm/i915/intel_region_ttm.c @@ -152,6 +152,7 @@ int intel_region_ttm_fini(struct intel_memory_region *mem) * Convert an opaque TTM resource manager resource to a refcounted sg_table. 
* @mem: The memory region. * @res: The resource manager resource obtained from the TTM resource manager. + * @page_alignment: Required page alignment for each sg entry. Power of two. * * The gem backends typically use sg-tables for operations on the underlying * io_memory. So provide a way for the backends to translate the @@ -161,16 +162,19 @@ int intel_region_ttm_fini(struct intel_memory_region *mem) */ struct i915_refct_sgt * intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem, - struct ttm_resource *res) + struct ttm_resource *res, + u32 page_alignment) { if (mem->is_range_manager) { struct ttm_range_mgr_node *range_node = to_ttm_range_mgr_node(res); return i915_rsgt_from_mm_node(&range_node->mm_nodes[0], - mem->region.start); + mem->region.start, + page_alignment); } else { - return i915_rsgt_from_buddy_resource(res, mem->region.start); + return i915_rsgt_from_buddy_resource(res, mem->region.start, + page_alignment); } } diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h index cf9d86dcf409..5bb8d8b582ae 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.h +++ b/drivers/gpu/drm/i915/intel_region_ttm.h @@ -24,7 +24,8 @@ int intel_region_ttm_fini(struct intel_memory_region *mem); struct i915_refct_sgt * intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem, - struct ttm_resource *res); + struct ttm_resource *res, + u32 page_alignment); void intel_region_ttm_resource_free(struct intel_memory_region *mem, struct ttm_resource *res); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 8633bec18fa7..ab9f17fc85bc 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -742,7 +742,7 @@ static int pot_hole(struct i915_address_space *vm, u64 addr; for (addr = round_up(hole_start + min_alignment, step) - min_alignment; - addr <= round_down(hole_end - (2 * min_alignment), step) - min_alignment; + hole_end > addr && hole_end - addr >= 2 * min_alignment; addr += step) { err = i915_vma_pin(vma, 0, 0, addr | flags); if (err) { diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 73eb53edb8de..3b18e5905c86 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -451,7 +451,6 @@ out_put: static int igt_mock_max_segment(void *arg) { - const unsigned int max_segment = rounddown(UINT_MAX, PAGE_SIZE); struct intel_memory_region *mem = arg; struct drm_i915_private *i915 = mem->i915; struct i915_ttm_buddy_resource *res; @@ -460,7 +459,10 @@ static int igt_mock_max_segment(void *arg) struct drm_buddy *mm; struct list_head *blocks; struct scatterlist *sg; + I915_RND_STATE(prng); LIST_HEAD(objects); + unsigned int max_segment; + unsigned int ps; u64 size; int err = 0; @@ -472,7 +474,13 @@ static int igt_mock_max_segment(void *arg) */ size = SZ_8G; - mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0, 0); + ps = PAGE_SIZE; + if (i915_prandom_u64_state(&prng) & 1) + ps = SZ_64K; /* For something like DG2 */ + + max_segment = round_down(UINT_MAX, ps); + + mem = mock_region_create(i915, 0, size, ps, 0, 0); if (IS_ERR(mem)) return PTR_ERR(mem); @@ -498,12 +506,21 @@ static int igt_mock_max_segment(void *arg) } for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) { + dma_addr_t daddr = sg_dma_address(sg); + if (sg->length > max_segment) { pr_err("%s: Created an oversized scatterlist entry, 
%u > %u\n", __func__, sg->length, max_segment); err = -EINVAL; goto out_close; } + + if (!IS_ALIGNED(daddr, ps)) { + pr_err("%s: Created an unaligned scatterlist entry, addr=%pa, ps=%u\n", + __func__, &daddr, ps); + err = -EINVAL; + goto out_close; + } } out_close: diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c index 670557ce1024..bac21fe84ca5 100644 --- a/drivers/gpu/drm/i915/selftests/mock_region.c +++ b/drivers/gpu/drm/i915/selftests/mock_region.c @@ -33,7 +33,8 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj) return PTR_ERR(obj->mm.res); obj->mm.rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region, - obj->mm.res); + obj->mm.res, + obj->mm.region->min_page_size); if (IS_ERR(obj->mm.rsgt)) { err = PTR_ERR(obj->mm.rsgt); goto err_free_resource; diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c index c849533ca83e..3f5750cc2673 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-dev.c +++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c @@ -207,6 +207,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output) ret = dcss_submodules_init(dcss); if (ret) { + of_node_put(dcss->of_port); dev_err(dev, "submodules initialization failed\n"); goto clks_err; } @@ -237,6 +238,8 @@ void dcss_dev_destroy(struct dcss_dev *dcss) dcss_clocks_disable(dcss); } + of_node_put(dcss->of_port); + pm_runtime_disable(dcss->dev); dcss_submodules_stop(dcss); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 3a462e327e0e..a1b8c4592943 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -1251,12 +1251,13 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, DPU_ATRACE_BEGIN("encoder_vblank_callback"); dpu_enc = to_dpu_encoder_virt(drm_enc); + atomic_inc(&phy_enc->vsync_cnt); + spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); if (dpu_enc->crtc) dpu_crtc_vblank_callback(dpu_enc->crtc); spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); - atomic_inc(&phy_enc->vsync_cnt); DPU_ATRACE_END("encoder_vblank_callback"); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c index 59da348ff339..0ec809ab06e7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c @@ -252,11 +252,6 @@ static int dpu_encoder_phys_wb_atomic_check( DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n", phys_enc->wb_idx, mode->name, mode->hdisplay, mode->vdisplay); - if (!conn_state->writeback_job || !conn_state->writeback_job->fb) - return 0; - - fb = conn_state->writeback_job->fb; - if (!conn_state || !conn_state->connector) { DPU_ERROR("invalid connector state\n"); return -EINVAL; @@ -267,6 +262,11 @@ static int dpu_encoder_phys_wb_atomic_check( return -EINVAL; } + if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + return 0; + + fb = conn_state->writeback_job->fb; + DPU_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id, fb->width, fb->height); diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index ec26855079c1..239c8e3f2fbd 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -316,6 +316,8 @@ static void dp_display_unbind(struct device *dev, struct device *master, dp_power_client_deinit(dp->power); dp_aux_unregister(dp->aux); + dp->drm_dev = NULL; + dp->aux->drm_dev = NULL; 
priv->dp[dp->id] = NULL; } diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 3c3a0cfade36..c9e4aeb14f4a 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -928,7 +928,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, INT_MAX, GFP_KERNEL); } if (submit->fence_id < 0) { - ret = submit->fence_id = 0; + ret = submit->fence_id; submit->fence_id = 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 7ba66ad68a8a..16356611b5b9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -680,7 +680,11 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm, goto out_free_dma; for (i = 0; i < npages; i += max) { - args.end = start + (max << PAGE_SHIFT); + if (args.start + (max << PAGE_SHIFT) > end) + args.end = end; + else + args.end = args.start + (max << PAGE_SHIFT); + ret = migrate_vma_setup(&args); if (ret) goto out_free_pfns; diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index c96014464355..a189982601a4 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -713,7 +713,7 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel) of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms); desc->delay.hpd_reliable = reliable_ms; of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms); - desc->delay.hpd_reliable = absent_ms; + desc->delay.hpd_absent = absent_ms; /* Power the panel on so we can read the EDID */ ret = pm_runtime_get_sync(dev); diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 087e69b98d06..b1e6d238674f 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -433,8 +433,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, if (args->retained) { if (args->madv == PANFROST_MADV_DONTNEED) - list_add_tail(&bo->base.madv_list, - &pfdev->shrinker_list); + list_move_tail(&bo->base.madv_list, + &pfdev->shrinker_list); else if (args->madv == PANFROST_MADV_WILLNEED) list_del_init(&bo->base.madv_list); } diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index d3f82b26a631..b285a8001b1d 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -518,7 +518,7 @@ err_map: err_pages: drm_gem_shmem_put_pages(&bo->base); err_bo: - drm_gem_object_put(&bo->base.base); + panfrost_gem_mapping_put(bomapping); return ret; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 67d38f53d3e5..13ed33e74457 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -23,6 +23,14 @@ #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> +#if defined(CONFIG_ARM_DMA_USE_IOMMU) +#include <asm/dma-iommu.h> +#else +#define arm_iommu_detach_device(...) ({ }) +#define arm_iommu_release_mapping(...) 
({ }) +#define to_dma_iommu_mapping(dev) NULL +#endif + #include "rockchip_drm_drv.h" #include "rockchip_drm_fb.h" #include "rockchip_drm_gem.h" @@ -49,6 +57,15 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, if (!private->domain) return 0; + if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) { + struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); + + if (mapping) { + arm_iommu_detach_device(dev); + arm_iommu_release_mapping(mapping); + } + } + ret = iommu_attach_device(private->domain, dev); if (ret) { DRM_DEV_ERROR(dev, "Failed to attach iommu device\n"); diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 191c56064f19..6b25b2f4f5a3 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -190,7 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) } EXPORT_SYMBOL(drm_sched_entity_flush); -static void drm_sched_entity_kill_jobs_irq_work(struct irq_work *wrk) +static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) { struct drm_sched_job *job = container_of(wrk, typeof(*job), work); @@ -207,8 +207,8 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, struct drm_sched_job *job = container_of(cb, struct drm_sched_job, finish_cb); - init_irq_work(&job->work, drm_sched_entity_kill_jobs_irq_work); - irq_work_queue(&job->work); + INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); + schedule_work(&job->work); } static struct dma_fence * diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index 08394444dd6e..f4886e66ff34 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -350,7 +350,7 @@ static int ssd130x_init(struct ssd130x_device *ssd130x) /* Set precharge period in number of ticks from the internal clock */ precharge = (SSD130X_SET_PRECHARGE_PERIOD1_SET(ssd130x->prechargep1) | - SSD130X_SET_PRECHARGE_PERIOD1_SET(ssd130x->prechargep2)); + SSD130X_SET_PRECHARGE_PERIOD2_SET(ssd130x->prechargep2)); ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_PRECHARGE_PERIOD, precharge); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index 768242a78e2b..5422363690e7 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -627,7 +627,7 @@ static const struct drm_connector_funcs simpledrm_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int +static enum drm_mode_status simpledrm_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe, const struct drm_display_mode *mode) { diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c index c7f5adb6bcf8..79a74184d732 100644 --- a/drivers/gpu/drm/vc4/vc4_perfmon.c +++ b/drivers/gpu/drm/vc4/vc4_perfmon.c @@ -17,13 +17,16 @@ void vc4_perfmon_get(struct vc4_perfmon *perfmon) { - struct vc4_dev *vc4 = perfmon->dev; + struct vc4_dev *vc4; + if (!perfmon) + return; + + vc4 = perfmon->dev; if (WARN_ON_ONCE(vc4->is_vc5)) return; - if (perfmon) - refcount_inc(&perfmon->refcnt); + refcount_inc(&perfmon->refcnt); } void vc4_perfmon_put(struct vc4_perfmon *perfmon) diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c index 5c4cf742f5ae..157e232aace0 100644 --- a/drivers/hwmon/ibmaem.c +++ b/drivers/hwmon/ibmaem.c @@ -550,7 +550,7 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle) res = platform_device_add(data->pdev); if 
(res) - goto ipmi_err; + goto dev_add_err; platform_set_drvdata(data->pdev, data); @@ -598,7 +598,9 @@ hwmon_reg_err: ipmi_destroy_user(data->ipmi.user); ipmi_err: platform_set_drvdata(data->pdev, NULL); - platform_device_unregister(data->pdev); + platform_device_del(data->pdev); +dev_add_err: + platform_device_put(data->pdev); dev_err: ida_free(&aem_ida, data->id); id_err: @@ -690,7 +692,7 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe, res = platform_device_add(data->pdev); if (res) - goto ipmi_err; + goto dev_add_err; platform_set_drvdata(data->pdev, data); @@ -738,7 +740,9 @@ hwmon_reg_err: ipmi_destroy_user(data->ipmi.user); ipmi_err: platform_set_drvdata(data->pdev, NULL); - platform_device_unregister(data->pdev); + platform_device_del(data->pdev); +dev_add_err: + platform_device_put(data->pdev); dev_err: ida_free(&aem_ida, data->id); id_err: diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 4e239bd75b1d..5a9d47a229e4 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -428,6 +428,10 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) data->ccd_offset = 0x154; k10temp_get_ccd_support(pdev, data, 8); break; + case 0xa0 ... 0xaf: + data->ccd_offset = 0x300; + k10temp_get_ccd_support(pdev, data, 8); + break; } } else if (boot_cpu_data.x86 == 0x19) { data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; @@ -445,6 +449,11 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) data->ccd_offset = 0x300; k10temp_get_ccd_support(pdev, data, 8); break; + case 0x60 ... 0x6f: + case 0x70 ... 0x7f: + data->ccd_offset = 0x308; + k10temp_get_ccd_support(pdev, data, 8); + break; case 0x10 ... 0x1f: case 0xa0 ... 0xaf: data->ccd_offset = 0x300; @@ -489,10 +498,13 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, {} }; diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index ea070b91e5b9..157b73a3da29 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -145,7 +145,7 @@ static int occ_poll(struct occ *occ) cmd[6] = 0; /* checksum lsb */ /* mutex should already be locked if necessary */ - rc = occ->send_cmd(occ, cmd, sizeof(cmd)); + rc = occ->send_cmd(occ, cmd, sizeof(cmd), &occ->resp, sizeof(occ->resp)); if (rc) { occ->last_error = rc; if (occ->error_count++ > OCC_ERROR_COUNT_THRESHOLD) @@ -182,6 +182,7 @@ static int occ_set_user_power_cap(struct occ *occ, u16 user_power_cap) { int rc; u8 cmd[8]; + u8 resp[8]; __be16 user_power_cap_be = cpu_to_be16(user_power_cap); cmd[0] = 0; /* sequence number */ @@ -198,7 +199,7 @@ static int occ_set_user_power_cap(struct occ *occ, u16 user_power_cap) if (rc) return rc; - rc = occ->send_cmd(occ, cmd, sizeof(cmd)); + rc = occ->send_cmd(occ, cmd, sizeof(cmd), resp, sizeof(resp)); mutex_unlock(&occ->lock); diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h index 64d5ec7e169b..7ac4b2febce6 
100644 --- a/drivers/hwmon/occ/common.h +++ b/drivers/hwmon/occ/common.h @@ -96,7 +96,8 @@ struct occ { int powr_sample_time_us; /* average power sample time */ u8 poll_cmd_data; /* to perform OCC poll command */ - int (*send_cmd)(struct occ *occ, u8 *cmd, size_t len); + int (*send_cmd)(struct occ *occ, u8 *cmd, size_t len, void *resp, + size_t resp_len); unsigned long next_update; struct mutex lock; /* lock OCC access */ diff --git a/drivers/hwmon/occ/p8_i2c.c b/drivers/hwmon/occ/p8_i2c.c index da39ea28df31..b221be1f35f3 100644 --- a/drivers/hwmon/occ/p8_i2c.c +++ b/drivers/hwmon/occ/p8_i2c.c @@ -111,7 +111,8 @@ static int p8_i2c_occ_putscom_be(struct i2c_client *client, u32 address, be32_to_cpu(data1)); } -static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len) +static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len, + void *resp, size_t resp_len) { int i, rc; unsigned long start; @@ -120,7 +121,7 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len) const long wait_time = msecs_to_jiffies(OCC_CMD_IN_PRG_WAIT_MS); struct p8_i2c_occ *ctx = to_p8_i2c_occ(occ); struct i2c_client *client = ctx->client; - struct occ_response *resp = &occ->resp; + struct occ_response *or = (struct occ_response *)resp; start = jiffies; @@ -151,7 +152,7 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len) return rc; /* wait for OCC */ - if (resp->return_status == OCC_RESP_CMD_IN_PRG) { + if (or->return_status == OCC_RESP_CMD_IN_PRG) { rc = -EALREADY; if (time_after(jiffies, start + timeout)) @@ -163,7 +164,7 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len) } while (rc); /* check the OCC response */ - switch (resp->return_status) { + switch (or->return_status) { case OCC_RESP_CMD_IN_PRG: rc = -ETIMEDOUT; break; @@ -192,8 +193,8 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len) if (rc < 0) return rc; - data_length = get_unaligned_be16(&resp->data_length); - if (data_length > OCC_RESP_DATA_BYTES) + data_length = get_unaligned_be16(&or->data_length); + if ((data_length + 7) > resp_len) return -EMSGSIZE; /* fetch the rest of the response data */ diff --git a/drivers/hwmon/occ/p9_sbe.c b/drivers/hwmon/occ/p9_sbe.c index 42fc7b97bb34..a91937e28e12 100644 --- a/drivers/hwmon/occ/p9_sbe.c +++ b/drivers/hwmon/occ/p9_sbe.c @@ -78,11 +78,10 @@ done: return notify; } -static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len) +static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len, + void *resp, size_t resp_len) { - struct occ_response *resp = &occ->resp; struct p9_sbe_occ *ctx = to_p9_sbe_occ(occ); - size_t resp_len = sizeof(*resp); int rc; rc = fsi_occ_submit(ctx->sbe, cmd, len, resp, &resp_len); @@ -96,7 +95,7 @@ static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len) return rc; } - switch (resp->return_status) { + switch (((struct occ_response *)resp)->return_status) { case OCC_RESP_CMD_IN_PRG: rc = -ETIMEDOUT; break; diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c index 6bc3273e31e7..3ad375a76f3e 100644 --- a/drivers/hwmon/pmbus/ucd9200.c +++ b/drivers/hwmon/pmbus/ucd9200.c @@ -148,7 +148,7 @@ static int ucd9200_probe(struct i2c_client *client) * This only affects the READ_IOUT and READ_TEMPERATURE2 registers. * READ_IOUT will return the sum of currents of all phases of a rail, * and READ_TEMPERATURE2 will return the maximum temperature detected - * for the the phases of the rail. + * for the phases of the rail. 
*/ for (i = 0; i < info->pages; i++) { /* diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index a1bae59208e3..708a67c7faaa 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -486,7 +486,7 @@ config I2C_BCM_KONA config I2C_BRCMSTB tristate "BRCM Settop/DSL I2C controller" - depends on ARCH_BCM2835 || ARCH_BCM4908 || ARCH_BCM_63XX || \ + depends on ARCH_BCM2835 || ARCH_BCM4908 || ARCH_BCMBCA || \ ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST default y help diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index b4c1ad19cdae..630cfa4ddd46 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -388,9 +388,9 @@ static irqreturn_t cdns_i2c_slave_isr(void *ptr) */ static irqreturn_t cdns_i2c_master_isr(void *ptr) { - unsigned int isr_status, avail_bytes, updatetx; + unsigned int isr_status, avail_bytes; unsigned int bytes_to_send; - bool hold_quirk; + bool updatetx; struct cdns_i2c *id = ptr; /* Signal completion only after everything is updated */ int done_flag = 0; @@ -410,11 +410,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr) * Check if transfer size register needs to be updated again for a * large data receive operation. */ - updatetx = 0; - if (id->recv_count > id->curr_recv_count) - updatetx = 1; - - hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx; + updatetx = id->recv_count > id->curr_recv_count; /* When receiving, handle data interrupt and completion interrupt */ if (id->p_recv_buf && @@ -445,7 +441,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr) break; } - if (cdns_is_holdquirk(id, hold_quirk)) + if (cdns_is_holdquirk(id, updatetx)) break; } @@ -456,7 +452,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr) * maintain transfer size non-zero while performing a large * receive operation. 
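+ * (The wait below therefore runs only when the broken-hold quirk is
+ * present and updatetx is still set, i.e. more data remains than the
+ * currently programmed transfer size.)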
*/ - if (cdns_is_holdquirk(id, hold_quirk)) { + if (cdns_is_holdquirk(id, updatetx)) { /* wait while fifo is full */ while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) != (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH)) @@ -478,22 +474,6 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr) CDNS_I2C_XFER_SIZE_OFFSET); id->curr_recv_count = id->recv_count; } - } else if (id->recv_count && !hold_quirk && - !id->curr_recv_count) { - - /* Set the slave address in address register*/ - cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK, - CDNS_I2C_ADDR_OFFSET); - - if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) { - cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE, - CDNS_I2C_XFER_SIZE_OFFSET); - id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE; - } else { - cdns_i2c_writereg(id->recv_count, - CDNS_I2C_XFER_SIZE_OFFSET); - id->curr_recv_count = id->recv_count; - } } /* Clear hold (if not repeated start) and signal completion */ @@ -1338,6 +1318,7 @@ static int cdns_i2c_probe(struct platform_device *pdev) return 0; err_clk_dis: + clk_notifier_unregister(id->clk, &id->clk_rate_change_nb); clk_disable_unprepare(id->clk); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index e9e2db68b9fb..78fb1a4274a6 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -66,7 +66,7 @@ /* IMX I2C registers: * the I2C register offset is different between SoCs, - * to provid support for all these chips, split the + * to provide support for all these chips, split the * register offset into a fixed base address and a * variable shift value, then the full register offset * will be calculated by diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c index 56aa424fd71d..815cc561386b 100644 --- a/drivers/i2c/busses/i2c-mlxcpld.c +++ b/drivers/i2c/busses/i2c-mlxcpld.c @@ -49,7 +49,7 @@ #define MLXCPLD_LPCI2C_NACK_IND 2 #define MLXCPLD_I2C_FREQ_1000KHZ_SET 0x04 -#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0c +#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0e #define MLXCPLD_I2C_FREQ_100KHZ_SET 0x42 enum mlxcpld_i2c_frequency { diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index ac8e7d60672a..39cb1b7bb865 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -161,7 +161,6 @@ static const char *piix4_aux_port_name_sb800 = " port 1"; struct sb800_mmio_cfg { void __iomem *addr; - struct resource *res; bool use_mmio; }; @@ -179,13 +178,11 @@ static int piix4_sb800_region_request(struct device *dev, struct sb800_mmio_cfg *mmio_cfg) { if (mmio_cfg->use_mmio) { - struct resource *res; void __iomem *addr; - res = request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR, - SB800_PIIX4_FCH_PM_SIZE, - "sb800_piix4_smb"); - if (!res) { + if (!request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR, + SB800_PIIX4_FCH_PM_SIZE, + "sb800_piix4_smb")) { dev_err(dev, "SMBus base address memory region 0x%x already in use.\n", SB800_PIIX4_FCH_PM_ADDR); @@ -195,12 +192,12 @@ static int piix4_sb800_region_request(struct device *dev, addr = ioremap(SB800_PIIX4_FCH_PM_ADDR, SB800_PIIX4_FCH_PM_SIZE); if (!addr) { - release_resource(res); + release_mem_region(SB800_PIIX4_FCH_PM_ADDR, + SB800_PIIX4_FCH_PM_SIZE); dev_err(dev, "SMBus base address mapping failed.\n"); return -ENOMEM; } - mmio_cfg->res = res; mmio_cfg->addr = addr; return 0; @@ -222,7 +219,8 @@ static void piix4_sb800_region_release(struct device *dev, { if (mmio_cfg->use_mmio) { iounmap(mmio_cfg->addr); - 
release_resource(mmio_cfg->res); + release_mem_region(SB800_PIIX4_FCH_PM_ADDR, + SB800_PIIX4_FCH_PM_SIZE); return; } diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 424ef470223d..445b19d20b9a 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -47,13 +47,16 @@ #include <linux/tick.h> #include <trace/events/power.h> #include <linux/sched.h> +#include <linux/sched/smt.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/moduleparam.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/nospec-branch.h> #include <asm/mwait.h> #include <asm/msr.h> +#include <asm/fpu/api.h> #define INTEL_IDLE_VERSION "0.5.1" @@ -106,6 +109,17 @@ static unsigned int mwait_substates __initdata; #define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15) /* + * Disable IBRS across idle (when KERNEL_IBRS), is exclusive vs IRQ_ENABLE + * above. + */ +#define CPUIDLE_FLAG_IBRS BIT(16) + +/* + * Initialize large xstate for the C6-state entrance. + */ +#define CPUIDLE_FLAG_INIT_XSTATE BIT(17) + +/* * MWAIT takes an 8-bit "hint" in EAX "suggesting" * the C-state (top nibble) and sub-state (bottom nibble) * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc. @@ -154,11 +168,42 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev, raw_local_irq_enable(); ret = __intel_idle(dev, drv, index); - raw_local_irq_disable(); + + /* + * The lockdep hardirqs state may be changed to 'on' with timer + * tick interrupt followed by __do_softirq(). Use local_irq_disable() + * to keep the hardirqs state correct. + */ + local_irq_disable(); + + return ret; +} + +static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + bool smt_active = sched_smt_active(); + u64 spec_ctrl = spec_ctrl_current(); + int ret; + + if (smt_active) + wrmsrl(MSR_IA32_SPEC_CTRL, 0); + + ret = __intel_idle(dev, drv, index); + + if (smt_active) + wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl); return ret; } +static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + fpu_idle_fpregs(); + return __intel_idle(dev, drv, index); +} + /** * intel_idle_s2idle - Ask the processor to enter the given idle state. * @dev: cpuidle device of the target CPU. 
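A quick aside on the MWAIT hint arithmetic behind these flags: the hint's top nibble encodes the target C-state minus one (0x00 requests MWAIT(C1), 0x10 MWAIT(C2), and so on) and the bottom nibble the sub-state. A minimal userspace sketch, re-implementing the two helpers purely for illustration (the kernel's own definitions live in intel_idle.c):

#include <stdio.h>

/* Illustrative copies of the intel_idle helpers. */
#define MWAIT2flg(eax)   (((eax) & 0xFF) << 24)   /* hint -> state flags */
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF) /* state flags -> hint */

int main(void)
{
	unsigned int flags = MWAIT2flg(0x20); /* e.g. the skl_cstates C6 entry */
	unsigned int hint = flg2MWAIT(flags);

	/* Prints "hint=0x20 mwait-cstate=3 substate=0". */
	printf("hint=0x%02x mwait-cstate=%u substate=%u\n",
	       hint, (hint >> 4) + 1, hint & 0xF);
	return 0;
}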
@@ -174,8 +219,12 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev, static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - unsigned long eax = flg2MWAIT(drv->states[index].flags); unsigned long ecx = 1; /* break on interrupt flag */ + struct cpuidle_state *state = &drv->states[index]; + unsigned long eax = flg2MWAIT(state->flags); + + if (state->flags & CPUIDLE_FLAG_INIT_XSTATE) + fpu_idle_fpregs(); mwait_idle_with_hints(eax, ecx); @@ -680,7 +729,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { { .name = "C6", .desc = "MWAIT 0x20", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 85, .target_residency = 200, .enter = &intel_idle, @@ -688,7 +737,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { { .name = "C7s", .desc = "MWAIT 0x33", - .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 124, .target_residency = 800, .enter = &intel_idle, @@ -696,7 +745,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { { .name = "C8", .desc = "MWAIT 0x40", - .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 200, .target_residency = 800, .enter = &intel_idle, @@ -704,7 +753,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { { .name = "C9", .desc = "MWAIT 0x50", - .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 480, .target_residency = 5000, .enter = &intel_idle, @@ -712,7 +761,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { { .name = "C10", .desc = "MWAIT 0x60", - .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 890, .target_residency = 5000, .enter = &intel_idle, @@ -741,7 +790,7 @@ static struct cpuidle_state skx_cstates[] __initdata = { { .name = "C6", .desc = "MWAIT 0x20", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 133, .target_residency = 600, .enter = &intel_idle, @@ -910,7 +959,8 @@ static struct cpuidle_state spr_cstates[] __initdata = { { .name = "C6", .desc = "MWAIT 0x20", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_INIT_XSTATE, .exit_latency = 290, .target_residency = 800, .enter = &intel_idle, @@ -1819,6 +1869,15 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE) drv->states[drv->state_count].enter = intel_idle_irq; + if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) && + cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) { + WARN_ON_ONCE(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE); + drv->states[drv->state_count].enter = intel_idle_ibrs; + } + + if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_INIT_XSTATE) + drv->states[drv->state_count].enter = intel_idle_xstate; + if ((disabled_states_mask & BIT(drv->state_count)) || ((icpu->use_acpi || force_use_acpi) && intel_idle_off_by_default(mwait_hint) && diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 1c107d6d03b9..b985e0d9bc05 
100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1252,8 +1252,10 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device, return ERR_CAST(cm_id_priv); err = cm_init_listen(cm_id_priv, service_id, 0); - if (err) + if (err) { + ib_destroy_cm_id(&cm_id_priv->id); return ERR_PTR(err); + } spin_lock_irq(&cm_id_priv->lock); listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler); diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c index 638bf4a1ed94..646fa8677490 100644 --- a/drivers/infiniband/hw/irdma/cm.c +++ b/drivers/infiniband/hw/irdma/cm.c @@ -4231,10 +4231,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr, struct irdma_cm_node *cm_node; struct list_head teardown_list; struct ib_qp_attr attr; - struct irdma_sc_vsi *vsi = &iwdev->vsi; - struct irdma_sc_qp *sc_qp; - struct irdma_qp *qp; - int i; INIT_LIST_HEAD(&teardown_list); @@ -4251,52 +4247,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr, irdma_cm_disconn(cm_node->iwqp); irdma_rem_ref_cm_node(cm_node); } - if (!iwdev->roce_mode) - return; - - INIT_LIST_HEAD(&teardown_list); - for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { - mutex_lock(&vsi->qos[i].qos_mutex); - list_for_each_safe (list_node, list_core_temp, - &vsi->qos[i].qplist) { - u32 qp_ip[4]; - - sc_qp = container_of(list_node, struct irdma_sc_qp, - list); - if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC) - continue; - - qp = sc_qp->qp_uk.back_qp; - if (!disconnect_all) { - if (nfo->ipv4) - qp_ip[0] = qp->udp_info.local_ipaddr[3]; - else - memcpy(qp_ip, - &qp->udp_info.local_ipaddr[0], - sizeof(qp_ip)); - } - - if (disconnect_all || - (nfo->vlan_id == (qp->udp_info.vlan_tag & VLAN_VID_MASK) && - !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 
4 : 16))) { - spin_lock(&iwdev->rf->qptable_lock); - if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) { - irdma_qp_add_ref(&qp->ibqp); - list_add(&qp->teardown_entry, - &teardown_list); - } - spin_unlock(&iwdev->rf->qptable_lock); - } - } - mutex_unlock(&vsi->qos[i].qos_mutex); - } - - list_for_each_safe (list_node, list_core_temp, &teardown_list) { - qp = container_of(list_node, struct irdma_qp, teardown_entry); - attr.qp_state = IB_QPS_ERR; - irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL); - irdma_qp_rem_ref(&qp->ibqp); - } } /** diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c index e46fc110004d..50299f58b6b3 100644 --- a/drivers/infiniband/hw/irdma/i40iw_hw.c +++ b/drivers/infiniband/hw/irdma/i40iw_hw.c @@ -201,6 +201,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev) dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD; dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT; dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE; + dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M; dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE; dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE; dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES; diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c index cf53b17510cd..5986fd906308 100644 --- a/drivers/infiniband/hw/irdma/icrdma_hw.c +++ b/drivers/infiniband/hw/irdma/icrdma_hw.c @@ -139,6 +139,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev) dev->cqp_db = dev->hw_regs[IRDMA_CQPDB]; dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK]; dev->irq_ops = &icrdma_irq_ops; + dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G; dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE; dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE; dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT; diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h index 46c12334c735..4789e85d717b 100644 --- a/drivers/infiniband/hw/irdma/irdma.h +++ b/drivers/infiniband/hw/irdma/irdma.h @@ -127,6 +127,7 @@ struct irdma_hw_attrs { u64 max_hw_outbound_msg_size; u64 max_hw_inbound_msg_size; u64 max_mr_size; + u64 page_size_cap; u32 min_hw_qp_id; u32 min_hw_aeq_size; u32 max_hw_aeq_size; diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index c4412ece5a6d..96135a228f26 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -32,7 +32,7 @@ static int irdma_query_device(struct ib_device *ibdev, props->vendor_part_id = pcidev->device; props->hw_ver = rf->pcidev->revision; - props->page_size_cap = SZ_4K | SZ_2M | SZ_1G; + props->page_size_cap = hw_attrs->page_size_cap; props->max_mr_size = hw_attrs->max_mr_size; props->max_qp = rf->max_qp - rf->used_qps; props->max_qp_wr = hw_attrs->max_qp_wr; @@ -2781,7 +2781,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) { iwmr->page_size = ib_umem_find_best_pgsz(region, - SZ_4K | SZ_2M | SZ_1G, + iwdev->rf->sc_dev.hw_attrs.page_size_cap, virt); if (unlikely(!iwmr->page_size)) { kfree(iwmr); diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 8def88cfa300..db9ef3e1eb97 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -418,6 +418,7 @@ struct qedr_qp { u32 sq_psn; u32 qkey; u32 dest_qp_num; + u8 timeout; /* Relevant to qps created from kernel space only (ULPs) */ u8 prev_wqe_size; diff --git 
a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index f0f43b6db89e..03ed7c0fae50 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2613,6 +2613,8 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1 << max_t(int, attr->timeout - 8, 0); else qp_params.ack_timeout = 0; + + qp->timeout = attr->timeout; } if (attr_mask & IB_QP_RETRY_CNT) { @@ -2772,7 +2774,7 @@ int qedr_query_qp(struct ib_qp *ibqp, rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]); rdma_ah_set_port_num(&qp_attr->ah_attr, 1); rdma_ah_set_sl(&qp_attr->ah_attr, 0); - qp_attr->timeout = params.timeout; + qp_attr->timeout = qp->timeout; qp_attr->rnr_retry = params.rnr_retry; qp_attr->retry_cnt = params.retry_cnt; qp_attr->min_rnr_timer = params.min_rnr_nak_timer; diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 3ad9870db108..aa45a9fee6a0 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -900,6 +900,11 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts) } else { dev_warn(dev, "Unexpected ACPI resources: gpio_count %d, gpio_int_idx %d\n", ts->gpio_count, ts->gpio_int_idx); + /* + * On some devices _PS0 does a reset for us and + * sometimes this is necessary for things to work. + */ + acpi_device_fix_up_power(ACPI_COMPANION(dev)); return -EINVAL; } diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index 43c521f50c85..3dda6eaabdab 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -1654,6 +1654,9 @@ static int usbtouch_probe(struct usb_interface *intf, if (id->driver_info == DEVTYPE_IGNORE) return -ENODEV; + if (id->driver_info >= ARRAY_SIZE(usbtouch_dev_info)) + return -ENODEV; + endpoint = usbtouch_get_input_endpoint(intf->cur_altsetting); if (!endpoint) return -ENXIO; diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c index 2757c7768ffe..f51ab5614532 100644 --- a/drivers/input/touchscreen/wm97xx-core.c +++ b/drivers/input/touchscreen/wm97xx-core.c @@ -758,7 +758,9 @@ batt_err: static int wm97xx_mfd_remove(struct platform_device *pdev) { - return wm97xx_remove(&pdev->dev); + wm97xx_remove(&pdev->dev); + + return 0; } static int __maybe_unused wm97xx_suspend(struct device *dev) diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c index e285a220c913..51bd66a45a11 100644 --- a/drivers/iommu/hyperv-iommu.c +++ b/drivers/iommu/hyperv-iommu.c @@ -194,7 +194,7 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg) u32 vector; struct irq_cfg *cfg; int ioapic_id; - struct cpumask *affinity; + const struct cpumask *affinity; int cpu; struct hv_interrupt_entry entry; struct hyperv_root_ir_data *data = irq_data->chip_data; diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 592c1e1a5d4b..9699ca101c62 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -382,7 +382,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb, static struct notifier_block dmar_pci_bus_nb = { .notifier_call = dmar_pci_bus_notifier, - .priority = INT_MIN, + .priority = 1, }; static struct dmar_drhd_unit * diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 44016594831d..5c0dce78586a 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -320,30 +320,6 @@
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); DEFINE_SPINLOCK(device_domain_lock); static LIST_HEAD(device_domain_list); -/* - * Iterate over elements in device_domain_list and call the specified - * callback @fn against each element. - */ -int for_each_device_domain(int (*fn)(struct device_domain_info *info, - void *data), void *data) -{ - int ret = 0; - unsigned long flags; - struct device_domain_info *info; - - spin_lock_irqsave(&device_domain_lock, flags); - list_for_each_entry(info, &device_domain_list, global) { - ret = fn(info, data); - if (ret) { - spin_unlock_irqrestore(&device_domain_lock, flags); - return ret; - } - } - spin_unlock_irqrestore(&device_domain_lock, flags); - - return 0; -} - const struct iommu_ops intel_iommu_ops; static bool translation_pre_enabled(struct intel_iommu *iommu) diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index cb4c1d0cf25c..17cad7c1f62d 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -86,54 +86,6 @@ void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid) /* * Per device pasid table management: */ -static inline void -device_attach_pasid_table(struct device_domain_info *info, - struct pasid_table *pasid_table) -{ - info->pasid_table = pasid_table; - list_add(&info->table, &pasid_table->dev); -} - -static inline void -device_detach_pasid_table(struct device_domain_info *info, - struct pasid_table *pasid_table) -{ - info->pasid_table = NULL; - list_del(&info->table); -} - -struct pasid_table_opaque { - struct pasid_table **pasid_table; - int segment; - int bus; - int devfn; -}; - -static int search_pasid_table(struct device_domain_info *info, void *opaque) -{ - struct pasid_table_opaque *data = opaque; - - if (info->iommu->segment == data->segment && - info->bus == data->bus && - info->devfn == data->devfn && - info->pasid_table) { - *data->pasid_table = info->pasid_table; - return 1; - } - - return 0; -} - -static int get_alias_pasid_table(struct pci_dev *pdev, u16 alias, void *opaque) -{ - struct pasid_table_opaque *data = opaque; - - data->segment = pci_domain_nr(pdev->bus); - data->bus = PCI_BUS_NUM(alias); - data->devfn = alias & 0xff; - - return for_each_device_domain(&search_pasid_table, data); -} /* * Allocate a pasid table for @dev. 
It should be called in a @@ -143,28 +95,18 @@ int intel_pasid_alloc_table(struct device *dev) { struct device_domain_info *info; struct pasid_table *pasid_table; - struct pasid_table_opaque data; struct page *pages; u32 max_pasid = 0; - int ret, order; - int size; + int order, size; might_sleep(); info = dev_iommu_priv_get(dev); if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table)) return -EINVAL; - /* DMA alias device already has a pasid table, use it: */ - data.pasid_table = &pasid_table; - ret = pci_for_each_dma_alias(to_pci_dev(dev), - &get_alias_pasid_table, &data); - if (ret) - goto attach_out; - pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL); if (!pasid_table) return -ENOMEM; - INIT_LIST_HEAD(&pasid_table->dev); if (info->pasid_supported) max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)), @@ -182,9 +124,7 @@ int intel_pasid_alloc_table(struct device *dev) pasid_table->table = page_address(pages); pasid_table->order = order; pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3); - -attach_out: - device_attach_pasid_table(info, pasid_table); + info->pasid_table = pasid_table; return 0; } @@ -202,10 +142,7 @@ void intel_pasid_free_table(struct device *dev) return; pasid_table = info->pasid_table; - device_detach_pasid_table(info, pasid_table); - - if (!list_empty(&pasid_table->dev)) - return; + info->pasid_table = NULL; /* Free scalable mode PASID directory tables: */ dir = pasid_table->table; diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index 583ea67fc783..bf5b937848b4 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -74,7 +74,6 @@ struct pasid_table { void *table; /* pasid table pointer */ int order; /* page order of pasid table */ u32 max_pasid; /* max pasid */ - struct list_head dev; /* device list */ }; /* Get PRESENT bit of a PASID directory entry. 
*/ diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 4dd4f2108f7a..66b9fa408bf2 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -8,7 +8,7 @@ config IRQCHIP config ARM_GIC bool select IRQ_DOMAIN_HIERARCHY - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config ARM_GIC_PM bool @@ -34,7 +34,7 @@ config ARM_GIC_V3 bool select IRQ_DOMAIN_HIERARCHY select PARTITION_PERCPU - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config ARM_GIC_V3_ITS bool @@ -76,7 +76,7 @@ config ARMADA_370_XP_IRQ bool select GENERIC_IRQ_CHIP select PCI_MSI if PCI - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config ALPINE_MSI bool @@ -112,7 +112,7 @@ config BCM6345_L1_IRQ bool select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config BCM7038_L1_IRQ tristate "Broadcom STB 7038-style L1/L2 interrupt controller driver" @@ -120,7 +120,7 @@ config BCM7038_L1_IRQ default ARCH_BRCMSTB || BMIPS_GENERIC select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config BCM7120_L2_IRQ tristate "Broadcom STB 7120-style L2 interrupt controller driver" @@ -177,9 +177,9 @@ config MADERA_IRQ config IRQ_MIPS_CPU bool select GENERIC_IRQ_CHIP - select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING + select GENERIC_IRQ_IPI if SMP && SYS_SUPPORTS_MULTITHREADING select IRQ_DOMAIN - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config CLPS711X_IRQCHIP bool @@ -242,6 +242,14 @@ config RENESAS_RZA1_IRQC Enable support for the Renesas RZ/A1 Interrupt Controller, to use up to 8 external interrupts with configurable sense select. +config RENESAS_RZG2L_IRQC + bool "Renesas RZ/G2L (and alike SoC) IRQC support" if COMPILE_TEST + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN_HIERARCHY + help + Enable support for the Renesas RZ/G2L (and alike SoC) Interrupt Controller + for external devices. + config SL28CPLD_INTC bool "Kontron sl28cpld IRQ controller" depends on MFD_SL28CPLD=y || COMPILE_TEST @@ -294,11 +302,11 @@ config VERSATILE_FPGA_IRQ_NR config XTENSA_MX bool select IRQ_DOMAIN - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP config XILINX_INTC bool "Xilinx Interrupt Controller IP" - depends on OF + depends on OF_ADDRESS select IRQ_DOMAIN help Support for the Xilinx Interrupt Controller IP core. @@ -322,7 +330,8 @@ config KEYSTONE_IRQ config MIPS_GIC bool - select GENERIC_IRQ_IPI + select GENERIC_IRQ_IPI if SMP + select IRQ_DOMAIN_HIERARCHY select MIPS_CM config INGENIC_IRQ @@ -530,6 +539,7 @@ config SIFIVE_PLIC bool "SiFive Platform-Level Interrupt Controller" depends on RISCV select IRQ_DOMAIN_HIERARCHY + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP help This enables support for the PLIC chip found in SiFive (and potentially other) RISC-V systems. The PLIC controls devices @@ -546,6 +556,16 @@ config EXYNOS_IRQ_COMBINER Say yes here to add support for the IRQ combiner devices embedded in Samsung Exynos chips. +config IRQ_LOONGARCH_CPU + bool + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + select GENERIC_IRQ_EFFECTIVE_AFF_MASK + help + Support for the LoongArch CPU Interrupt Controller. For details of + irq chip hierarchy on LoongArch platforms please read the document + Documentation/loongarch/irq-chip-model.rst. 
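+	  This entry has no prompt; it is meant to be selected by the
+	  LoongArch platform code rather than enabled directly.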
+ config LOONGSON_LIOINTC bool "Loongson Local I/O Interrupt Controller" depends on MACH_LOONGSON64 @@ -555,6 +575,16 @@ config LOONGSON_LIOINTC help Support for the Loongson Local I/O Interrupt Controller. +config LOONGSON_EIOINTC + bool "Loongson Extend I/O Interrupt Controller" + depends on LOONGARCH + depends on MACH_LOONGSON64 + default MACH_LOONGSON64 + select IRQ_DOMAIN_HIERARCHY + select GENERIC_IRQ_CHIP + help + Support for the Loongson3 Extend I/O Interrupt Vector Controller. + config LOONGSON_HTPIC bool "Loongson3 HyperTransport PIC Controller" depends on MACH_LOONGSON64 && MIPS @@ -574,7 +604,7 @@ config LOONGSON_HTVEC config LOONGSON_PCH_PIC bool "Loongson PCH PIC Controller" - depends on MACH_LOONGSON64 || COMPILE_TEST + depends on MACH_LOONGSON64 default MACH_LOONGSON64 select IRQ_DOMAIN_HIERARCHY select IRQ_FASTEOI_HIERARCHY_HANDLERS @@ -583,7 +613,7 @@ config LOONGSON_PCH_PIC config LOONGSON_PCH_MSI bool "Loongson PCH MSI Controller" - depends on MACH_LOONGSON64 || COMPILE_TEST + depends on MACH_LOONGSON64 depends on PCI default MACH_LOONGSON64 select IRQ_DOMAIN_HIERARCHY @@ -591,6 +621,14 @@ config LOONGSON_PCH_MSI help Support for the Loongson PCH MSI Controller. +config LOONGSON_PCH_LPC + bool "Loongson PCH LPC Controller" + depends on MACH_LOONGSON64 + default (MACH_LOONGSON64 && LOONGARCH) + select IRQ_DOMAIN_HIERARCHY + help + Support for the Loongson PCH LPC Controller. + config MST_IRQ bool "MStar Interrupt Controller" depends on ARCH_MEDIATEK || ARCH_MSTARV7 || COMPILE_TEST diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index cb12a152663e..b6acbca2248b 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -51,6 +51,7 @@ obj-$(CONFIG_RDA_INTC) += irq-rda-intc.o obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o obj-$(CONFIG_RENESAS_RZA1_IRQC) += irq-renesas-rza1.o +obj-$(CONFIG_RENESAS_RZG2L_IRQC) += irq-renesas-rzg2l.o obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o @@ -103,11 +104,14 @@ obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o +obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o +obj-$(CONFIG_LOONGSON_EIOINTC) += irq-loongson-eiointc.o obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o obj-$(CONFIG_LOONGSON_HTVEC) += irq-loongson-htvec.o obj-$(CONFIG_LOONGSON_PCH_PIC) += irq-loongson-pch-pic.o obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o +obj-$(CONFIG_LOONGSON_PCH_LPC) += irq-loongson-pch-lpc.o obj-$(CONFIG_MST_IRQ) += irq-mst-intc.o obj-$(CONFIG_SL28CPLD_INTC) += irq-sl28cpld.o obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c index 5ac83185ff47..1c2813ad8bbe 100644 --- a/drivers/irqchip/irq-apple-aic.c +++ b/drivers/irqchip/irq-apple-aic.c @@ -228,7 +228,7 @@ #define AIC_TMR_EL02_PHYS AIC_TMR_GUEST_PHYS #define AIC_TMR_EL02_VIRT AIC_TMR_GUEST_VIRT -DEFINE_STATIC_KEY_TRUE(use_fast_ipi); +static DEFINE_STATIC_KEY_TRUE(use_fast_ipi); struct aic_info { int version; diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c index 142a7431745f..6899e37810a8 100644 --- a/drivers/irqchip/irq-bcm6345-l1.c +++ b/drivers/irqchip/irq-bcm6345-l1.c @@ -216,11 
+216,11 @@ static int bcm6345_l1_set_affinity(struct irq_data *d, enabled = intc->cpus[old_cpu]->enable_cache[word] & mask; if (enabled) __bcm6345_l1_mask(d); - cpumask_copy(irq_data_get_affinity_mask(d), dest); + irq_data_update_affinity(d, dest); if (enabled) __bcm6345_l1_unmask(d); } else { - cpumask_copy(irq_data_get_affinity_mask(d), dest); + irq_data_update_affinity(d, dest); } raw_spin_unlock_irqrestore(&intc->lock, flags); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 5c1cf907ee68..262658fd5f9e 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1783,7 +1783,7 @@ static void gic_enable_nmi_support(void) * the security state of the GIC (controlled by the GICD_CTRL.DS bit) * and if Group 0 interrupts can be delivered to Linux in the non-secure * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the - * the ICC_PMR_EL1 register and the priority that software assigns to + * ICC_PMR_EL1 register and the priority that software assigns to * interrupts: * * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority @@ -2042,15 +2042,40 @@ static void __init gic_of_setup_kvm_info(struct device_node *node) vgic_set_kvm_info(&gic_v3_kvm_info); } +static void gic_request_region(resource_size_t base, resource_size_t size, + const char *name) +{ + if (!request_mem_region(base, size, name)) + pr_warn_once(FW_BUG "%s region %pa has overlapping address\n", + name, &base); +} + +static void __iomem *gic_of_iomap(struct device_node *node, int idx, + const char *name, struct resource *res) +{ + void __iomem *base; + int ret; + + ret = of_address_to_resource(node, idx, res); + if (ret) + return IOMEM_ERR_PTR(ret); + + gic_request_region(res->start, resource_size(res), name); + base = of_iomap(node, idx); + + return base ?: IOMEM_ERR_PTR(-ENOMEM); +} + static int __init gic_of_init(struct device_node *node, struct device_node *parent) { void __iomem *dist_base; struct redist_region *rdist_regs; + struct resource res; u64 redist_stride; u32 nr_redist_regions; int err, i; - dist_base = of_io_request_and_map(node, 0, "GICD"); + dist_base = gic_of_iomap(node, 0, "GICD", &res); if (IS_ERR(dist_base)) { pr_err("%pOF: unable to map gic dist registers\n", node); return PTR_ERR(dist_base); @@ -2073,12 +2098,8 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare } for (i = 0; i < nr_redist_regions; i++) { - struct resource res; - int ret; - - ret = of_address_to_resource(node, 1 + i, &res); - rdist_regs[i].redist_base = of_io_request_and_map(node, 1 + i, "GICR"); - if (ret || IS_ERR(rdist_regs[i].redist_base)) { + rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res); + if (IS_ERR(rdist_regs[i].redist_base)) { pr_err("%pOF: couldn't map region %d\n", node, i); err = -ENODEV; goto out_unmap_rdist; @@ -2151,7 +2172,7 @@ gic_acpi_parse_madt_redist(union acpi_subtable_headers *header, pr_err("Couldn't map GICR region @%llx\n", redist->base_address); return -ENOMEM; } - request_mem_region(redist->base_address, redist->length, "GICR"); + gic_request_region(redist->base_address, redist->length, "GICR"); gic_acpi_register_redist(redist->base_address, redist_base); return 0; @@ -2174,7 +2195,7 @@ gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header, redist_base = ioremap(gicc->gicr_base_address, size); if (!redist_base) return -ENOMEM; - request_mem_region(gicc->gicr_base_address, size, "GICR"); + gic_request_region(gicc->gicr_base_address, size, "GICR"); 
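+ /* The region claim is bookkeeping only: gic_request_region() warns once (FW_BUG) about an overlapping address instead of failing the probe, unlike the old of_io_request_and_map() path. */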
gic_acpi_register_redist(gicc->gicr_base_address, redist_base); return 0; @@ -2360,11 +2381,17 @@ static void __init gic_acpi_setup_kvm_info(void) vgic_set_kvm_info(&gic_v3_kvm_info); } +static struct fwnode_handle *gsi_domain_handle; + +static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi) +{ + return gsi_domain_handle; +} + static int __init gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_distributor *dist; - struct fwnode_handle *domain_handle; size_t size; int i, err; @@ -2376,7 +2403,7 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) pr_err("Unable to map GICD registers\n"); return -ENOMEM; } - request_mem_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD"); + gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD"); err = gic_validate_dist_version(acpi_data.dist_base); if (err) { @@ -2396,18 +2423,18 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) if (err) goto out_redist_unmap; - domain_handle = irq_domain_alloc_fwnode(&dist->base_address); - if (!domain_handle) { + gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address); + if (!gsi_domain_handle) { err = -ENOMEM; goto out_redist_unmap; } err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs, - acpi_data.nr_redist_regions, 0, domain_handle); + acpi_data.nr_redist_regions, 0, gsi_domain_handle); if (err) goto out_fwhandle_free; - acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id); if (static_branch_likely(&supports_deactivate_key)) gic_acpi_setup_kvm_info(); @@ -2415,7 +2442,7 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) return 0; out_fwhandle_free: - irq_domain_free_fwnode(domain_handle); + irq_domain_free_fwnode(gsi_domain_handle); out_redist_unmap: for (i = 0; i < acpi_data.nr_redist_regions; i++) if (acpi_data.redist_regs[i].redist_base) diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 820404cb56bc..4c7bae0ec8f9 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1682,11 +1682,17 @@ static void __init gic_acpi_setup_kvm_info(void) vgic_set_kvm_info(&gic_v2_kvm_info); } +static struct fwnode_handle *gsi_domain_handle; + +static struct fwnode_handle *gic_v2_get_gsi_domain_id(u32 gsi) +{ + return gsi_domain_handle; +} + static int __init gic_v2_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_distributor *dist; - struct fwnode_handle *domain_handle; struct gic_chip_data *gic = &gic_data[0]; int count, ret; @@ -1724,22 +1730,22 @@ static int __init gic_v2_acpi_init(union acpi_subtable_headers *header, /* * Initialize GIC instance zero (no multi-GIC support). 
*/ - domain_handle = irq_domain_alloc_fwnode(&dist->base_address); - if (!domain_handle) { + gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address); + if (!gsi_domain_handle) { pr_err("Unable to allocate domain handle\n"); gic_teardown(gic); return -ENOMEM; } - ret = __gic_init_bases(gic, domain_handle); + ret = __gic_init_bases(gic, gsi_domain_handle); if (ret) { pr_err("Failed to initialise GIC\n"); - irq_domain_free_fwnode(domain_handle); + irq_domain_free_fwnode(gsi_domain_handle); gic_teardown(gic); return ret; } - acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v2_get_gsi_domain_id); if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) gicv2m_init(NULL, gic_data[0].domain); diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c new file mode 100644 index 000000000000..327f3ab62c03 --- /dev/null +++ b/drivers/irqchip/irq-loongarch-cpu.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> + +#include <asm/loongarch.h> +#include <asm/setup.h> + +static struct irq_domain *irq_domain; +struct fwnode_handle *cpuintc_handle; + +static u32 lpic_gsi_to_irq(u32 gsi) +{ + /* Only pch irqdomain transferring is required for LoongArch. */ + if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ) + return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH); + + return 0; +} + +static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi) +{ + int id; + struct fwnode_handle *domain_handle = NULL; + + switch (gsi) { + case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ: + if (liointc_handle) + domain_handle = liointc_handle; + break; + + case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ: + if (pch_lpc_handle) + domain_handle = pch_lpc_handle; + break; + + case GSI_MIN_PCH_IRQ ... 
GSI_MAX_PCH_IRQ: + id = find_pch_pic(gsi); + if (id >= 0 && pch_pic_handle[id]) + domain_handle = pch_pic_handle[id]; + break; + } + + return domain_handle; +} + +static void mask_loongarch_irq(struct irq_data *d) +{ + clear_csr_ecfg(ECFGF(d->hwirq)); +} + +static void unmask_loongarch_irq(struct irq_data *d) +{ + set_csr_ecfg(ECFGF(d->hwirq)); +} + +static struct irq_chip cpu_irq_controller = { + .name = "CPUINTC", + .irq_mask = mask_loongarch_irq, + .irq_unmask = unmask_loongarch_irq, +}; + +static void handle_cpu_irq(struct pt_regs *regs) +{ + int hwirq; + unsigned int estat = read_csr_estat() & CSR_ESTAT_IS; + + while ((hwirq = ffs(estat))) { + estat &= ~BIT(hwirq - 1); + generic_handle_domain_irq(irq_domain, hwirq - 1); + } +} + +static int loongarch_cpu_intc_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_noprobe(irq); + irq_set_chip_and_handler(irq, &cpu_irq_controller, handle_percpu_irq); + + return 0; +} + +static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = { + .map = loongarch_cpu_intc_map, + .xlate = irq_domain_xlate_onecell, +}; + +static int __init +liointc_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_lio_pic *liointc_entry = (struct acpi_madt_lio_pic *)header; + + return liointc_acpi_init(irq_domain, liointc_entry); +} + +static int __init +eiointc_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_eio_pic *eiointc_entry = (struct acpi_madt_eio_pic *)header; + + return eiointc_acpi_init(irq_domain, eiointc_entry); +} + +static int __init acpi_cascade_irqdomain_init(void) +{ + acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC, + liointc_parse_madt, 0); + acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC, + eiointc_parse_madt, 0); + return 0; +} + +static int __init cpuintc_acpi_init(union acpi_subtable_headers *header, + const unsigned long end) +{ + if (irq_domain) + return 0; + + /* Mask interrupts. 
*/ + clear_csr_ecfg(ECFG0_IM); + clear_csr_estat(ESTATF_IP); + + cpuintc_handle = irq_domain_alloc_fwnode(NULL); + irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM, + &loongarch_cpu_intc_irq_domain_ops, NULL); + + if (!irq_domain) + panic("Failed to add irqdomain for LoongArch CPU"); + + set_handle_irq(&handle_cpu_irq); + acpi_set_irq_model(ACPI_IRQ_MODEL_LPIC, lpic_get_gsi_domain_id); + acpi_set_gsi_to_irq_fallback(lpic_gsi_to_irq); + acpi_cascade_irqdomain_init(); + + return 0; +} + +IRQCHIP_ACPI_DECLARE(cpuintc_v1, ACPI_MADT_TYPE_CORE_PIC, + NULL, ACPI_MADT_CORE_PIC_VERSION_V1, cpuintc_acpi_init); diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c new file mode 100644 index 000000000000..80d8ca6f2d46 --- /dev/null +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -0,0 +1,395 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Loongson Extend I/O Interrupt Controller support + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#define pr_fmt(fmt) "eiointc: " fmt + +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> + +#define EIOINTC_REG_NODEMAP 0x14a0 +#define EIOINTC_REG_IPMAP 0x14c0 +#define EIOINTC_REG_ENABLE 0x1600 +#define EIOINTC_REG_BOUNCE 0x1680 +#define EIOINTC_REG_ISR 0x1800 +#define EIOINTC_REG_ROUTE 0x1c00 + +#define VEC_REG_COUNT 4 +#define VEC_COUNT_PER_REG 64 +#define VEC_COUNT (VEC_REG_COUNT * VEC_COUNT_PER_REG) +#define VEC_REG_IDX(irq_id) ((irq_id) / VEC_COUNT_PER_REG) +#define VEC_REG_BIT(irq_id) ((irq_id) % VEC_COUNT_PER_REG) +#define EIOINTC_ALL_ENABLE 0xffffffff + +#define MAX_EIO_NODES (NR_CPUS / CORES_PER_EIO_NODE) + +static int nr_pics; + +struct eiointc_priv { + u32 node; + nodemask_t node_map; + cpumask_t cpuspan_map; + struct fwnode_handle *domain_handle; + struct irq_domain *eiointc_domain; +}; + +static struct eiointc_priv *eiointc_priv[MAX_IO_PICS]; + +static void eiointc_enable(void) +{ + uint64_t misc; + + misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC); + misc |= IOCSR_MISC_FUNC_EXT_IOI_EN; + iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC); +} + +static int cpu_to_eio_node(int cpu) +{ + return cpu_logical_map(cpu) / CORES_PER_EIO_NODE; +} + +static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map) +{ + int i, node, cpu_node, route_node; + unsigned char coremap; + uint32_t pos_off, data, data_byte, data_mask; + + pos_off = pos & ~3; + data_byte = pos & 3; + data_mask = ~BIT_MASK(data_byte) & 0xf; + + /* Calculate node and coremap of target irq */ + cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE; + coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE); + + for_each_online_cpu(i) { + node = cpu_to_eio_node(i); + if (!node_isset(node, *node_map)) + continue; + + /* EIO node 0 is in charge of inter-node interrupt dispatch */ + route_node = (node == mnode) ? 
cpu_node : node; + data = ((coremap | (route_node << 4)) << (data_byte * 8)); + csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE); + } +} + +static DEFINE_RAW_SPINLOCK(affinity_lock); + +static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) +{ + unsigned int cpu; + unsigned long flags; + uint32_t vector, regaddr; + struct cpumask intersect_affinity; + struct eiointc_priv *priv = d->domain->host_data; + + raw_spin_lock_irqsave(&affinity_lock, flags); + + cpumask_and(&intersect_affinity, affinity, cpu_online_mask); + cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map); + + if (cpumask_empty(&intersect_affinity)) { + raw_spin_unlock_irqrestore(&affinity_lock, flags); + return -EINVAL; + } + cpu = cpumask_first(&intersect_affinity); + + vector = d->hwirq; + regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2); + + /* Mask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), 0x0, 0); + /* Set route for target vector */ + eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); + /* Unmask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE, 0x0, 0); + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + raw_spin_unlock_irqrestore(&affinity_lock, flags); + + return IRQ_SET_MASK_OK; +} + +static int eiointc_index(int node) +{ + int i; + + for (i = 0; i < nr_pics; i++) { + if (node_isset(node, eiointc_priv[i]->node_map)) + return i; + } + + return -1; +} + +static int eiointc_router_init(unsigned int cpu) +{ + int i, bit; + uint32_t data; + uint32_t node = cpu_to_eio_node(cpu); + int index = eiointc_index(node); + + if (index < 0) { + pr_err("Error: invalid nodemap!\n"); + return -1; + } + + if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) { + eiointc_enable(); + + for (i = 0; i < VEC_COUNT / 32; i++) { + data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2))); + iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4); + } + + for (i = 0; i < VEC_COUNT / 32 / 4; i++) { + bit = BIT(1 + index); /* Route to IP[1 + index] */ + data = bit | (bit << 8) | (bit << 16) | (bit << 24); + iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4); + } + + for (i = 0; i < VEC_COUNT / 4; i++) { + /* Route to Node-0 Core-0 */ + if (index == 0) + bit = BIT(cpu_logical_map(0)); + else + bit = (eiointc_priv[index]->node << 4) | 1; + + data = bit | (bit << 8) | (bit << 16) | (bit << 24); + iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4); + } + + for (i = 0; i < VEC_COUNT / 32; i++) { + data = 0xffffffff; + iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4); + iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4); + } + } + + return 0; +} + +static void eiointc_irq_dispatch(struct irq_desc *desc) +{ + int i; + u64 pending; + bool handled = false; + struct irq_chip *chip = irq_desc_get_chip(desc); + struct eiointc_priv *priv = irq_desc_get_handler_data(desc); + + chained_irq_enter(chip, desc); + + for (i = 0; i < VEC_REG_COUNT; i++) { + pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3)); + iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3)); + while (pending) { + int bit = __ffs(pending); + int irq = bit + VEC_COUNT_PER_REG * i; + + generic_handle_domain_irq(priv->eiointc_domain, irq); + pending &= ~BIT(bit); + handled = true; + } + } + + if (!handled) + spurious_interrupt(); + + chained_irq_exit(chip, desc); +} + +static void eiointc_ack_irq(struct irq_data *d) +{ +} + +static void eiointc_mask_irq(struct irq_data *d) +{ +} + +static void eiointc_unmask_irq(struct irq_data *d) +{ +}
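+ +/* The empty ack/mask/unmask callbacks above are deliberate: pending bits are cleared by writing them back to EIOINTC_REG_ISR in eiointc_irq_dispatch(), and the per-vector ENABLE bits are only toggled around re-routing in eiointc_set_irq_affinity(). */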
+ +static struct irq_chip eiointc_irq_chip = { + .name = "EIOINTC", + .irq_ack = eiointc_ack_irq, + .irq_mask = eiointc_mask_irq, + .irq_unmask = eiointc_unmask_irq, + .irq_set_affinity = eiointc_set_irq_affinity, +}; + +static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int ret; + unsigned int i, type; + unsigned long hwirq = 0; + struct eiointc_priv *priv = domain->host_data; + + ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip, + priv, handle_edge_irq, NULL, NULL); + } + + return 0; +} + +static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + } +} + +static const struct irq_domain_ops eiointc_domain_ops = { + .translate = irq_domain_translate_onecell, + .alloc = eiointc_domain_alloc, + .free = eiointc_domain_free, +}; + +static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group) +{ + int i; + + if (cpu_has_flatmode) + node = cpu_to_node(node * CORES_PER_EIO_NODE); + + for (i = 0; i < MAX_IO_PICS; i++) { + if (node == vec_group[i].node) { + vec_group[i].parent = parent; + return; + } + } +} + +struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group) +{ + int i; + + for (i = 0; i < MAX_IO_PICS; i++) { + if (node == vec_group[i].node) + return vec_group[i].parent; + } + return NULL; +} + +static int __init +pch_pic_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header; + unsigned int node = (pchpic_entry->address >> 44) & 0xf; + struct irq_domain *parent = acpi_get_vec_parent(node, pch_group); + + if (parent) + return pch_pic_acpi_init(parent, pchpic_entry); + + return -EINVAL; +} + +static int __init +pch_msi_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header; + struct irq_domain *parent = acpi_get_vec_parent(eiointc_priv[nr_pics - 1]->node, msi_group); + + if (parent) + return pch_msi_acpi_init(parent, pchmsi_entry); + + return -EINVAL; +} + +static int __init acpi_cascade_irqdomain_init(void) +{ + acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, + pch_pic_parse_madt, 0); + acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, + pch_msi_parse_madt, 1); + return 0; +} + +int __init eiointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_eio_pic *acpi_eiointc) +{ + int i, parent_irq; + unsigned long node_map; + struct eiointc_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_eiointc); + if (!priv->domain_handle) { + pr_err("Unable to allocate domain handle\n"); + goto out_free_priv; + } + + priv->node = acpi_eiointc->node; + node_map = acpi_eiointc->node_map ?
: -1ULL; + + for_each_possible_cpu(i) { + if (node_map & (1ULL << cpu_to_eio_node(i))) { + node_set(cpu_to_eio_node(i), priv->node_map); + cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i)); + } + } + + /* Setup IRQ domain */ + priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT, + &eiointc_domain_ops, priv); + if (!priv->eiointc_domain) { + pr_err("loongson-eiointc: cannot add IRQ domain\n"); + goto out_free_handle; + } + + eiointc_priv[nr_pics++] = priv; + + eiointc_router_init(0); + + parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade); + irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv); + + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING, + "irqchip/loongarch/intc:starting", + eiointc_router_init, NULL); + + acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, pch_group); + acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, msi_group); + acpi_cascade_irqdomain_init(); + + return 0; + +out_free_handle: + irq_domain_free_fwnode(priv->domain_handle); + priv->domain_handle = NULL; +out_free_priv: + kfree(priv); + + return -ENOMEM; +} diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c index 8d05d8bcf56f..c4f3c886ad61 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -23,7 +23,7 @@ #endif #define LIOINTC_CHIP_IRQ 32 -#define LIOINTC_NUM_PARENT 4 +#define LIOINTC_NUM_PARENT 4 #define LIOINTC_NUM_CORES 4 #define LIOINTC_INTC_CHIP_START 0x20 @@ -58,6 +58,8 @@ struct liointc_priv { bool has_lpc_irq_errata; }; +struct fwnode_handle *liointc_handle; + static void liointc_chained_handle_irq(struct irq_desc *desc) { struct liointc_handler_data *handler = irq_desc_get_handler_data(desc); @@ -153,97 +155,79 @@ static void liointc_resume(struct irq_chip_generic *gc) irq_gc_unlock_irqrestore(gc, flags); } -static const char * const parent_names[] = {"int0", "int1", "int2", "int3"}; -static const char * const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"}; +static int parent_irq[LIOINTC_NUM_PARENT]; +static u32 parent_int_map[LIOINTC_NUM_PARENT]; +static const char *const parent_names[] = {"int0", "int1", "int2", "int3"}; +static const char *const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"}; -static void __iomem *liointc_get_reg_byname(struct device_node *node, - const char *name) +static int liointc_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) { - int index = of_property_match_string(node, "reg-names", name); - - if (index < 0) - return NULL; - - return of_iomap(node, index); + if (WARN_ON(intsize < 1)) + return -EINVAL; + *out_hwirq = intspec[0] - GSI_MIN_CPU_IRQ; + *out_type = IRQ_TYPE_NONE; + return 0; } -static int __init liointc_of_init(struct device_node *node, - struct device_node *parent) +static const struct irq_domain_ops acpi_irq_gc_ops = { + .map = irq_map_generic_chip, + .unmap = irq_unmap_generic_chip, + .xlate = liointc_domain_xlate, +}; + +static int liointc_init(phys_addr_t addr, unsigned long size, int revision, + struct fwnode_handle *domain_handle, struct device_node *node) { + int i, err; + void __iomem *base; + struct irq_chip_type *ct; struct irq_chip_generic *gc; struct irq_domain *domain; - struct irq_chip_type *ct; struct liointc_priv *priv; - void __iomem *base; - u32 of_parent_int_map[LIOINTC_NUM_PARENT]; - int parent_irq[LIOINTC_NUM_PARENT]; - bool 
have_parent = FALSE; - int sz, i, err = 0; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - if (of_device_is_compatible(node, "loongson,liointc-2.0")) { - base = liointc_get_reg_byname(node, "main"); - if (!base) { - err = -ENODEV; - goto out_free_priv; - } + base = ioremap(addr, size); + if (!base) + goto out_free_priv; - for (i = 0; i < LIOINTC_NUM_CORES; i++) - priv->core_isr[i] = liointc_get_reg_byname(node, core_reg_names[i]); - if (!priv->core_isr[0]) { - err = -ENODEV; - goto out_iounmap_base; - } - } else { - base = of_iomap(node, 0); - if (!base) { - err = -ENODEV; - goto out_free_priv; - } + for (i = 0; i < LIOINTC_NUM_CORES; i++) + priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS; - for (i = 0; i < LIOINTC_NUM_CORES; i++) - priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS; - } + for (i = 0; i < LIOINTC_NUM_PARENT; i++) + priv->handler[i].parent_int_map = parent_int_map[i]; - for (i = 0; i < LIOINTC_NUM_PARENT; i++) { - parent_irq[i] = of_irq_get_byname(node, parent_names[i]); - if (parent_irq[i] > 0) - have_parent = TRUE; - } - if (!have_parent) { - err = -ENODEV; - goto out_iounmap_isr; - } + if (revision > 1) { + for (i = 0; i < LIOINTC_NUM_CORES; i++) { + int index = of_property_match_string(node, + "reg-names", core_reg_names[i]); - sz = of_property_read_variable_u32_array(node, - "loongson,parent_int_map", - &of_parent_int_map[0], - LIOINTC_NUM_PARENT, - LIOINTC_NUM_PARENT); - if (sz < 4) { - pr_err("loongson-liointc: No parent_int_map\n"); - err = -ENODEV; - goto out_iounmap_isr; - } + if (index < 0) + return -EINVAL; - for (i = 0; i < LIOINTC_NUM_PARENT; i++) - priv->handler[i].parent_int_map = of_parent_int_map[i]; + priv->core_isr[i] = of_iomap(node, index); + } + } /* Setup IRQ domain */ - domain = irq_domain_add_linear(node, 32, + if (!acpi_disabled) + domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ, + &acpi_irq_gc_ops, priv); + else + domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ, &irq_generic_chip_ops, priv); if (!domain) { pr_err("loongson-liointc: cannot add IRQ domain\n"); - err = -EINVAL; - goto out_iounmap_isr; + goto out_iounmap; } - err = irq_alloc_domain_generic_chips(domain, 32, 1, - node->full_name, handle_level_irq, - IRQ_NOPROBE, 0, 0); + err = irq_alloc_domain_generic_chips(domain, LIOINTC_CHIP_IRQ, 1, + (node ? 
node->full_name : "LIOINTC"), + handle_level_irq, 0, IRQ_NOPROBE, 0); if (err) { pr_err("loongson-liointc: unable to register IRQ domain\n"); goto out_free_domain; @@ -299,24 +283,93 @@ static int __init liointc_of_init(struct device_node *node, liointc_chained_handle_irq, &priv->handler[i]); } + liointc_handle = domain_handle; return 0; out_free_domain: irq_domain_remove(domain); -out_iounmap_isr: - for (i = 0; i < LIOINTC_NUM_CORES; i++) { - if (!priv->core_isr[i]) - continue; - iounmap(priv->core_isr[i]); - } -out_iounmap_base: +out_iounmap: iounmap(base); out_free_priv: kfree(priv); - return err; + return -EINVAL; +} + +#ifdef CONFIG_OF + +static int __init liointc_of_init(struct device_node *node, + struct device_node *parent) +{ + bool have_parent = FALSE; + int sz, i, index, revision, err = 0; + struct resource res; + + if (!of_device_is_compatible(node, "loongson,liointc-2.0")) { + index = 0; + revision = 1; + } else { + index = of_property_match_string(node, "reg-names", "main"); + revision = 2; + } + + if (of_address_to_resource(node, index, &res)) + return -EINVAL; + + for (i = 0; i < LIOINTC_NUM_PARENT; i++) { + parent_irq[i] = of_irq_get_byname(node, parent_names[i]); + if (parent_irq[i] > 0) + have_parent = TRUE; + } + if (!have_parent) + return -ENODEV; + + sz = of_property_read_variable_u32_array(node, + "loongson,parent_int_map", + &parent_int_map[0], + LIOINTC_NUM_PARENT, + LIOINTC_NUM_PARENT); + if (sz < 4) { + pr_err("loongson-liointc: No parent_int_map\n"); + return -ENODEV; + } + + err = liointc_init(res.start, resource_size(&res), + revision, of_node_to_fwnode(node), node); + if (err < 0) + return err; + + return 0; } IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init); IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init); IRQCHIP_DECLARE(loongson_liointc_2_0, "loongson,liointc-2.0", liointc_of_init); + +#endif + +#ifdef CONFIG_ACPI +int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic *acpi_liointc) +{ + int ret; + struct fwnode_handle *domain_handle; + + parent_int_map[0] = acpi_liointc->cascade_map[0]; + parent_int_map[1] = acpi_liointc->cascade_map[1]; + + parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]); + parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]); + + domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_liointc); + if (!domain_handle) { + pr_err("Unable to allocate domain handle\n"); + return -ENOMEM; + } + ret = liointc_init(acpi_liointc->address, acpi_liointc->size, + 1, domain_handle, NULL); + if (ret) + irq_domain_free_fwnode(domain_handle); + + return ret; +} +#endif diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c new file mode 100644 index 000000000000..bf2324910a75 --- /dev/null +++ b/drivers/irqchip/irq-loongson-pch-lpc.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Loongson LPC Interrupt Controller support + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#define pr_fmt(fmt) "lpc: " fmt + +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> + +/* Registers */ +#define LPC_INT_CTL 0x00 +#define LPC_INT_ENA 0x04 +#define LPC_INT_STS 0x08 +#define LPC_INT_CLR 0x0c +#define LPC_INT_POL 0x10 +#define LPC_COUNT 16 + +/* LPC_INT_CTL */ +#define LPC_INT_CTL_EN BIT(31) + +struct pch_lpc { + void __iomem *base; + 
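/* MMIO window covering the LPC_INT_* registers defined above */ +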
struct irq_domain *lpc_domain; + raw_spinlock_t lpc_lock; + u32 saved_reg_ctl; + u32 saved_reg_ena; + u32 saved_reg_pol; +}; + +struct fwnode_handle *pch_lpc_handle; + +static void lpc_irq_ack(struct irq_data *d) +{ + unsigned long flags; + struct pch_lpc *priv = d->domain->host_data; + + raw_spin_lock_irqsave(&priv->lpc_lock, flags); + writel(0x1 << d->hwirq, priv->base + LPC_INT_CLR); + raw_spin_unlock_irqrestore(&priv->lpc_lock, flags); +} + +static void lpc_irq_mask(struct irq_data *d) +{ + unsigned long flags; + struct pch_lpc *priv = d->domain->host_data; + + raw_spin_lock_irqsave(&priv->lpc_lock, flags); + writel(readl(priv->base + LPC_INT_ENA) & (~(0x1 << (d->hwirq))), + priv->base + LPC_INT_ENA); + raw_spin_unlock_irqrestore(&priv->lpc_lock, flags); +} + +static void lpc_irq_unmask(struct irq_data *d) +{ + unsigned long flags; + struct pch_lpc *priv = d->domain->host_data; + + raw_spin_lock_irqsave(&priv->lpc_lock, flags); + writel(readl(priv->base + LPC_INT_ENA) | (0x1 << (d->hwirq)), + priv->base + LPC_INT_ENA); + raw_spin_unlock_irqrestore(&priv->lpc_lock, flags); +} + +static int lpc_irq_set_type(struct irq_data *d, unsigned int type) +{ + u32 val; + u32 mask = 0x1 << (d->hwirq); + struct pch_lpc *priv = d->domain->host_data; + + if (!(type & IRQ_TYPE_LEVEL_MASK)) + return 0; + + val = readl(priv->base + LPC_INT_POL); + + if (type == IRQ_TYPE_LEVEL_HIGH) + val |= mask; + else + val &= ~mask; + + writel(val, priv->base + LPC_INT_POL); + + return 0; +} + +static const struct irq_chip pch_lpc_irq_chip = { + .name = "PCH LPC", + .irq_mask = lpc_irq_mask, + .irq_unmask = lpc_irq_unmask, + .irq_ack = lpc_irq_ack, + .irq_set_type = lpc_irq_set_type, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + +static void lpc_irq_dispatch(struct irq_desc *desc) +{ + u32 pending, bit; + struct irq_chip *chip = irq_desc_get_chip(desc); + struct pch_lpc *priv = irq_desc_get_handler_data(desc); + + chained_irq_enter(chip, desc); + + pending = readl(priv->base + LPC_INT_ENA); + pending &= readl(priv->base + LPC_INT_STS); + if (!pending) + spurious_interrupt(); + + while (pending) { + bit = __ffs(pending); + + generic_handle_domain_irq(priv->lpc_domain, bit); + pending &= ~BIT(bit); + } + chained_irq_exit(chip, desc); +} + +static int pch_lpc_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + irq_set_chip_and_handler(irq, &pch_lpc_irq_chip, handle_level_irq); + return 0; +} + +static const struct irq_domain_ops pch_lpc_domain_ops = { + .map = pch_lpc_map, + .translate = irq_domain_translate_twocell, +}; + +static void pch_lpc_reset(struct pch_lpc *priv) +{ + /* Enable the LPC interrupt, bit31: en bit30: edge */ + writel(LPC_INT_CTL_EN, priv->base + LPC_INT_CTL); + writel(0, priv->base + LPC_INT_ENA); + /* Clear all 18 interrupt bits */ + writel(GENMASK(17, 0), priv->base + LPC_INT_CLR); +} + +static int pch_lpc_disabled(struct pch_lpc *priv) +{ + return (readl(priv->base + LPC_INT_ENA) == 0xffffffff) && + (readl(priv->base + LPC_INT_STS) == 0xffffffff); +} + +int __init pch_lpc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lpc_pic *acpi_pchlpc) +{ + int parent_irq; + struct pch_lpc *priv; + struct irq_fwspec fwspec; + struct fwnode_handle *irq_handle; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + raw_spin_lock_init(&priv->lpc_lock); + + priv->base = ioremap(acpi_pchlpc->address, acpi_pchlpc->size); + if (!priv->base) + goto free_priv; + + if (pch_lpc_disabled(priv)) { + pr_err("Failed to get LPC status\n"); + goto iounmap_base; + } + 
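/* All-ones readback from both LPC_INT_ENA and LPC_INT_STS means the LPC block is absent or inaccessible, hence the bail-out above. */ +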
irq_handle = irq_domain_alloc_named_fwnode("lpcintc"); + if (!irq_handle) { + pr_err("Unable to allocate domain handle\n"); + goto iounmap_base; + } + + priv->lpc_domain = irq_domain_create_linear(irq_handle, LPC_COUNT, + &pch_lpc_domain_ops, priv); + if (!priv->lpc_domain) { + pr_err("Failed to create IRQ domain\n"); + goto free_irq_handle; + } + pch_lpc_reset(priv); + + fwspec.fwnode = parent->fwnode; + fwspec.param[0] = acpi_pchlpc->cascade + GSI_MIN_PCH_IRQ; + fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH; + fwspec.param_count = 2; + parent_irq = irq_create_fwspec_mapping(&fwspec); + irq_set_chained_handler_and_data(parent_irq, lpc_irq_dispatch, priv); + + pch_lpc_handle = irq_handle; + return 0; + +free_irq_handle: + irq_domain_free_fwnode(irq_handle); +iounmap_base: + iounmap(priv->base); +free_priv: + kfree(priv); + + return -ENOMEM; +} diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c index e3801c4a77ed..d0e8551bebfa 100644 --- a/drivers/irqchip/irq-loongson-pch-msi.c +++ b/drivers/irqchip/irq-loongson-pch-msi.c @@ -15,6 +15,8 @@ #include <linux/pci.h> #include <linux/slab.h> +static int nr_pics; + struct pch_msi_data { struct mutex msi_map_lock; phys_addr_t doorbell; @@ -23,6 +25,8 @@ struct pch_msi_data { unsigned long *msi_map; }; +static struct fwnode_handle *pch_msi_handle[MAX_IO_PICS]; + static void pch_msi_mask_msi_irq(struct irq_data *d) { pci_msi_mask_irq(d); @@ -154,12 +158,12 @@ static const struct irq_domain_ops pch_msi_middle_domain_ops = { }; static int pch_msi_init_domains(struct pch_msi_data *priv, - struct device_node *node, - struct irq_domain *parent) + struct irq_domain *parent, + struct fwnode_handle *domain_handle) { struct irq_domain *middle_domain, *msi_domain; - middle_domain = irq_domain_create_linear(of_node_to_fwnode(node), + middle_domain = irq_domain_create_linear(domain_handle, priv->num_irqs, &pch_msi_middle_domain_ops, priv); @@ -171,7 +175,7 @@ static int pch_msi_init_domains(struct pch_msi_data *priv, middle_domain->parent = parent; irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS); - msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), + msi_domain = pci_msi_create_irq_domain(domain_handle, &pch_msi_domain_info, middle_domain); if (!msi_domain) { @@ -183,19 +187,11 @@ static int pch_msi_init_domains(struct pch_msi_data *priv, return 0; } -static int pch_msi_init(struct device_node *node, - struct device_node *parent) +static int pch_msi_init(phys_addr_t msg_address, int irq_base, int irq_count, + struct irq_domain *parent_domain, struct fwnode_handle *domain_handle) { - struct pch_msi_data *priv; - struct irq_domain *parent_domain; - struct resource res; int ret; - - parent_domain = irq_find_host(parent); - if (!parent_domain) { - pr_err("Failed to find the parent domain\n"); - return -ENXIO; - } + struct pch_msi_data *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) @@ -203,48 +199,95 @@ static int pch_msi_init(struct device_node *node, mutex_init(&priv->msi_map_lock); - ret = of_address_to_resource(node, 0, &res); - if (ret) { - pr_err("Failed to allocate resource\n"); - goto err_priv; - } - - priv->doorbell = res.start; - - if (of_property_read_u32(node, "loongson,msi-base-vec", - &priv->irq_first)) { - pr_err("Unable to parse MSI vec base\n"); - ret = -EINVAL; - goto err_priv; - } - - if (of_property_read_u32(node, "loongson,msi-num-vecs", - &priv->num_irqs)) { - pr_err("Unable to parse MSI vec number\n"); - ret = -EINVAL; - goto err_priv; - } + priv->doorbell = msg_address; 
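+ /* doorbell is the MSI message address devices write to; irq_first/num_irqs describe the vector window handed down from the parent domain. */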
+ priv->irq_first = irq_base; + priv->num_irqs = irq_count; priv->msi_map = bitmap_zalloc(priv->num_irqs, GFP_KERNEL); - if (!priv->msi_map) { - ret = -ENOMEM; + if (!priv->msi_map) goto err_priv; - } pr_debug("Registering %d MSIs, starting at %d\n", priv->num_irqs, priv->irq_first); - ret = pch_msi_init_domains(priv, node, parent_domain); + ret = pch_msi_init_domains(priv, parent_domain, domain_handle); if (ret) goto err_map; + pch_msi_handle[nr_pics++] = domain_handle; return 0; err_map: bitmap_free(priv->msi_map); err_priv: kfree(priv); - return ret; + + return -EINVAL; +} + +#ifdef CONFIG_OF +static int pch_msi_of_init(struct device_node *node, struct device_node *parent) +{ + int err; + int irq_base, irq_count; + struct resource res; + struct irq_domain *parent_domain; + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("Failed to find the parent domain\n"); + return -ENXIO; + } + + if (of_address_to_resource(node, 0, &res)) { + pr_err("Failed to allocate resource\n"); + return -EINVAL; + } + + if (of_property_read_u32(node, "loongson,msi-base-vec", &irq_base)) { + pr_err("Unable to parse MSI vec base\n"); + return -EINVAL; + } + + if (of_property_read_u32(node, "loongson,msi-num-vecs", &irq_count)) { + pr_err("Unable to parse MSI vec number\n"); + return -EINVAL; + } + + err = pch_msi_init(res.start, irq_base, irq_count, parent_domain, of_node_to_fwnode(node)); + if (err < 0) + return err; + + return 0; } -IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_init); +IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_of_init); +#endif + +#ifdef CONFIG_ACPI +struct fwnode_handle *get_pch_msi_handle(int pci_segment) +{ + int i; + + for (i = 0; i < MAX_IO_PICS; i++) { + if (msi_group[i].pci_segment == pci_segment) + return pch_msi_handle[i]; + } + return NULL; +} + +int __init pch_msi_acpi_init(struct irq_domain *parent, + struct acpi_madt_msi_pic *acpi_pchmsi) +{ + int ret; + struct fwnode_handle *domain_handle; + + domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchmsi); + ret = pch_msi_init(acpi_pchmsi->msg_address, acpi_pchmsi->start, + acpi_pchmsi->count, parent, domain_handle); + if (ret < 0) + irq_domain_free_fwnode(domain_handle); + + return ret; +} +#endif diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index a4eb8a2181c7..b6f1392964b1 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -33,13 +33,40 @@ #define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG) #define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG) +static int nr_pics; + struct pch_pic { void __iomem *base; struct irq_domain *pic_domain; u32 ht_vec_base; raw_spinlock_t pic_lock; + u32 vec_count; + u32 gsi_base; }; +static struct pch_pic *pch_pic_priv[MAX_IO_PICS]; + +struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; + +int find_pch_pic(u32 gsi) +{ + int i; + + /* Find the PCH_PIC that manages this GSI. 
*/ + for (i = 0; i < MAX_IO_PICS; i++) { + struct pch_pic *priv = pch_pic_priv[i]; + + if (!priv) + return -1; + + if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count)) + return i; + } + + pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi); + return -1; +} + static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit) { u32 reg; @@ -139,6 +166,28 @@ static struct irq_chip pch_pic_irq_chip = { .irq_set_type = pch_pic_set_type, }; +static int pch_pic_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + struct pch_pic *priv = d->host_data; + struct device_node *of_node = to_of_node(fwspec->fwnode); + + if (fwspec->param_count < 1) + return -EINVAL; + + if (of_node) { + *hwirq = fwspec->param[0] + priv->ht_vec_base; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; + } else { + *hwirq = fwspec->param[0] - priv->gsi_base; + *type = IRQ_TYPE_NONE; + } + + return 0; +} + static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { @@ -149,13 +198,13 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq, struct irq_fwspec parent_fwspec; struct pch_pic *priv = domain->host_data; - err = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type); + err = pch_pic_domain_translate(domain, fwspec, &hwirq, &type); if (err) return err; parent_fwspec.fwnode = domain->parent->fwnode; parent_fwspec.param_count = 1; - parent_fwspec.param[0] = hwirq + priv->ht_vec_base; + parent_fwspec.param[0] = hwirq; err = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec); if (err) @@ -170,7 +219,7 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq, } static const struct irq_domain_ops pch_pic_domain_ops = { - .translate = irq_domain_translate_twocell, + .translate = pch_pic_domain_translate, .alloc = pch_pic_alloc, .free = irq_domain_free_irqs_parent, }; @@ -180,7 +229,7 @@ static void pch_pic_reset(struct pch_pic *priv) int i; for (i = 0; i < PIC_COUNT; i++) { - /* Write vectored ID */ + /* Write vector ID */ writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i)); /* Hardcode route to HT0 Lo */ writeb(1, priv->base + PCH_INT_ROUTE(i)); @@ -198,50 +247,37 @@ static void pch_pic_reset(struct pch_pic *priv) } } -static int pch_pic_of_init(struct device_node *node, - struct device_node *parent) +static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base, + struct irq_domain *parent_domain, struct fwnode_handle *domain_handle, + u32 gsi_base) { struct pch_pic *priv; - struct irq_domain *parent_domain; - int err; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; raw_spin_lock_init(&priv->pic_lock); - priv->base = of_iomap(node, 0); - if (!priv->base) { - err = -ENOMEM; + priv->base = ioremap(addr, size); + if (!priv->base) goto free_priv; - } - parent_domain = irq_find_host(parent); - if (!parent_domain) { - pr_err("Failed to find the parent domain\n"); - err = -ENXIO; - goto iounmap_base; - } - - if (of_property_read_u32(node, "loongson,pic-base-vec", - &priv->ht_vec_base)) { - pr_err("Failed to determine pic-base-vec\n"); - err = -EINVAL; - goto iounmap_base; - } + priv->ht_vec_base = vec_base; + priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1; + priv->gsi_base = gsi_base; priv->pic_domain = irq_domain_create_hierarchy(parent_domain, 0, - PIC_COUNT, - of_node_to_fwnode(node), - &pch_pic_domain_ops, - priv); + priv->vec_count, domain_handle, + &pch_pic_domain_ops, priv); + if 
(!priv->pic_domain) { pr_err("Failed to create IRQ domain\n"); - err = -ENOMEM; goto iounmap_base; } pch_pic_reset(priv); + pch_pic_handle[nr_pics] = domain_handle; + pch_pic_priv[nr_pics++] = priv; return 0; @@ -250,7 +286,86 @@ iounmap_base: free_priv: kfree(priv); - return err; + return -EINVAL; +} + +#ifdef CONFIG_OF + +static int pch_pic_of_init(struct device_node *node, + struct device_node *parent) +{ + int err, vec_base; + struct resource res; + struct irq_domain *parent_domain; + + if (of_address_to_resource(node, 0, &res)) + return -EINVAL; + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("Failed to find the parent domain\n"); + return -ENXIO; + } + + if (of_property_read_u32(node, "loongson,pic-base-vec", &vec_base)) { + pr_err("Failed to determine pic-base-vec\n"); + return -EINVAL; + } + + err = pch_pic_init(res.start, resource_size(&res), vec_base, + parent_domain, of_node_to_fwnode(node), 0); + if (err < 0) + return err; + + return 0; } IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init); + +#endif + +#ifdef CONFIG_ACPI +static int __init +pch_lpc_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_lpc_pic *pchlpc_entry = (struct acpi_madt_lpc_pic *)header; + + return pch_lpc_acpi_init(pch_pic_priv[0]->pic_domain, pchlpc_entry); +} + +static int __init acpi_cascade_irqdomain_init(void) +{ + acpi_table_parse_madt(ACPI_MADT_TYPE_LPC_PIC, + pch_lpc_parse_madt, 0); + return 0; +} + +int __init pch_pic_acpi_init(struct irq_domain *parent, + struct acpi_madt_bio_pic *acpi_pchpic) +{ + int ret, vec_base; + struct fwnode_handle *domain_handle; + + vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ; + + domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchpic); + if (!domain_handle) { + pr_err("Unable to allocate domain handle\n"); + return -ENOMEM; + } + + ret = pch_pic_init(acpi_pchpic->address, acpi_pchpic->size, + vec_base, parent, domain_handle, acpi_pchpic->gsi_base); + + if (ret < 0) { + irq_domain_free_fwnode(domain_handle); + return ret; + } + + if (acpi_pchpic->id == 0) + acpi_cascade_irqdomain_init(); + + return ret; +} +#endif diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index ff89b36267dd..1ba0f1555c80 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -52,13 +52,15 @@ static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks); static DEFINE_SPINLOCK(gic_lock); static struct irq_domain *gic_irq_domain; -static struct irq_domain *gic_ipi_domain; static int gic_shared_intrs; static unsigned int gic_cpu_pin; static unsigned int timer_cpu_pin; static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; + +#ifdef CONFIG_GENERIC_IRQ_IPI static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS); static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS); +#endif /* CONFIG_GENERIC_IRQ_IPI */ static struct gic_all_vpes_chip_data { u32 map; @@ -472,9 +474,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, u32 map; if (hwirq >= GIC_SHARED_HWIRQ_BASE) { +#ifdef CONFIG_GENERIC_IRQ_IPI /* verify that shared irqs don't conflict with an IPI irq */ if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv)) return -EBUSY; +#endif /* CONFIG_GENERIC_IRQ_IPI */ err = irq_domain_set_hwirq_and_chip(d, virq, hwirq, &gic_level_irq_controller, @@ -567,6 +571,8 @@ static const struct irq_domain_ops gic_irq_domain_ops = { .map = gic_irq_domain_map, }; +#ifdef CONFIG_GENERIC_IRQ_IPI + static int 
gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, @@ -670,6 +676,48 @@ static const struct irq_domain_ops gic_ipi_domain_ops = { .match = gic_ipi_domain_match, }; +static int gic_register_ipi_domain(struct device_node *node) +{ + struct irq_domain *gic_ipi_domain; + unsigned int v[2], num_ipis; + + gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, + IRQ_DOMAIN_FLAG_IPI_PER_CPU, + GIC_NUM_LOCAL_INTRS + gic_shared_intrs, + node, &gic_ipi_domain_ops, NULL); + if (!gic_ipi_domain) { + pr_err("Failed to add IPI domain"); + return -ENXIO; + } + + irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); + + if (node && + !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { + bitmap_set(ipi_resrv, v[0], v[1]); + } else { + /* + * Reserve 2 interrupts per possible CPU/VP for use as IPIs, + * meeting the requirements of arch/mips SMP. + */ + num_ipis = 2 * num_possible_cpus(); + bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis); + } + + bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS); + + return 0; +} + +#else /* !CONFIG_GENERIC_IRQ_IPI */ + +static inline int gic_register_ipi_domain(struct device_node *node) +{ + return 0; +} + +#endif /* !CONFIG_GENERIC_IRQ_IPI */ + static int gic_cpu_startup(unsigned int cpu) { /* Enable or disable EIC */ @@ -688,11 +736,12 @@ static int gic_cpu_startup(unsigned int cpu) static int __init gic_of_init(struct device_node *node, struct device_node *parent) { - unsigned int cpu_vec, i, gicconfig, v[2], num_ipis; + unsigned int cpu_vec, i, gicconfig; unsigned long reserved; phys_addr_t gic_base; struct resource res; size_t gic_len; + int ret; /* Find the first available CPU vector. */ i = 0; @@ -734,6 +783,10 @@ static int __init gic_of_init(struct device_node *node, } mips_gic_base = ioremap(gic_base, gic_len); + if (!mips_gic_base) { + pr_err("Failed to ioremap gic_base\n"); + return -ENOMEM; + } gicconfig = read_gic_config(); gic_shared_intrs = FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, gicconfig); @@ -780,30 +833,9 @@ static int __init gic_of_init(struct device_node *node, return -ENXIO; } - gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, - IRQ_DOMAIN_FLAG_IPI_PER_CPU, - GIC_NUM_LOCAL_INTRS + gic_shared_intrs, - node, &gic_ipi_domain_ops, NULL); - if (!gic_ipi_domain) { - pr_err("Failed to add IPI domain"); - return -ENXIO; - } - - irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); - - if (node && - !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { - bitmap_set(ipi_resrv, v[0], v[1]); - } else { - /* - * Reserve 2 interrupts per possible CPU/VP for use as IPIs, - * meeting the requirements of arch/mips SMP. 
- */ - num_ipis = 2 * num_possible_cpus(); - bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis); - } - - bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS); + ret = gic_register_ipi_domain(node); + if (ret) + return ret; board_bind_eic_interrupt = &gic_bind_eic_interrupt; diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c index 49b47e787644..f289ccd95291 100644 --- a/drivers/irqchip/irq-or1k-pic.c +++ b/drivers/irqchip/irq-or1k-pic.c @@ -66,7 +66,6 @@ static struct or1k_pic_dev or1k_pic_level = { .name = "or1k-PIC-level", .irq_unmask = or1k_pic_unmask, .irq_mask = or1k_pic_mask, - .irq_mask_ack = or1k_pic_mask_ack, }, .handle = handle_level_irq, .flags = IRQ_LEVEL | IRQ_NOPROBE, diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c new file mode 100644 index 000000000000..25fd8ee66565 --- /dev/null +++ b/drivers/irqchip/irq-renesas-rzg2l.c @@ -0,0 +1,393 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RZ/G2L IRQC Driver + * + * Copyright (C) 2022 Renesas Electronics Corporation. + * + * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/spinlock.h> + +#define IRQC_IRQ_START 1 +#define IRQC_IRQ_COUNT 8 +#define IRQC_TINT_START (IRQC_IRQ_START + IRQC_IRQ_COUNT) +#define IRQC_TINT_COUNT 32 +#define IRQC_NUM_IRQ (IRQC_TINT_START + IRQC_TINT_COUNT) + +#define ISCR 0x10 +#define IITSR 0x14 +#define TSCR 0x20 +#define TITSR0 0x24 +#define TITSR1 0x28 +#define TITSR0_MAX_INT 16 +#define TITSEL_WIDTH 0x2 +#define TSSR(n) (0x30 + ((n) * 4)) +#define TIEN BIT(7) +#define TSSEL_SHIFT(n) (8 * (n)) +#define TSSEL_MASK GENMASK(7, 0) +#define IRQ_MASK 0x3 + +#define TSSR_OFFSET(n) ((n) % 4) +#define TSSR_INDEX(n) ((n) / 4) + +#define TITSR_TITSEL_EDGE_RISING 0 +#define TITSR_TITSEL_EDGE_FALLING 1 +#define TITSR_TITSEL_LEVEL_HIGH 2 +#define TITSR_TITSEL_LEVEL_LOW 3 + +#define IITSR_IITSEL(n, sense) ((sense) << ((n) * 2)) +#define IITSR_IITSEL_LEVEL_LOW 0 +#define IITSR_IITSEL_EDGE_FALLING 1 +#define IITSR_IITSEL_EDGE_RISING 2 +#define IITSR_IITSEL_EDGE_BOTH 3 +#define IITSR_IITSEL_MASK(n) IITSR_IITSEL((n), 3) + +#define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x)) +#define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x)) + +struct rzg2l_irqc_priv { + void __iomem *base; + struct irq_fwspec fwspec[IRQC_NUM_IRQ]; + raw_spinlock_t lock; +}; + +static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data) +{ + return data->domain->host_data; +} + +static void rzg2l_irq_eoi(struct irq_data *d) +{ + unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START; + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + u32 bit = BIT(hw_irq); + u32 reg; + + reg = readl_relaxed(priv->base + ISCR); + if (reg & bit) + writel_relaxed(reg & ~bit, priv->base + ISCR); +} + +static void rzg2l_tint_eoi(struct irq_data *d) +{ + unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START; + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + u32 bit = BIT(hw_irq); + u32 reg; + + reg = readl_relaxed(priv->base + TSCR); + if (reg & bit) + writel_relaxed(reg & ~bit, priv->base + TSCR); +} + +static void rzg2l_irqc_eoi(struct irq_data *d) +{ + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + unsigned int hw_irq = 
irqd_to_hwirq(d); + + raw_spin_lock(&priv->lock); + if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT) + rzg2l_irq_eoi(d); + else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) + rzg2l_tint_eoi(d); + raw_spin_unlock(&priv->lock); + irq_chip_eoi_parent(d); +} + +static void rzg2l_irqc_irq_disable(struct irq_data *d) +{ + unsigned int hw_irq = irqd_to_hwirq(d); + + if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) { + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + u32 offset = hw_irq - IRQC_TINT_START; + u32 tssr_offset = TSSR_OFFSET(offset); + u8 tssr_index = TSSR_INDEX(offset); + u32 reg; + + raw_spin_lock(&priv->lock); + reg = readl_relaxed(priv->base + TSSR(tssr_index)); + reg &= ~(TSSEL_MASK << tssr_offset); + writel_relaxed(reg, priv->base + TSSR(tssr_index)); + raw_spin_unlock(&priv->lock); + } + irq_chip_disable_parent(d); +} + +static void rzg2l_irqc_irq_enable(struct irq_data *d) +{ + unsigned int hw_irq = irqd_to_hwirq(d); + + if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) { + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + unsigned long tint = (uintptr_t)d->chip_data; + u32 offset = hw_irq - IRQC_TINT_START; + u32 tssr_offset = TSSR_OFFSET(offset); + u8 tssr_index = TSSR_INDEX(offset); + u32 reg; + + raw_spin_lock(&priv->lock); + reg = readl_relaxed(priv->base + TSSR(tssr_index)); + reg |= (TIEN | tint) << TSSEL_SHIFT(tssr_offset); + writel_relaxed(reg, priv->base + TSSR(tssr_index)); + raw_spin_unlock(&priv->lock); + } + irq_chip_enable_parent(d); +} + +static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type) +{ + unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START; + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + u16 sense, tmp; + + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_LEVEL_LOW: + sense = IITSR_IITSEL_LEVEL_LOW; + break; + + case IRQ_TYPE_EDGE_FALLING: + sense = IITSR_IITSEL_EDGE_FALLING; + break; + + case IRQ_TYPE_EDGE_RISING: + sense = IITSR_IITSEL_EDGE_RISING; + break; + + case IRQ_TYPE_EDGE_BOTH: + sense = IITSR_IITSEL_EDGE_BOTH; + break; + + default: + return -EINVAL; + } + + raw_spin_lock(&priv->lock); + tmp = readl_relaxed(priv->base + IITSR); + tmp &= ~IITSR_IITSEL_MASK(hw_irq); + tmp |= IITSR_IITSEL(hw_irq, sense); + writel_relaxed(tmp, priv->base + IITSR); + raw_spin_unlock(&priv->lock); + + return 0; +} + +static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type) +{ + struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + unsigned int hwirq = irqd_to_hwirq(d); + u32 titseln = hwirq - IRQC_TINT_START; + u32 offset; + u8 sense; + u32 reg; + + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_EDGE_RISING: + sense = TITSR_TITSEL_EDGE_RISING; + break; + + case IRQ_TYPE_EDGE_FALLING: + sense = TITSR_TITSEL_EDGE_FALLING; + break; + + default: + return -EINVAL; + } + + offset = TITSR0; + if (titseln >= TITSR0_MAX_INT) { + titseln -= TITSR0_MAX_INT; + offset = TITSR1; + } + + raw_spin_lock(&priv->lock); + reg = readl_relaxed(priv->base + offset); + reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH)); + reg |= sense << (titseln * TITSEL_WIDTH); + writel_relaxed(reg, priv->base + offset); + raw_spin_unlock(&priv->lock); + + return 0; +} + +static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type) +{ + unsigned int hw_irq = irqd_to_hwirq(d); + int ret = -EINVAL; + + if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT) + ret = rzg2l_irq_set_type(d, type); + else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) + ret = rzg2l_tint_set_edge(d, type); + if (ret) + 
return ret; + + return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH); +} + +static const struct irq_chip irqc_chip = { + .name = "rzg2l-irqc", + .irq_eoi = rzg2l_irqc_eoi, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_disable = rzg2l_irqc_irq_disable, + .irq_enable = rzg2l_irqc_irq_enable, + .irq_get_irqchip_state = irq_chip_get_parent_state, + .irq_set_irqchip_state = irq_chip_set_parent_state, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_type = rzg2l_irqc_set_type, + .flags = IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE, +}; + +static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct rzg2l_irqc_priv *priv = domain->host_data; + unsigned long tint = 0; + irq_hw_number_t hwirq; + unsigned int type; + int ret; + + ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type); + if (ret) + return ret; + + /* + * For TINT interrupts ie where pinctrl driver is child of irqc domain + * the hwirq and TINT are encoded in fwspec->param[0]. + * hwirq for TINT range from 9-40, hwirq is embedded 0-15 bits and TINT + * from 16-31 bits. TINT from the pinctrl driver needs to be programmed + * in IRQC registers to enable a given gpio pin as interrupt. + */ + if (hwirq > IRQC_IRQ_COUNT) { + tint = TINT_EXTRACT_GPIOINT(hwirq); + hwirq = TINT_EXTRACT_HWIRQ(hwirq); + + if (hwirq < IRQC_TINT_START) + return -EINVAL; + } + + if (hwirq > (IRQC_NUM_IRQ - 1)) + return -EINVAL; + + ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &irqc_chip, + (void *)(uintptr_t)tint); + if (ret) + return ret; + + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]); +} + +static const struct irq_domain_ops rzg2l_irqc_domain_ops = { + .alloc = rzg2l_irqc_alloc, + .free = irq_domain_free_irqs_common, + .translate = irq_domain_translate_twocell, +}; + +static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv, + struct device_node *np) +{ + struct of_phandle_args map; + unsigned int i; + int ret; + + for (i = 0; i < IRQC_NUM_IRQ; i++) { + ret = of_irq_parse_one(np, i, &map); + if (ret) + return ret; + of_phandle_args_to_fwspec(np, map.args, map.args_count, + &priv->fwspec[i]); + } + + return 0; +} + +static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent) +{ + struct irq_domain *irq_domain, *parent_domain; + struct platform_device *pdev; + struct reset_control *resetn; + struct rzg2l_irqc_priv *priv; + int ret; + + pdev = of_find_device_by_node(node); + if (!pdev) + return -ENODEV; + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + dev_err(&pdev->dev, "cannot find parent domain\n"); + return -ENODEV; + } + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + ret = rzg2l_irqc_parse_interrupts(priv, node); + if (ret) { + dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret); + return ret; + } + + resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(resetn)) + return PTR_ERR(resetn); + + ret = reset_control_deassert(resetn); + if (ret) { + dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret); + return ret; + } + + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret); + goto pm_disable; 
+ } + + raw_spin_lock_init(&priv->lock); + + irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ, + node, &rzg2l_irqc_domain_ops, + priv); + if (!irq_domain) { + dev_err(&pdev->dev, "failed to add irq domain\n"); + ret = -ENOMEM; + goto pm_put; + } + + return 0; + +pm_put: + pm_runtime_put(&pdev->dev); +pm_disable: + pm_runtime_disable(&pdev->dev); + reset_control_assert(resetn); + return ret; +} + +IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc) +IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init) +IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc) +MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>"); +MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index bb87e4c3b88e..ba4938188570 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -60,10 +60,13 @@ #define PLIC_DISABLE_THRESHOLD 0x7 #define PLIC_ENABLE_THRESHOLD 0 +#define PLIC_QUIRK_EDGE_INTERRUPT 0 + struct plic_priv { struct cpumask lmask; struct irq_domain *irqdomain; void __iomem *regs; + unsigned long plic_quirks; }; struct plic_handler { @@ -81,6 +84,8 @@ static int plic_parent_irq __ro_after_init; static bool plic_cpuhp_setup_done __ro_after_init; static DEFINE_PER_CPU(struct plic_handler, plic_handlers); +static int plic_irq_set_type(struct irq_data *d, unsigned int type); + static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable) { u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32); @@ -103,37 +108,43 @@ static inline void plic_irq_toggle(const struct cpumask *mask, struct irq_data *d, int enable) { int cpu; - struct plic_priv *priv = irq_data_get_irq_chip_data(d); - writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); for_each_cpu(cpu, mask) { struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); - if (handler->present && - cpumask_test_cpu(cpu, &handler->priv->lmask)) - plic_toggle(handler, d->hwirq, enable); + plic_toggle(handler, d->hwirq, enable); } } +static void plic_irq_enable(struct irq_data *d) +{ + plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1); +} + +static void plic_irq_disable(struct irq_data *d) +{ + plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0); +} + static void plic_irq_unmask(struct irq_data *d) { - struct cpumask amask; - unsigned int cpu; struct plic_priv *priv = irq_data_get_irq_chip_data(d); - cpumask_and(&amask, &priv->lmask, cpu_online_mask); - cpu = cpumask_any_and(irq_data_get_affinity_mask(d), - &amask); - if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) - return; - plic_irq_toggle(cpumask_of(cpu), d, 1); + writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); } static void plic_irq_mask(struct irq_data *d) { struct plic_priv *priv = irq_data_get_irq_chip_data(d); - plic_irq_toggle(&priv->lmask, d, 0); + writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); +} + +static void plic_irq_eoi(struct irq_data *d) +{ + struct plic_handler *handler = this_cpu_ptr(&plic_handlers); + + writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); } #ifdef CONFIG_SMP @@ -154,38 +165,68 @@ static int plic_set_affinity(struct irq_data *d, if (cpu >= nr_cpu_ids) return -EINVAL; - plic_irq_toggle(&priv->lmask, d, 0); - plic_irq_toggle(cpumask_of(cpu), d, !irqd_irq_masked(d)); + plic_irq_disable(d); irq_data_update_effective_affinity(d, cpumask_of(cpu)); + if (!irqd_irq_disabled(d)) + plic_irq_enable(d); + return IRQ_SET_MASK_OK_DONE; } #endif -static 
void plic_irq_eoi(struct irq_data *d) -{ - struct plic_handler *handler = this_cpu_ptr(&plic_handlers); - - if (irqd_irq_masked(d)) { - plic_irq_unmask(d); - writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); - plic_irq_mask(d); - } else { - writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); - } -} +static struct irq_chip plic_edge_chip = { + .name = "SiFive PLIC", + .irq_enable = plic_irq_enable, + .irq_disable = plic_irq_disable, + .irq_ack = plic_irq_eoi, + .irq_mask = plic_irq_mask, + .irq_unmask = plic_irq_unmask, +#ifdef CONFIG_SMP + .irq_set_affinity = plic_set_affinity, +#endif + .irq_set_type = plic_irq_set_type, + .flags = IRQCHIP_AFFINITY_PRE_STARTUP, +}; static struct irq_chip plic_chip = { .name = "SiFive PLIC", + .irq_enable = plic_irq_enable, + .irq_disable = plic_irq_disable, .irq_mask = plic_irq_mask, .irq_unmask = plic_irq_unmask, .irq_eoi = plic_irq_eoi, #ifdef CONFIG_SMP .irq_set_affinity = plic_set_affinity, #endif + .irq_set_type = plic_irq_set_type, + .flags = IRQCHIP_AFFINITY_PRE_STARTUP, }; +static int plic_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct plic_priv *priv = irq_data_get_irq_chip_data(d); + + if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks)) + return IRQ_SET_MASK_OK_NOCOPY; + + switch (type) { + case IRQ_TYPE_EDGE_RISING: + irq_set_chip_handler_name_locked(d, &plic_edge_chip, + handle_edge_irq, NULL); + break; + case IRQ_TYPE_LEVEL_HIGH: + irq_set_chip_handler_name_locked(d, &plic_chip, + handle_fasteoi_irq, NULL); + break; + default: + return -EINVAL; + } + + return IRQ_SET_MASK_OK; +} + static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { @@ -198,6 +239,19 @@ static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq, return 0; } +static int plic_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + struct plic_priv *priv = d->host_data; + + if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks)) + return irq_domain_translate_twocell(d, fwspec, hwirq, type); + + return irq_domain_translate_onecell(d, fwspec, hwirq, type); +} + static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { @@ -206,7 +260,7 @@ static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int type; struct irq_fwspec *fwspec = arg; - ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type); + ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type); if (ret) return ret; @@ -220,7 +274,7 @@ static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, } static const struct irq_domain_ops plic_irqdomain_ops = { - .translate = irq_domain_translate_onecell, + .translate = plic_irq_domain_translate, .alloc = plic_irq_domain_alloc, .free = irq_domain_free_irqs_top, }; @@ -281,8 +335,9 @@ static int plic_starting_cpu(unsigned int cpu) return 0; } -static int __init plic_init(struct device_node *node, - struct device_node *parent) +static int __init __plic_init(struct device_node *node, + struct device_node *parent, + unsigned long plic_quirks) { int error = 0, nr_contexts, nr_handlers = 0, i; u32 nr_irqs; @@ -293,6 +348,8 @@ static int __init plic_init(struct device_node *node, if (!priv) return -ENOMEM; + priv->plic_quirks = plic_quirks; + priv->regs = of_iomap(node, 0); if (WARN_ON(!priv->regs)) { error = -EIO; @@ -382,8 +439,11 @@ static int __init plic_init(struct device_node *node, i * CONTEXT_ENABLE_SIZE; 
handler->priv = priv; done: - for (hwirq = 1; hwirq <= nr_irqs; hwirq++) + for (hwirq = 1; hwirq <= nr_irqs; hwirq++) { plic_toggle(handler, hwirq, 0); + writel(1, priv->regs + PRIORITY_BASE + + hwirq * PRIORITY_PER_ID); + } nr_handlers++; } @@ -410,6 +470,20 @@ out_free_priv: return error; } +static int __init plic_init(struct device_node *node, + struct device_node *parent) +{ + return __plic_init(node, parent, 0); +} + IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init); IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */ -IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */ + +static int __init plic_edge_init(struct device_node *node, + struct device_node *parent) +{ + return __plic_init(node, parent, BIT(PLIC_QUIRK_EDGE_INTERRUPT)); +} + +IRQCHIP_DECLARE(andestech_nceplic100, "andestech,nceplic100", plic_edge_init); +IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_edge_init); diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 9d18f47040eb..a73763d475f0 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -34,21 +34,15 @@ struct stm32_exti_bank { u32 swier_ofst; u32 rpr_ofst; u32 fpr_ofst; + u32 trg_ofst; }; #define UNDEF_REG ~0 -struct stm32_desc_irq { - u32 exti; - u32 irq_parent; - struct irq_chip *chip; -}; - struct stm32_exti_drv_data { const struct stm32_exti_bank **exti_banks; - const struct stm32_desc_irq *desc_irqs; + const u8 *desc_irqs; u32 bank_nr; - u32 irq_nr; }; struct stm32_exti_chip_data { @@ -78,6 +72,7 @@ static const struct stm32_exti_bank stm32f4xx_exti_b1 = { .swier_ofst = 0x10, .rpr_ofst = 0x14, .fpr_ofst = UNDEF_REG, + .trg_ofst = UNDEF_REG, }; static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = { @@ -97,6 +92,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b1 = { .swier_ofst = 0x08, .rpr_ofst = 0x88, .fpr_ofst = UNDEF_REG, + .trg_ofst = UNDEF_REG, }; static const struct stm32_exti_bank stm32h7xx_exti_b2 = { @@ -107,6 +103,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b2 = { .swier_ofst = 0x28, .rpr_ofst = 0x98, .fpr_ofst = UNDEF_REG, + .trg_ofst = UNDEF_REG, }; static const struct stm32_exti_bank stm32h7xx_exti_b3 = { @@ -117,6 +114,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b3 = { .swier_ofst = 0x48, .rpr_ofst = 0xA8, .fpr_ofst = UNDEF_REG, + .trg_ofst = UNDEF_REG, }; static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = { @@ -132,32 +130,35 @@ static const struct stm32_exti_drv_data stm32h7xx_drv_data = { static const struct stm32_exti_bank stm32mp1_exti_b1 = { .imr_ofst = 0x80, - .emr_ofst = 0x84, + .emr_ofst = UNDEF_REG, .rtsr_ofst = 0x00, .ftsr_ofst = 0x04, .swier_ofst = 0x08, .rpr_ofst = 0x0C, .fpr_ofst = 0x10, + .trg_ofst = 0x3EC, }; static const struct stm32_exti_bank stm32mp1_exti_b2 = { .imr_ofst = 0x90, - .emr_ofst = 0x94, + .emr_ofst = UNDEF_REG, .rtsr_ofst = 0x20, .ftsr_ofst = 0x24, .swier_ofst = 0x28, .rpr_ofst = 0x2C, .fpr_ofst = 0x30, + .trg_ofst = 0x3E8, }; static const struct stm32_exti_bank stm32mp1_exti_b3 = { .imr_ofst = 0xA0, - .emr_ofst = 0xA4, + .emr_ofst = UNDEF_REG, .rtsr_ofst = 0x40, .ftsr_ofst = 0x44, .swier_ofst = 0x48, .rpr_ofst = 0x4C, .fpr_ofst = 0x50, + .trg_ofst = 0x3E4, }; static const struct stm32_exti_bank *stm32mp1_exti_banks[] = { @@ -169,126 +170,114 @@ static const struct stm32_exti_bank *stm32mp1_exti_banks[] = { static struct irq_chip stm32_exti_h_chip; static struct irq_chip stm32_exti_h_chip_direct; -static const 
struct stm32_desc_irq stm32mp1_desc_irq[] = { - { .exti = 0, .irq_parent = 6, .chip = &stm32_exti_h_chip }, - { .exti = 1, .irq_parent = 7, .chip = &stm32_exti_h_chip }, - { .exti = 2, .irq_parent = 8, .chip = &stm32_exti_h_chip }, - { .exti = 3, .irq_parent = 9, .chip = &stm32_exti_h_chip }, - { .exti = 4, .irq_parent = 10, .chip = &stm32_exti_h_chip }, - { .exti = 5, .irq_parent = 23, .chip = &stm32_exti_h_chip }, - { .exti = 6, .irq_parent = 64, .chip = &stm32_exti_h_chip }, - { .exti = 7, .irq_parent = 65, .chip = &stm32_exti_h_chip }, - { .exti = 8, .irq_parent = 66, .chip = &stm32_exti_h_chip }, - { .exti = 9, .irq_parent = 67, .chip = &stm32_exti_h_chip }, - { .exti = 10, .irq_parent = 40, .chip = &stm32_exti_h_chip }, - { .exti = 11, .irq_parent = 42, .chip = &stm32_exti_h_chip }, - { .exti = 12, .irq_parent = 76, .chip = &stm32_exti_h_chip }, - { .exti = 13, .irq_parent = 77, .chip = &stm32_exti_h_chip }, - { .exti = 14, .irq_parent = 121, .chip = &stm32_exti_h_chip }, - { .exti = 15, .irq_parent = 127, .chip = &stm32_exti_h_chip }, - { .exti = 16, .irq_parent = 1, .chip = &stm32_exti_h_chip }, - { .exti = 19, .irq_parent = 3, .chip = &stm32_exti_h_chip_direct }, - { .exti = 21, .irq_parent = 31, .chip = &stm32_exti_h_chip_direct }, - { .exti = 22, .irq_parent = 33, .chip = &stm32_exti_h_chip_direct }, - { .exti = 23, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct }, - { .exti = 24, .irq_parent = 95, .chip = &stm32_exti_h_chip_direct }, - { .exti = 25, .irq_parent = 107, .chip = &stm32_exti_h_chip_direct }, - { .exti = 26, .irq_parent = 37, .chip = &stm32_exti_h_chip_direct }, - { .exti = 27, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct }, - { .exti = 28, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct }, - { .exti = 29, .irq_parent = 71, .chip = &stm32_exti_h_chip_direct }, - { .exti = 30, .irq_parent = 52, .chip = &stm32_exti_h_chip_direct }, - { .exti = 31, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct }, - { .exti = 32, .irq_parent = 82, .chip = &stm32_exti_h_chip_direct }, - { .exti = 33, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct }, - { .exti = 47, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct }, - { .exti = 48, .irq_parent = 138, .chip = &stm32_exti_h_chip_direct }, - { .exti = 50, .irq_parent = 139, .chip = &stm32_exti_h_chip_direct }, - { .exti = 52, .irq_parent = 140, .chip = &stm32_exti_h_chip_direct }, - { .exti = 53, .irq_parent = 141, .chip = &stm32_exti_h_chip_direct }, - { .exti = 54, .irq_parent = 135, .chip = &stm32_exti_h_chip_direct }, - { .exti = 61, .irq_parent = 100, .chip = &stm32_exti_h_chip_direct }, - { .exti = 65, .irq_parent = 144, .chip = &stm32_exti_h_chip }, - { .exti = 68, .irq_parent = 143, .chip = &stm32_exti_h_chip }, - { .exti = 70, .irq_parent = 62, .chip = &stm32_exti_h_chip_direct }, - { .exti = 73, .irq_parent = 129, .chip = &stm32_exti_h_chip }, +#define EXTI_INVALID_IRQ U8_MAX +#define STM32MP1_DESC_IRQ_SIZE (ARRAY_SIZE(stm32mp1_exti_banks) * IRQS_PER_BANK) + +static const u8 stm32mp1_desc_irq[] = { + /* default value */ + [0 ... 
(STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ, + + [0] = 6, + [1] = 7, + [2] = 8, + [3] = 9, + [4] = 10, + [5] = 23, + [6] = 64, + [7] = 65, + [8] = 66, + [9] = 67, + [10] = 40, + [11] = 42, + [12] = 76, + [13] = 77, + [14] = 121, + [15] = 127, + [16] = 1, + [19] = 3, + [21] = 31, + [22] = 33, + [23] = 72, + [24] = 95, + [25] = 107, + [26] = 37, + [27] = 38, + [28] = 39, + [29] = 71, + [30] = 52, + [31] = 53, + [32] = 82, + [33] = 83, + [47] = 93, + [48] = 138, + [50] = 139, + [52] = 140, + [53] = 141, + [54] = 135, + [61] = 100, + [65] = 144, + [68] = 143, + [70] = 62, + [73] = 129, }; -static const struct stm32_desc_irq stm32mp13_desc_irq[] = { - { .exti = 0, .irq_parent = 6, .chip = &stm32_exti_h_chip }, - { .exti = 1, .irq_parent = 7, .chip = &stm32_exti_h_chip }, - { .exti = 2, .irq_parent = 8, .chip = &stm32_exti_h_chip }, - { .exti = 3, .irq_parent = 9, .chip = &stm32_exti_h_chip }, - { .exti = 4, .irq_parent = 10, .chip = &stm32_exti_h_chip }, - { .exti = 5, .irq_parent = 24, .chip = &stm32_exti_h_chip }, - { .exti = 6, .irq_parent = 65, .chip = &stm32_exti_h_chip }, - { .exti = 7, .irq_parent = 66, .chip = &stm32_exti_h_chip }, - { .exti = 8, .irq_parent = 67, .chip = &stm32_exti_h_chip }, - { .exti = 9, .irq_parent = 68, .chip = &stm32_exti_h_chip }, - { .exti = 10, .irq_parent = 41, .chip = &stm32_exti_h_chip }, - { .exti = 11, .irq_parent = 43, .chip = &stm32_exti_h_chip }, - { .exti = 12, .irq_parent = 77, .chip = &stm32_exti_h_chip }, - { .exti = 13, .irq_parent = 78, .chip = &stm32_exti_h_chip }, - { .exti = 14, .irq_parent = 106, .chip = &stm32_exti_h_chip }, - { .exti = 15, .irq_parent = 109, .chip = &stm32_exti_h_chip }, - { .exti = 16, .irq_parent = 1, .chip = &stm32_exti_h_chip }, - { .exti = 19, .irq_parent = 3, .chip = &stm32_exti_h_chip_direct }, - { .exti = 21, .irq_parent = 32, .chip = &stm32_exti_h_chip_direct }, - { .exti = 22, .irq_parent = 34, .chip = &stm32_exti_h_chip_direct }, - { .exti = 23, .irq_parent = 73, .chip = &stm32_exti_h_chip_direct }, - { .exti = 24, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct }, - { .exti = 25, .irq_parent = 114, .chip = &stm32_exti_h_chip_direct }, - { .exti = 26, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct }, - { .exti = 27, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct }, - { .exti = 28, .irq_parent = 40, .chip = &stm32_exti_h_chip_direct }, - { .exti = 29, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct }, - { .exti = 30, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct }, - { .exti = 31, .irq_parent = 54, .chip = &stm32_exti_h_chip_direct }, - { .exti = 32, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct }, - { .exti = 33, .irq_parent = 84, .chip = &stm32_exti_h_chip_direct }, - { .exti = 44, .irq_parent = 96, .chip = &stm32_exti_h_chip_direct }, - { .exti = 47, .irq_parent = 92, .chip = &stm32_exti_h_chip_direct }, - { .exti = 48, .irq_parent = 116, .chip = &stm32_exti_h_chip_direct }, - { .exti = 50, .irq_parent = 117, .chip = &stm32_exti_h_chip_direct }, - { .exti = 52, .irq_parent = 118, .chip = &stm32_exti_h_chip_direct }, - { .exti = 53, .irq_parent = 119, .chip = &stm32_exti_h_chip_direct }, - { .exti = 68, .irq_parent = 63, .chip = &stm32_exti_h_chip_direct }, - { .exti = 70, .irq_parent = 98, .chip = &stm32_exti_h_chip_direct }, +static const u8 stm32mp13_desc_irq[] = { + /* default value */ + [0 ... 
(STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ, + + [0] = 6, + [1] = 7, + [2] = 8, + [3] = 9, + [4] = 10, + [5] = 24, + [6] = 65, + [7] = 66, + [8] = 67, + [9] = 68, + [10] = 41, + [11] = 43, + [12] = 77, + [13] = 78, + [14] = 106, + [15] = 109, + [16] = 1, + [19] = 3, + [21] = 32, + [22] = 34, + [23] = 73, + [24] = 93, + [25] = 114, + [26] = 38, + [27] = 39, + [28] = 40, + [29] = 72, + [30] = 53, + [31] = 54, + [32] = 83, + [33] = 84, + [44] = 96, + [47] = 92, + [48] = 116, + [50] = 117, + [52] = 118, + [53] = 119, + [68] = 63, + [70] = 98, }; static const struct stm32_exti_drv_data stm32mp1_drv_data = { .exti_banks = stm32mp1_exti_banks, .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks), .desc_irqs = stm32mp1_desc_irq, - .irq_nr = ARRAY_SIZE(stm32mp1_desc_irq), }; static const struct stm32_exti_drv_data stm32mp13_drv_data = { .exti_banks = stm32mp1_exti_banks, .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks), .desc_irqs = stm32mp13_desc_irq, - .irq_nr = ARRAY_SIZE(stm32mp13_desc_irq), }; -static const struct -stm32_desc_irq *stm32_exti_get_desc(const struct stm32_exti_drv_data *drv_data, - irq_hw_number_t hwirq) -{ - const struct stm32_desc_irq *desc = NULL; - int i; - - if (!drv_data->desc_irqs) - return NULL; - - for (i = 0; i < drv_data->irq_nr; i++) { - desc = &drv_data->desc_irqs[i]; - if (desc->exti == hwirq) - break; - } - - return desc; -} - static unsigned long stm32_exti_pending(struct irq_chip_generic *gc) { struct stm32_exti_chip_data *chip_data = gc->private; @@ -614,7 +603,7 @@ static int stm32_exti_h_set_affinity(struct irq_data *d, if (d->parent_data->chip) return irq_chip_set_affinity_parent(d, dest, force); - return -EINVAL; + return IRQ_SET_MASK_OK_DONE; } static int __maybe_unused stm32_exti_h_suspend(void) @@ -691,8 +680,8 @@ static struct irq_chip stm32_exti_h_chip_direct = { .name = "stm32-exti-h-direct", .irq_eoi = irq_chip_eoi_parent, .irq_ack = irq_chip_ack_parent, - .irq_mask = irq_chip_mask_parent, - .irq_unmask = irq_chip_unmask_parent, + .irq_mask = stm32_exti_h_mask, + .irq_unmask = stm32_exti_h_unmask, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_set_type = irq_chip_set_type_parent, .irq_set_wake = stm32_exti_h_set_wake, @@ -706,28 +695,36 @@ static int stm32_exti_h_domain_alloc(struct irq_domain *dm, { struct stm32_exti_host_data *host_data = dm->host_data; struct stm32_exti_chip_data *chip_data; - const struct stm32_desc_irq *desc; + u8 desc_irq; struct irq_fwspec *fwspec = data; struct irq_fwspec p_fwspec; irq_hw_number_t hwirq; int bank; + u32 event_trg; + struct irq_chip *chip; hwirq = fwspec->param[0]; + if (hwirq >= host_data->drv_data->bank_nr * IRQS_PER_BANK) + return -EINVAL; + bank = hwirq / IRQS_PER_BANK; chip_data = &host_data->chips_data[bank]; + event_trg = readl_relaxed(host_data->base + chip_data->reg_bank->trg_ofst); + chip = (event_trg & BIT(hwirq % IRQS_PER_BANK)) ? 
+ &stm32_exti_h_chip : &stm32_exti_h_chip_direct; + + irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data); - desc = stm32_exti_get_desc(host_data->drv_data, hwirq); - if (!desc) + if (!host_data->drv_data || !host_data->drv_data->desc_irqs) return -EINVAL; - irq_domain_set_hwirq_and_chip(dm, virq, hwirq, desc->chip, - chip_data); - if (desc->irq_parent) { + desc_irq = host_data->drv_data->desc_irqs[hwirq]; + if (desc_irq != EXTI_INVALID_IRQ) { p_fwspec.fwnode = dm->parent->fwnode; p_fwspec.param_count = 3; p_fwspec.param[0] = GIC_SPI; - p_fwspec.param[1] = desc->irq_parent; + p_fwspec.param[1] = desc_irq; p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH; return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec); @@ -792,7 +789,8 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data, * clear registers to avoid residue */ writel_relaxed(0, base + stm32_bank->imr_ofst); - writel_relaxed(0, base + stm32_bank->emr_ofst); + if (stm32_bank->emr_ofst != UNDEF_REG) + writel_relaxed(0, base + stm32_bank->emr_ofst); pr_info("%pOF: bank%d\n", node, bank_idx); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 9526ccbedafb..80c9f7134e9b 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -1001,12 +1001,13 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) static int validate_raid_redundancy(struct raid_set *rs) { unsigned int i, rebuild_cnt = 0; - unsigned int rebuilds_per_group = 0, copies; + unsigned int rebuilds_per_group = 0, copies, raid_disks; unsigned int group_size, last_group_start; - for (i = 0; i < rs->md.raid_disks; i++) - if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || - !rs->dev[i].rdev.sb_page) + for (i = 0; i < rs->raid_disks; i++) + if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) && + ((!test_bit(In_sync, &rs->dev[i].rdev.flags) || + !rs->dev[i].rdev.sb_page))) rebuild_cnt++; switch (rs->md.level) { @@ -1046,8 +1047,9 @@ static int validate_raid_redundancy(struct raid_set *rs) * A A B B C * C D D E E */ + raid_disks = min(rs->raid_disks, rs->md.raid_disks); if (__is_raid10_near(rs->md.new_layout)) { - for (i = 0; i < rs->md.raid_disks; i++) { + for (i = 0; i < raid_disks; i++) { if (!(i % copies)) rebuilds_per_group = 0; if ((!rs->dev[i].rdev.sb_page || @@ -1070,10 +1072,10 @@ static int validate_raid_redundancy(struct raid_set *rs) * results in the need to treat the last (potentially larger) * set differently. 
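* (A hedged note on the bound used below: during a reshape rs->raid_disks
* and rs->md.raid_disks may differ, so these checks walk only the smaller
* of the two counts, computed as raid_disks above.)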
*/ - group_size = (rs->md.raid_disks / copies); - last_group_start = (rs->md.raid_disks / group_size) - 1; + group_size = (raid_disks / copies); + last_group_start = (raid_disks / group_size) - 1; last_group_start *= group_size; - for (i = 0; i < rs->md.raid_disks; i++) { + for (i = 0; i < raid_disks; i++) { if (!(i % copies) && !(i > last_group_start)) rebuilds_per_group = 0; if ((!rs->dev[i].rdev.sb_page || @@ -1588,7 +1590,7 @@ static sector_t __rdev_sectors(struct raid_set *rs) { int i; - for (i = 0; i < rs->md.raid_disks; i++) { + for (i = 0; i < rs->raid_disks; i++) { struct md_rdev *rdev = &rs->dev[i].rdev; if (!test_bit(Journal, &rdev->flags) && @@ -3766,13 +3768,13 @@ static int raid_iterate_devices(struct dm_target *ti, unsigned int i; int r = 0; - for (i = 0; !r && i < rs->md.raid_disks; i++) - if (rs->dev[i].data_dev) - r = fn(ti, - rs->dev[i].data_dev, - 0, /* No offset on data devs */ - rs->md.dev_sectors, - data); + for (i = 0; !r && i < rs->raid_disks; i++) { + if (rs->dev[i].data_dev) { + r = fn(ti, rs->dev[i].data_dev, + 0, /* No offset on data devs */ + rs->md.dev_sectors, data); + } + } return r; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 5d09256d7f81..c8539d0e12dd 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7304,7 +7304,9 @@ static struct r5conf *setup_conf(struct mddev *mddev) goto abort; conf->mddev = mddev; - if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) + ret = -ENOMEM; + conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!conf->stripe_hashtbl) goto abort; /* We init hash_locks[0] separately to that it can be used @@ -7933,7 +7935,7 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) int err = 0; int number = rdev->raid_disk; struct md_rdev __rcu **rdevp; - struct disk_info *p = conf->disks + number; + struct disk_info *p; struct md_rdev *tmp; print_raid5_conf(conf); @@ -7952,6 +7954,9 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) log_exit(conf); return 0; } + if (unlikely(number >= conf->pool_size)) + return 0; + p = conf->disks + number; if (rdev == rcu_access_pointer(p->rdev)) rdevp = &p->rdev; else if (rdev == rcu_access_pointer(p->replacement)) @@ -8062,6 +8067,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) */ if (rdev->saved_raid_disk >= 0 && rdev->saved_raid_disk >= first && + rdev->saved_raid_disk <= last && conf->disks[rdev->saved_raid_disk].rdev == NULL) first = rdev->saved_raid_disk; diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 4c5154e0bf00..d7cb7ead2ac7 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -21,11 +21,13 @@ /* SMI COMMON */ #define SMI_L1LEN 0x100 +#define SMI_L1_ARB 0x200 #define SMI_BUS_SEL 0x220 #define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1) /* All are MMU0 defaultly. Only specialize mmu1 here. 
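* Each larb takes a 2-bit field in SMI_BUS_SEL (see SMI_BUS_LARB_SHIFT);
* writing 1 to that field routes the larb to MMU1.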
*/ #define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid)) +#define SMI_READ_FIFO_TH 0x230 #define SMI_M4U_TH 0x234 #define SMI_FIFO_TH1 0x238 #define SMI_FIFO_TH2 0x23c @@ -360,6 +362,7 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = { {.compatible = "mediatek,mt2701-smi-larb", .data = &mtk_smi_larb_mt2701}, {.compatible = "mediatek,mt2712-smi-larb", .data = &mtk_smi_larb_mt2712}, {.compatible = "mediatek,mt6779-smi-larb", .data = &mtk_smi_larb_mt6779}, + {.compatible = "mediatek,mt6795-smi-larb", .data = &mtk_smi_larb_mt8173}, {.compatible = "mediatek,mt8167-smi-larb", .data = &mtk_smi_larb_mt8167}, {.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173}, {.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183}, @@ -544,6 +547,13 @@ static struct platform_driver mtk_smi_larb_driver = { } }; +static const struct mtk_smi_reg_pair mtk_smi_common_mt6795_init[SMI_COMMON_INIT_REGS_NR] = { + {SMI_L1_ARB, 0x1b}, + {SMI_M4U_TH, 0xce810c85}, + {SMI_FIFO_TH1, 0x43214c8}, + {SMI_READ_FIFO_TH, 0x191f}, +}; + static const struct mtk_smi_reg_pair mtk_smi_common_mt8195_init[SMI_COMMON_INIT_REGS_NR] = { {SMI_L1LEN, 0xb}, {SMI_M4U_TH, 0xe100e10}, @@ -568,6 +578,12 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt6779 = { F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7), }; +static const struct mtk_smi_common_plat mtk_smi_common_mt6795 = { + .type = MTK_SMI_GEN2, + .bus_sel = F_MMU1_LARB(0), + .init = mtk_smi_common_mt6795_init, +}; + static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = { .type = MTK_SMI_GEN2, .has_gals = true, @@ -612,6 +628,7 @@ static const struct of_device_id mtk_smi_common_of_ids[] = { {.compatible = "mediatek,mt2701-smi-common", .data = &mtk_smi_common_gen1}, {.compatible = "mediatek,mt2712-smi-common", .data = &mtk_smi_common_gen2}, {.compatible = "mediatek,mt6779-smi-common", .data = &mtk_smi_common_mt6779}, + {.compatible = "mediatek,mt6795-smi-common", .data = &mtk_smi_common_mt6795}, {.compatible = "mediatek,mt8167-smi-common", .data = &mtk_smi_common_gen2}, {.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2}, {.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183}, diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c index e23ebd421f17..a9e8fd99730f 100644 --- a/drivers/memory/tegra/tegra234.c +++ b/drivers/memory/tegra/tegra234.c @@ -11,6 +11,76 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { { + .id = TEGRA234_MEMORY_CLIENT_MGBEARD, + .name = "mgbeard", + .sid = TEGRA234_SID_MGBE, + .regs = { + .sid = { + .override = 0x2c0, + .security = 0x2c4, + }, + }, + }, { + .id = TEGRA234_MEMORY_CLIENT_MGBEBRD, + .name = "mgbebrd", + .sid = TEGRA234_SID_MGBE_VF1, + .regs = { + .sid = { + .override = 0x2c8, + .security = 0x2cc, + }, + }, + }, { + .id = TEGRA234_MEMORY_CLIENT_MGBECRD, + .name = "mgbecrd", + .sid = TEGRA234_SID_MGBE_VF2, + .regs = { + .sid = { + .override = 0x2d0, + .security = 0x2d4, + }, + }, + }, { + .id = TEGRA234_MEMORY_CLIENT_MGBEDRD, + .name = "mgbedrd", + .sid = TEGRA234_SID_MGBE_VF3, + .regs = { + .sid = { + .override = 0x2d8, + .security = 0x2dc, + }, + }, + }, { + .id = TEGRA234_MEMORY_CLIENT_MGBEAWR, + .name = "mgbeawr", + .sid = TEGRA234_SID_MGBE, + .regs = { + .sid = { + .override = 0x2e0, + .security = 0x2e4, + }, + }, + }, { + .id = TEGRA234_MEMORY_CLIENT_MGBEBWR, + .name = "mgbebwr", + .sid = TEGRA234_SID_MGBE_VF1, + .regs = { + .sid = { + .override = 0x2f8, + .security = 0x2fc, + }, + }, + }, { + 
.id = TEGRA234_MEMORY_CLIENT_MGBECWR, + .name = "mgbecwr", + .sid = TEGRA234_SID_MGBE_VF2, + .regs = { + .sid = { + .override = 0x308, + .security = 0x30c, + }, + }, + }, { .id = TEGRA234_MEMORY_CLIENT_SDMMCRAB, .name = "sdmmcrab", .sid = TEGRA234_SID_SDMMC4, @@ -21,6 +91,16 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, }, }, { + .id = TEGRA234_MEMORY_CLIENT_MGBEDWR, + .name = "mgbedwr", + .sid = TEGRA234_SID_MGBE_VF3, + .regs = { + .sid = { + .override = 0x328, + .security = 0x32c, + }, + }, + }, { .id = TEGRA234_MEMORY_CLIENT_SDMMCWAB, .name = "sdmmcwab", .sid = TEGRA234_SID_SDMMC4, diff --git a/drivers/mfd/bcm2835-pm.c b/drivers/mfd/bcm2835-pm.c index 42fe67f1538e..49cd1f03884a 100644 --- a/drivers/mfd/bcm2835-pm.c +++ b/drivers/mfd/bcm2835-pm.c @@ -25,9 +25,52 @@ static const struct mfd_cell bcm2835_power_devs[] = { { .name = "bcm2835-power" }, }; +static int bcm2835_pm_get_pdata(struct platform_device *pdev, + struct bcm2835_pm *pm) +{ + if (of_find_property(pm->dev->of_node, "reg-names", NULL)) { + struct resource *res; + + pm->base = devm_platform_ioremap_resource_byname(pdev, "pm"); + if (IS_ERR(pm->base)) + return PTR_ERR(pm->base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "asb"); + if (res) { + pm->asb = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pm->asb)) + pm->asb = NULL; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "rpivid_asb"); + if (res) { + pm->rpivid_asb = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pm->rpivid_asb)) + pm->rpivid_asb = NULL; + } + + return 0; + } + + /* If no 'reg-names' property is found we can assume we're using old DTB. */ + pm->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(pm->base)) + return PTR_ERR(pm->base); + + pm->asb = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(pm->asb)) + pm->asb = NULL; + + pm->rpivid_asb = devm_platform_ioremap_resource(pdev, 2); + if (IS_ERR(pm->rpivid_asb)) + pm->rpivid_asb = NULL; + + return 0; +} + static int bcm2835_pm_probe(struct platform_device *pdev) { - struct resource *res; struct device *dev = &pdev->dev; struct bcm2835_pm *pm; int ret; @@ -39,10 +82,9 @@ static int bcm2835_pm_probe(struct platform_device *pdev) pm->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pm->base = devm_ioremap_resource(dev, res); - if (IS_ERR(pm->base)) - return PTR_ERR(pm->base); + ret = bcm2835_pm_get_pdata(pdev, pm); + if (ret) + return ret; ret = devm_mfd_add_devices(dev, -1, bcm2835_pm_devs, ARRAY_SIZE(bcm2835_pm_devs), @@ -50,30 +92,22 @@ static int bcm2835_pm_probe(struct platform_device *pdev) if (ret) return ret; - /* We'll use the presence of the AXI ASB regs in the + /* + * We'll use the presence of the AXI ASB regs in the * bcm2835-pm binding as the key for whether we can reference * the full PM register range and support power domains. 
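+ * bcm2835_pm_get_pdata() above leaves pm->asb NULL when the ASB
+ * region is absent or unusable, so a plain pointer check is
+ * sufficient here.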
*/ - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (res) { - pm->asb = devm_ioremap_resource(dev, res); - if (IS_ERR(pm->asb)) - return PTR_ERR(pm->asb); - - ret = devm_mfd_add_devices(dev, -1, - bcm2835_power_devs, - ARRAY_SIZE(bcm2835_power_devs), - NULL, 0, NULL); - if (ret) - return ret; - } - + if (pm->asb) + return devm_mfd_add_devices(dev, -1, bcm2835_power_devs, + ARRAY_SIZE(bcm2835_power_devs), + NULL, 0, NULL); return 0; } static const struct of_device_id bcm2835_pm_of_match[] = { { .compatible = "brcm,bcm2835-pm-wdt", }, { .compatible = "brcm,bcm2835-pm", }, + { .compatible = "brcm,bcm2711-pm", }, {}, }; MODULE_DEVICE_TABLE(of, bcm2835_pm_of_match); diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c index 1ef9b61077c4..f150d8769f19 100644 --- a/drivers/misc/cardreader/rtsx_usb.c +++ b/drivers/misc/cardreader/rtsx_usb.c @@ -631,16 +631,20 @@ static int rtsx_usb_probe(struct usb_interface *intf, ucr->pusb_dev = usb_dev; - ucr->iobuf = usb_alloc_coherent(ucr->pusb_dev, IOBUF_SIZE, - GFP_KERNEL, &ucr->iobuf_dma); - if (!ucr->iobuf) + ucr->cmd_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL); + if (!ucr->cmd_buf) return -ENOMEM; + ucr->rsp_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL); + if (!ucr->rsp_buf) { + ret = -ENOMEM; + goto out_free_cmd_buf; + } + usb_set_intfdata(intf, ucr); ucr->vendor_id = id->idVendor; ucr->product_id = id->idProduct; - ucr->cmd_buf = ucr->rsp_buf = ucr->iobuf; mutex_init(&ucr->dev_mutex); @@ -668,8 +672,11 @@ static int rtsx_usb_probe(struct usb_interface *intf, out_init_fail: usb_set_intfdata(ucr->pusb_intf, NULL); - usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf, - ucr->iobuf_dma); + kfree(ucr->rsp_buf); + ucr->rsp_buf = NULL; +out_free_cmd_buf: + kfree(ucr->cmd_buf); + ucr->cmd_buf = NULL; return ret; } @@ -682,8 +689,12 @@ static void rtsx_usb_disconnect(struct usb_interface *intf) mfd_remove_devices(&intf->dev); usb_set_intfdata(ucr->pusb_intf, NULL); - usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf, - ucr->iobuf_dma); + + kfree(ucr->cmd_buf); + ucr->cmd_buf = NULL; + + kfree(ucr->rsp_buf); + ucr->rsp_buf = NULL; } #ifdef CONFIG_PM diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index c9c56fd194c1..bdffc6543f6f 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -80,10 +80,9 @@ static int at25_ee_read(void *priv, unsigned int offset, struct at25_data *at25 = priv; char *buf = val; size_t max_chunk = spi_max_transfer_size(at25->spi); - size_t num_msgs = DIV_ROUND_UP(count, max_chunk); - size_t nr_bytes = 0; - unsigned int msg_offset; - size_t msg_count; + unsigned int msg_offset = offset; + size_t bytes_left = count; + size_t segment; u8 *cp; ssize_t status; struct spi_transfer t[2]; @@ -97,9 +96,8 @@ static int at25_ee_read(void *priv, unsigned int offset, if (unlikely(!count)) return -EINVAL; - msg_offset = (unsigned int)offset; - msg_count = min(count, max_chunk); - while (num_msgs) { + do { + segment = min(bytes_left, max_chunk); cp = at25->command; instr = AT25_READ; @@ -131,8 +129,8 @@ static int at25_ee_read(void *priv, unsigned int offset, t[0].len = at25->addrlen + 1; spi_message_add_tail(&t[0], &m); - t[1].rx_buf = buf + nr_bytes; - t[1].len = msg_count; + t[1].rx_buf = buf; + t[1].len = segment; spi_message_add_tail(&t[1], &m); status = spi_sync(at25->spi, &m); @@ -142,10 +140,10 @@ static int at25_ee_read(void *priv, unsigned int offset, if (status) return status; - --num_msgs; - msg_offset += msg_count; - nr_bytes += msg_count; - } + msg_offset += 
segment; + buf += segment; + bytes_left -= segment; + } while (bytes_left > 0); dev_dbg(&at25->spi->dev, "read %zu bytes at %d\n", count, offset); @@ -229,7 +227,7 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) do { unsigned long timeout, retries; unsigned segment; - unsigned offset = (unsigned) off; + unsigned offset = off; u8 *cp = bounce; int sr; u8 instr; diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index 2e0aa74ac185..95ef971b5e1c 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -13,10 +13,13 @@ lkdtm-$(CONFIG_LKDTM) += cfi.o lkdtm-$(CONFIG_LKDTM) += fortify.o lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o -KASAN_SANITIZE_rodata.o := n KASAN_SANITIZE_stackleak.o := n -KCOV_INSTRUMENT_rodata.o := n -CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) + +KASAN_SANITIZE_rodata.o := n +KCSAN_SANITIZE_rodata.o := n +KCOV_INSTRUMENT_rodata.o := n +OBJECT_FILES_NON_STANDARD_rodata.o := y +CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS) OBJCOPYFLAGS := OBJCOPYFLAGS_rodata_objcopy.o := \ diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 86e867ffbb10..033be559a730 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -1298,8 +1298,9 @@ static int sdhci_omap_probe(struct platform_device *pdev) /* * omap_device_pm_domain has callbacks to enable the main * functional clock, interface clock and also configure the - * SYSCONFIG register of omap devices. The callback will be invoked - * as part of pm_runtime_get_sync. + * SYSCONFIG register to clear any boot loader set voltage + * capabilities before calling sdhci_setup_host(). The + * callback will be invoked as part of pm_runtime_get_sync. */ pm_runtime_use_autosuspend(dev); pm_runtime_set_autosuspend_delay(dev, 50); @@ -1441,7 +1442,8 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); - sdhci_runtime_suspend_host(host); + if (omap_host->con != -EINVAL) + sdhci_runtime_suspend_host(host); sdhci_omap_context_save(omap_host); @@ -1458,10 +1460,10 @@ static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev) pinctrl_pm_select_default_state(dev); - if (omap_host->con != -EINVAL) + if (omap_host->con != -EINVAL) { sdhci_omap_context_restore(omap_host); - - sdhci_runtime_resume_host(host, 0); + sdhci_runtime_resume_host(host, 0); + } return 0; } diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 889e40329956..93da23682d86 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -850,9 +850,10 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this, unsigned int tRP_ps; bool use_half_period; int sample_delay_ps, sample_delay_factor; - u16 busy_timeout_cycles; + unsigned int busy_timeout_cycles; u8 wrn_dly_sel; unsigned long clk_rate, min_rate; + u64 busy_timeout_ps; if (sdr->tRC_min >= 30000) { /* ONFI non-EDO modes [0-3] */ @@ -885,7 +886,8 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this, addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps); data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps); data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps); - busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps); + busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max); + busy_timeout_cycles = 
TO_CYCLES(busy_timeout_ps, period_ps); hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) | BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) | diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index b2a4f998c180..8c1eeb5a8db8 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -94,6 +94,7 @@ config WIREGUARD select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2 select CRYPTO_POLY1305_MIPS if MIPS + select CRYPTO_CHACHA_S390 if S390 help WireGuard is a secure, fast, and easy to use replacement for IPSec that uses modern cryptography and clever networking tricks. It's diff --git a/drivers/net/amt.c b/drivers/net/amt.c index be2719a3ba70..e019526e1df6 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -563,7 +563,7 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt) ihv3->nsrcs = 0; ihv3->resv = 0; ihv3->suppress = false; - ihv3->qrv = amt->net->ipv4.sysctl_igmp_qrv; + ihv3->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv); ihv3->csum = 0; csum = &ihv3->csum; csum_start = (void *)ihv3; @@ -577,14 +577,14 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt) return skb; } -static void __amt_update_gw_status(struct amt_dev *amt, enum amt_status status, - bool validate) +static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status, + bool validate) { if (validate && amt->status >= status) return; netdev_dbg(amt->dev, "Update GW status %s -> %s", status_str[amt->status], status_str[status]); - amt->status = status; + WRITE_ONCE(amt->status, status); } static void __amt_update_relay_status(struct amt_tunnel_list *tunnel, @@ -600,14 +600,6 @@ static void __amt_update_relay_status(struct amt_tunnel_list *tunnel, tunnel->status = status; } -static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status, - bool validate) -{ - spin_lock_bh(&amt->lock); - __amt_update_gw_status(amt, status, validate); - spin_unlock_bh(&amt->lock); -} - static void amt_update_relay_status(struct amt_tunnel_list *tunnel, enum amt_status status, bool validate) { @@ -700,9 +692,7 @@ static void amt_send_discovery(struct amt_dev *amt) if (unlikely(net_xmit_eval(err))) amt->dev->stats.tx_errors++; - spin_lock_bh(&amt->lock); - __amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true); - spin_unlock_bh(&amt->lock); + amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true); out: rcu_read_unlock(); } @@ -900,6 +890,28 @@ static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel) } #endif +static bool amt_queue_event(struct amt_dev *amt, enum amt_event event, + struct sk_buff *skb) +{ + int index; + + spin_lock_bh(&amt->lock); + if (amt->nr_events >= AMT_MAX_EVENTS) { + spin_unlock_bh(&amt->lock); + return 1; + } + + index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS; + amt->events[index].event = event; + amt->events[index].skb = skb; + amt->nr_events++; + amt->event_idx %= AMT_MAX_EVENTS; + queue_work(amt_wq, &amt->event_wq); + spin_unlock_bh(&amt->lock); + + return 0; +} + static void amt_secret_work(struct work_struct *work) { struct amt_dev *amt = container_of(to_delayed_work(work), @@ -913,58 +925,72 @@ static void amt_secret_work(struct work_struct *work) msecs_to_jiffies(AMT_SECRET_TIMEOUT)); } -static void amt_discovery_work(struct work_struct *work) +static void amt_event_send_discovery(struct amt_dev *amt) { - struct amt_dev *amt = container_of(to_delayed_work(work), - struct amt_dev, - discovery_wq); - - spin_lock_bh(&amt->lock); if (amt->status > 
AMT_STATUS_SENT_DISCOVERY) goto out; get_random_bytes(&amt->nonce, sizeof(__be32)); - spin_unlock_bh(&amt->lock); amt_send_discovery(amt); - spin_lock_bh(&amt->lock); out: mod_delayed_work(amt_wq, &amt->discovery_wq, msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT)); - spin_unlock_bh(&amt->lock); } -static void amt_req_work(struct work_struct *work) +static void amt_discovery_work(struct work_struct *work) { struct amt_dev *amt = container_of(to_delayed_work(work), struct amt_dev, - req_wq); + discovery_wq); + + if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL)) + mod_delayed_work(amt_wq, &amt->discovery_wq, + msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT)); +} + +static void amt_event_send_request(struct amt_dev *amt) +{ u32 exp; - spin_lock_bh(&amt->lock); if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT) goto out; if (amt->req_cnt > AMT_MAX_REQ_COUNT) { netdev_dbg(amt->dev, "Gateway is not ready"); amt->qi = AMT_INIT_REQ_TIMEOUT; - amt->ready4 = false; - amt->ready6 = false; + WRITE_ONCE(amt->ready4, false); + WRITE_ONCE(amt->ready6, false); amt->remote_ip = 0; - __amt_update_gw_status(amt, AMT_STATUS_INIT, false); + amt_update_gw_status(amt, AMT_STATUS_INIT, false); amt->req_cnt = 0; + amt->nonce = 0; goto out; } - spin_unlock_bh(&amt->lock); + + if (!amt->req_cnt) { + WRITE_ONCE(amt->ready4, false); + WRITE_ONCE(amt->ready6, false); + get_random_bytes(&amt->nonce, sizeof(__be32)); + } amt_send_request(amt, false); amt_send_request(amt, true); - spin_lock_bh(&amt->lock); - __amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true); + amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true); amt->req_cnt++; out: exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT); mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000)); - spin_unlock_bh(&amt->lock); +} + +static void amt_req_work(struct work_struct *work) +{ + struct amt_dev *amt = container_of(to_delayed_work(work), + struct amt_dev, + req_wq); + + if (amt_queue_event(amt, AMT_EVENT_SEND_REQUEST, NULL)) + mod_delayed_work(amt_wq, &amt->req_wq, + msecs_to_jiffies(100)); } static bool amt_send_membership_update(struct amt_dev *amt, @@ -1220,7 +1246,8 @@ static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev) /* Gateway only passes IGMP/MLD packets */ if (!report) goto free; - if ((!v6 && !amt->ready4) || (v6 && !amt->ready6)) + if ((!v6 && !READ_ONCE(amt->ready4)) || + (v6 && !READ_ONCE(amt->ready6))) goto free; if (amt_send_membership_update(amt, skb, v6)) goto free; @@ -2236,6 +2263,10 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb) ipv4_is_zeronet(amta->ip4)) return true; + if (amt->status != AMT_STATUS_SENT_DISCOVERY || + amt->nonce != amta->nonce) + return true; + amt->remote_ip = amta->ip4; netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip); mod_delayed_work(amt_wq, &amt->req_wq, 0); @@ -2251,6 +2282,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb) struct ethhdr *eth; struct iphdr *iph; + if (READ_ONCE(amt->status) != AMT_STATUS_SENT_UPDATE) + return true; + hdr_size = sizeof(*amtmd) + sizeof(struct udphdr); if (!pskb_may_pull(skb, hdr_size)) return true; @@ -2325,6 +2359,9 @@ static bool amt_membership_query_handler(struct amt_dev *amt, if (amtmq->reserved || amtmq->version) return true; + if (amtmq->nonce != amt->nonce) + return true; + hdr_size -= sizeof(*eth); if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false)) return true; @@ -2339,6 +2376,9 @@ static bool 
amt_membership_query_handler(struct amt_dev *amt, iph = ip_hdr(skb); if (iph->version == 4) { + if (READ_ONCE(amt->ready4)) + return true; + if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3))) return true; @@ -2349,12 +2389,10 @@ static bool amt_membership_query_handler(struct amt_dev *amt, ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS); skb_reset_transport_header(skb); skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS); - spin_lock_bh(&amt->lock); - amt->ready4 = true; + WRITE_ONCE(amt->ready4, true); amt->mac = amtmq->response_mac; amt->req_cnt = 0; amt->qi = ihv3->qqic; - spin_unlock_bh(&amt->lock); skb->protocol = htons(ETH_P_IP); eth->h_proto = htons(ETH_P_IP); ip_eth_mc_map(iph->daddr, eth->h_dest); @@ -2363,6 +2401,9 @@ static bool amt_membership_query_handler(struct amt_dev *amt, struct mld2_query *mld2q; struct ipv6hdr *ip6h; + if (READ_ONCE(amt->ready6)) + return true; + if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS + sizeof(*mld2q))) return true; @@ -2374,12 +2415,10 @@ static bool amt_membership_query_handler(struct amt_dev *amt, mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); skb_reset_transport_header(skb); skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); - spin_lock_bh(&amt->lock); - amt->ready6 = true; + WRITE_ONCE(amt->ready6, true); amt->mac = amtmq->response_mac; amt->req_cnt = 0; amt->qi = mld2q->mld2q_qqic; - spin_unlock_bh(&amt->lock); skb->protocol = htons(ETH_P_IPV6); eth->h_proto = htons(ETH_P_IPV6); ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); @@ -2392,12 +2431,14 @@ static bool amt_membership_query_handler(struct amt_dev *amt, skb->pkt_type = PACKET_MULTICAST; skb->ip_summed = CHECKSUM_NONE; len = skb->len; + local_bh_disable(); if (__netif_rx(skb) == NET_RX_SUCCESS) { amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true); dev_sw_netstats_rx_add(amt->dev, len); } else { amt->dev->stats.rx_dropped++; } + local_bh_enable(); return false; } @@ -2638,7 +2679,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb) if (tunnel->ip4 == iph->saddr) goto send; + spin_lock_bh(&amt->lock); if (amt->nr_tunnels >= amt->max_tunnels) { + spin_unlock_bh(&amt->lock); icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); return true; } @@ -2646,8 +2689,10 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb) tunnel = kzalloc(sizeof(*tunnel) + (sizeof(struct hlist_head) * amt->hash_buckets), GFP_ATOMIC); - if (!tunnel) + if (!tunnel) { + spin_unlock_bh(&amt->lock); return true; + } tunnel->source_port = udph->source; tunnel->ip4 = iph->saddr; @@ -2660,10 +2705,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb) INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire); - spin_lock_bh(&amt->lock); list_add_tail_rcu(&tunnel->list, &amt->tunnel_list); tunnel->key = amt->key; - amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true); + __amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true); amt->nr_tunnels++; mod_delayed_work(amt_wq, &tunnel->gc_wq, msecs_to_jiffies(amt_gmi(amt))); @@ -2688,6 +2732,38 @@ send: return false; } +static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb) +{ + int type = amt_parse_type(skb); + int err = 1; + + if (type == -1) + goto drop; + + if (amt->mode == AMT_MODE_GATEWAY) { + switch (type) { + case AMT_MSG_ADVERTISEMENT: + err = amt_advertisement_handler(amt, skb); + break; + case AMT_MSG_MEMBERSHIP_QUERY: + err = amt_membership_query_handler(amt, skb); + if (!err) + return; + break; + default: + 
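+ /* amt_rcv() only queues advertisement and membership query
+ * messages for a gateway, so any other type is unexpected
+ * here and is dropped.
+ */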
netdev_dbg(amt->dev, "Invalid type of Gateway\n"); + break; + } + } +drop: + if (err) { + amt->dev->stats.rx_dropped++; + kfree_skb(skb); + } else { + consume_skb(skb); + } +} + static int amt_rcv(struct sock *sk, struct sk_buff *skb) { struct amt_dev *amt; @@ -2719,8 +2795,12 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb) err = true; goto drop; } - err = amt_advertisement_handler(amt, skb); - break; + if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) { + netdev_dbg(amt->dev, "AMT Event queue full\n"); + err = true; + goto drop; + } + goto out; case AMT_MSG_MULTICAST_DATA: if (iph->saddr != amt->remote_ip) { netdev_dbg(amt->dev, "Invalid Relay IP\n"); @@ -2738,11 +2818,12 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb) err = true; goto drop; } - err = amt_membership_query_handler(amt, skb); - if (err) + if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) { + netdev_dbg(amt->dev, "AMT Event queue full\n"); + err = true; goto drop; - else - goto out; + } + goto out; default: err = true; netdev_dbg(amt->dev, "Invalid type of Gateway\n"); @@ -2780,6 +2861,46 @@ out: return 0; } +static void amt_event_work(struct work_struct *work) +{ + struct amt_dev *amt = container_of(work, struct amt_dev, event_wq); + struct sk_buff *skb; + u8 event; + int i; + + for (i = 0; i < AMT_MAX_EVENTS; i++) { + spin_lock_bh(&amt->lock); + if (amt->nr_events == 0) { + spin_unlock_bh(&amt->lock); + return; + } + event = amt->events[amt->event_idx].event; + skb = amt->events[amt->event_idx].skb; + amt->events[amt->event_idx].event = AMT_EVENT_NONE; + amt->events[amt->event_idx].skb = NULL; + amt->nr_events--; + amt->event_idx++; + amt->event_idx %= AMT_MAX_EVENTS; + spin_unlock_bh(&amt->lock); + + switch (event) { + case AMT_EVENT_RECEIVE: + amt_gw_rcv(amt, skb); + break; + case AMT_EVENT_SEND_DISCOVERY: + amt_event_send_discovery(amt); + break; + case AMT_EVENT_SEND_REQUEST: + amt_event_send_request(amt); + break; + default: + if (skb) + kfree_skb(skb); + break; + } + } +} + static int amt_err_lookup(struct sock *sk, struct sk_buff *skb) { struct amt_dev *amt; @@ -2804,7 +2925,7 @@ static int amt_err_lookup(struct sock *sk, struct sk_buff *skb) break; case AMT_MSG_REQUEST: case AMT_MSG_MEMBERSHIP_UPDATE: - if (amt->status >= AMT_STATUS_RECEIVED_ADVERTISEMENT) + if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT) mod_delayed_work(amt_wq, &amt->req_wq, 0); break; default: @@ -2867,6 +2988,8 @@ static int amt_dev_open(struct net_device *dev) amt->ready4 = false; amt->ready6 = false; + amt->event_idx = 0; + amt->nr_events = 0; err = amt_socket_create(amt); if (err) @@ -2874,6 +2997,7 @@ static int amt_dev_open(struct net_device *dev) amt->req_cnt = 0; amt->remote_ip = 0; + amt->nonce = 0; get_random_bytes(&amt->key, sizeof(siphash_key_t)); amt->status = AMT_STATUS_INIT; @@ -2892,6 +3016,8 @@ static int amt_dev_stop(struct net_device *dev) struct amt_dev *amt = netdev_priv(dev); struct amt_tunnel_list *tunnel, *tmp; struct socket *sock; + struct sk_buff *skb; + int i; cancel_delayed_work_sync(&amt->req_wq); cancel_delayed_work_sync(&amt->discovery_wq); @@ -2904,6 +3030,15 @@ static int amt_dev_stop(struct net_device *dev) if (sock) udp_tunnel_sock_release(sock); + cancel_work_sync(&amt->event_wq); + for (i = 0; i < AMT_MAX_EVENTS; i++) { + skb = amt->events[i].skb; + if (skb) + kfree_skb(skb); + amt->events[i].event = AMT_EVENT_NONE; + amt->events[i].skb = NULL; + } + amt->ready4 = false; amt->ready6 = false; amt->req_cnt = 0; @@ -3095,7 +3230,7 @@ static int amt_newlink(struct net 
*net, struct net_device *dev, goto err; } if (amt->mode == AMT_MODE_RELAY) { - amt->qrv = amt->net->ipv4.sysctl_igmp_qrv; + amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv); amt->qri = 10; dev->needed_headroom = amt->stream_dev->needed_headroom + AMT_RELAY_HLEN; @@ -3146,8 +3281,8 @@ static int amt_newlink(struct net *net, struct net_device *dev, INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work); INIT_DELAYED_WORK(&amt->req_wq, amt_req_work); INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work); + INIT_WORK(&amt->event_wq, amt_event_work); INIT_LIST_HEAD(&amt->tunnel_list); - return 0; err: dev_put(amt->stream_dev); @@ -3280,7 +3415,7 @@ static int __init amt_init(void) if (err < 0) goto unregister_notifier; - amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1); + amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0); if (!amt_wq) { err = -ENOMEM; goto rtnl_unregister; diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index a86b1f71762e..d7fb33c078e8 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2228,7 +2228,8 @@ void bond_3ad_unbind_slave(struct slave *slave) temp_aggregator->num_of_ports--; if (__agg_active_ports(temp_aggregator) == 0) { select_new_active_agg = temp_aggregator->is_active; - ad_clear_agg(temp_aggregator); + if (temp_aggregator->num_of_ports == 0) + ad_clear_agg(temp_aggregator); if (select_new_active_agg) { slave_info(bond->dev, slave->dev, "Removing an active aggregator\n"); /* select new active aggregator */ diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 303c8d32d451..007d43e46dcb 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1302,12 +1302,12 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled) return res; if (rlb_enabled) { - bond->alb_info.rlb_enabled = 1; res = rlb_initialize(bond); if (res) { tlb_deinitialize(bond); return res; } + bond->alb_info.rlb_enabled = 1; } else { bond->alb_info.rlb_enabled = 0; } diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index 5458f57177a0..0b0f234b0b50 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -722,13 +722,21 @@ static int cfv_probe(struct virtio_device *vdev) /* Carrier is off until netdevice is opened */ netif_carrier_off(netdev); + /* serialize netdev register + virtio_device_ready() with ndo_open() */ + rtnl_lock(); + /* register Netdev */ - err = register_netdev(netdev); + err = register_netdevice(netdev); if (err) { + rtnl_unlock(); dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err); goto err; } + virtio_device_ready(vdev); + + rtnl_unlock(); + debugfs_init(cfv); return 0; diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index 76df4807d366..4c47c1055eff 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -1646,7 +1646,6 @@ static int grcan_probe(struct platform_device *ofdev) */ sysid_parent = of_find_node_by_path("/ambapp0"); if (sysid_parent) { - of_node_get(sysid_parent); err = of_property_read_u32(sysid_parent, "systemid", &sysid); if (!err && ((sysid & GRLIB_VERSION_MASK) >= GRCAN_TXBUG_SAFE_GRLIB_VERSION)) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 5d0c82d8b9a9..7931f9c71ef3 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -529,7 +529,7 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs) /* acknowledge rx fifo 0 */ m_can_write(cdev, M_CAN_RXF0A, fgi); - timestamp = 
FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc); + timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16; m_can_receive_skb(cdev, skb, timestamp); @@ -1030,7 +1030,7 @@ static int m_can_echo_tx_event(struct net_device *dev) } msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe); - timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe); + timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16; /* ack txe element */ m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK, @@ -1351,7 +1351,9 @@ static void m_can_chip_config(struct net_device *dev) /* enable internal timestamp generation, with a prescalar of 16. The * prescalar is applied to the nominal bit timing */ - m_can_write(cdev, M_CAN_TSCC, FIELD_PREP(TSCC_TCP_MASK, 0xf)); + m_can_write(cdev, M_CAN_TSCC, + FIELD_PREP(TSCC_TCP_MASK, 0xf) | + FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL)); m_can_config_endisable(cdev, false); diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 40a11445d021..cb0321ea853c 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1332,7 +1332,10 @@ static void rcar_canfd_set_bittiming(struct net_device *dev) cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) | RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2)); - rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg); + if (is_v3u(gpriv)) + rcar_canfd_write(priv->base, RCANFD_V3U_DCFG(ch), cfg); + else + rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg); netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n", brp, sjw, tseg1, tseg2); } else { @@ -1840,6 +1843,7 @@ static int rcar_canfd_probe(struct platform_device *pdev) of_child = of_get_child_by_name(pdev->dev.of_node, name); if (of_child && of_device_is_available(of_child)) channels_mask |= BIT(i); + of_node_put(of_child); } if (chip_id != RENESAS_RZG2L) { diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index b21252390216..bc6518504fd4 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -12,6 +12,7 @@ // Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> // +#include <asm/unaligned.h> #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/device.h> @@ -1650,6 +1651,7 @@ static int mcp251xfd_stop(struct net_device *ndev) netif_stop_queue(ndev); set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); hrtimer_cancel(&priv->rx_irq_timer); + hrtimer_cancel(&priv->tx_irq_timer); mcp251xfd_chip_interrupts_disable(priv); free_irq(ndev->irq, priv); can_rx_offload_disable(&priv->offload); @@ -1688,8 +1690,8 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv) u32 osc; int err; - /* The OSC_LPMEN is only supported on MCP2518FD, so use it to - * autodetect the model. + /* The OSC_LPMEN is only supported on MCP2518FD and MCP251863, + * so use it to autodetect the model. */ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC, MCP251XFD_REG_OSC_LPMEN, @@ -1701,10 +1703,18 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv) if (err) return err; - if (osc & MCP251XFD_REG_OSC_LPMEN) - devtype_data = &mcp251xfd_devtype_data_mcp2518fd; - else + if (osc & MCP251XFD_REG_OSC_LPMEN) { + /* We cannot distinguish between MCP2518FD and + * MCP251863. If firmware specifies MCP251863, keep + * it, otherwise set to MCP2518FD. 
+ */ + if (mcp251xfd_is_251863(priv)) + devtype_data = &mcp251xfd_devtype_data_mcp251863; + else + devtype_data = &mcp251xfd_devtype_data_mcp2518fd; + } else { devtype_data = &mcp251xfd_devtype_data_mcp2517fd; + } if (!mcp251xfd_is_251XFD(priv) && priv->devtype_data.model != devtype_data->model) { @@ -1777,7 +1787,7 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id, xfer[0].len = sizeof(buf_tx->cmd); xfer[0].speed_hz = priv->spi_max_speed_hz_slow; xfer[1].rx_buf = buf_rx->data; - xfer[1].len = sizeof(dev_id); + xfer[1].len = sizeof(*dev_id); xfer[1].speed_hz = priv->spi_max_speed_hz_fast; mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID); @@ -1786,7 +1796,7 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id, if (err) goto out_kfree_buf_tx; - *dev_id = be32_to_cpup((__be32 *)buf_rx->data); + *dev_id = get_unaligned_le32(buf_rx->data); *effective_speed_hz_slow = xfer[0].effective_speed_hz; *effective_speed_hz_fast = xfer[1].effective_speed_hz; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c index 217510c12af5..92b7bc7f14b9 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c @@ -334,19 +334,21 @@ mcp251xfd_regmap_crc_read(void *context, * register. It increments once per SYS clock tick, * which is 20 or 40 MHz. * - * Observation shows that if the lowest byte (which is - * transferred first on the SPI bus) of that register - * is 0x00 or 0x80 the calculated CRC doesn't always - * match the transferred one. + * Observation on the mcp2518fd shows that if the + * lowest byte (which is transferred first on the SPI + * bus) of that register is 0x00 or 0x80 the + * calculated CRC doesn't always match the transferred + * one. On the mcp2517fd this problem is not limited + * to the first byte being 0x00 or 0x80. * * If the highest bit in the lowest byte is flipped * the transferred CRC matches the calculated one. We - * assume for now the CRC calculation in the chip - * works on wrong data and the transferred data is - * correct. + * assume for now the CRC operates on the correct + * data. */ if (reg == MCP251XFD_REG_TBC && - (buf_rx->data[0] == 0x0 || buf_rx->data[0] == 0x80)) { + ((buf_rx->data[0] & 0xf8) == 0x0 || + (buf_rx->data[0] & 0xf8) == 0x80)) { /* Flip highest bit in lowest byte of le32 */ buf_rx->data[0] ^= 0x80; @@ -356,10 +358,8 @@ mcp251xfd_regmap_crc_read(void *context, val_len); if (!err) { /* If CRC is now correct, assume - * transferred data was OK, flip bit - * back to original value. + * flipped data is OK. 
*/ - buf_rx->data[0] ^= 0x80; goto out; } } diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index b29ba9138866..d3a658b444b5 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -268,6 +268,8 @@ struct gs_can { struct usb_anchor tx_submitted; atomic_t active_tx_urbs; + void *rxbuf[GS_MAX_RX_URBS]; + dma_addr_t rxbuf_dma[GS_MAX_RX_URBS]; }; /* usb interface struct */ @@ -742,6 +744,7 @@ static int gs_can_open(struct net_device *netdev) for (i = 0; i < GS_MAX_RX_URBS; i++) { struct urb *urb; u8 *buf; + dma_addr_t buf_dma; /* alloc rx urb */ urb = usb_alloc_urb(0, GFP_KERNEL); @@ -752,7 +755,7 @@ static int gs_can_open(struct net_device *netdev) buf = usb_alloc_coherent(dev->udev, dev->parent->hf_size_rx, GFP_KERNEL, - &urb->transfer_dma); + &buf_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); @@ -760,6 +763,8 @@ static int gs_can_open(struct net_device *netdev) return -ENOMEM; } + urb->transfer_dma = buf_dma; + /* fill, anchor, and submit rx urb */ usb_fill_bulk_urb(urb, dev->udev, @@ -781,10 +786,17 @@ static int gs_can_open(struct net_device *netdev) "usb_submit failed (err=%d)\n", rc); usb_unanchor_urb(urb); + usb_free_coherent(dev->udev, + sizeof(struct gs_host_frame), + buf, + buf_dma); usb_free_urb(urb); break; } + dev->rxbuf[i] = buf; + dev->rxbuf_dma[i] = buf_dma; + /* Drop reference, * USB core will take care of freeing it */ @@ -842,13 +854,20 @@ static int gs_can_close(struct net_device *netdev) int rc; struct gs_can *dev = netdev_priv(netdev); struct gs_usb *parent = dev->parent; + unsigned int i; netif_stop_queue(netdev); /* Stop polling */ parent->active_channels--; - if (!parent->active_channels) + if (!parent->active_channels) { usb_kill_anchored_urbs(&parent->rx_submitted); + for (i = 0; i < GS_MAX_RX_URBS; i++) + usb_free_coherent(dev->udev, + sizeof(struct gs_host_frame), + dev->rxbuf[i], + dev->rxbuf_dma[i]); + } /* Stop sending URBs */ usb_kill_anchored_urbs(&dev->tx_submitted); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h index 3a49257f9fa6..eefcbe3aadce 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h @@ -35,9 +35,10 @@ #define KVASER_USB_RX_BUFFER_SIZE 3072 #define KVASER_USB_MAX_NET_DEVICES 5 -/* USB devices features */ -#define KVASER_USB_HAS_SILENT_MODE BIT(0) -#define KVASER_USB_HAS_TXRX_ERRORS BIT(1) +/* Kvaser USB device quirks */ +#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0) +#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1) +#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2) /* Device capabilities */ #define KVASER_USB_CAP_BERR_CAP 0x01 @@ -65,12 +66,7 @@ struct kvaser_usb_dev_card_data_hydra { struct kvaser_usb_dev_card_data { u32 ctrlmode_supported; u32 capabilities; - union { - struct { - enum kvaser_usb_leaf_family family; - } leaf; - struct kvaser_usb_dev_card_data_hydra hydra; - }; + struct kvaser_usb_dev_card_data_hydra hydra; }; /* Context for an outstanding, not yet ACKed, transmission */ @@ -83,7 +79,7 @@ struct kvaser_usb { struct usb_device *udev; struct usb_interface *intf; struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES]; - const struct kvaser_usb_dev_ops *ops; + const struct kvaser_usb_driver_info *driver_info; const struct kvaser_usb_dev_cfg *cfg; struct usb_endpoint_descriptor *bulk_in, *bulk_out; @@ -165,6 +161,12 @@ struct kvaser_usb_dev_ops { u16 transid); }; +struct kvaser_usb_driver_info { + u32 quirks; + enum kvaser_usb_leaf_family 
family; + const struct kvaser_usb_dev_ops *ops; +}; + struct kvaser_usb_dev_cfg { const struct can_clock clock; const unsigned int timestamp_freq; @@ -184,4 +186,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd, int len); int kvaser_usb_can_rx_over_error(struct net_device *netdev); + +extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const; + #endif /* KVASER_USB_H */ diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index e67658b53d02..f211bfcb1d97 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -61,8 +61,6 @@ #define USB_USBCAN_R_V2_PRODUCT_ID 294 #define USB_LEAF_LIGHT_R_V2_PRODUCT_ID 295 #define USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID 296 -#define USB_LEAF_PRODUCT_ID_END \ - USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID /* Kvaser USBCan-II devices product ids */ #define USB_USBCAN_REVB_PRODUCT_ID 2 @@ -89,116 +87,153 @@ #define USB_USBCAN_PRO_4HS_PRODUCT_ID 276 #define USB_HYBRID_CANLIN_PRODUCT_ID 277 #define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 278 -#define USB_HYDRA_PRODUCT_ID_END \ - USB_HYBRID_PRO_CANLIN_PRODUCT_ID -static inline bool kvaser_is_leaf(const struct usb_device_id *id) -{ - return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID && - id->idProduct <= USB_CAN_R_PRODUCT_ID) || - (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID && - id->idProduct <= USB_LEAF_PRODUCT_ID_END); -} +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = { + .quirks = 0, + .ops = &kvaser_usb_hydra_dev_ops, +}; -static inline bool kvaser_is_usbcan(const struct usb_device_id *id) -{ - return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID && - id->idProduct <= USB_MEMORATOR_PRODUCT_ID; -} +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_usbcan = { + .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS | + KVASER_USB_QUIRK_HAS_SILENT_MODE, + .family = KVASER_USBCAN, + .ops = &kvaser_usb_leaf_dev_ops, +}; -static inline bool kvaser_is_hydra(const struct usb_device_id *id) -{ - return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID && - id->idProduct <= USB_HYDRA_PRODUCT_ID_END; -} +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf = { + .quirks = KVASER_USB_QUIRK_IGNORE_CLK_FREQ, + .family = KVASER_LEAF, + .ops = &kvaser_usb_leaf_dev_ops, +}; + +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err = { + .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS | + KVASER_USB_QUIRK_IGNORE_CLK_FREQ, + .family = KVASER_LEAF, + .ops = &kvaser_usb_leaf_dev_ops, +}; + +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_listen = { + .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS | + KVASER_USB_QUIRK_HAS_SILENT_MODE | + KVASER_USB_QUIRK_IGNORE_CLK_FREQ, + .family = KVASER_LEAF, + .ops = &kvaser_usb_leaf_dev_ops, +}; + +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = { + .quirks = 0, + .ops = &kvaser_usb_leaf_dev_ops, +}; static const struct usb_device_id kvaser_usb_table[] = { - /* Leaf USB product IDs */ - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) }, + /* Leaf M32C USB product IDs */ + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf }, { USB_DEVICE(KVASER_VENDOR_ID, 
USB_LEAF_PRO_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) }, - { 
USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID) }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, + + /* Leaf i.MX28 USB product IDs */ + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, /* USBCANII USB product IDs */ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, /* Minihydra USB product IDs */ - { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, 
USB_HYBRID_PRO_CANLIN_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { } }; MODULE_DEVICE_TABLE(usb, kvaser_usb_table); @@ -285,6 +320,7 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev) static void kvaser_usb_read_bulk_callback(struct urb *urb) { struct kvaser_usb *dev = urb->context; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int err; unsigned int i; @@ -301,8 +337,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) goto resubmit_urb; } - dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer, - urb->actual_length); + ops->dev_read_bulk_callback(dev, urb->transfer_buffer, + urb->actual_length); resubmit_urb: usb_fill_bulk_urb(urb, dev->udev, @@ -396,6 +432,7 @@ static int kvaser_usb_open(struct net_device *netdev) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int err; err = open_candev(netdev); @@ -406,11 +443,11 @@ static int kvaser_usb_open(struct net_device *netdev) if (err) goto error; - err = dev->ops->dev_set_opt_mode(priv); + err = ops->dev_set_opt_mode(priv); if (err) goto error; - err = dev->ops->dev_start_chip(priv); + err = ops->dev_start_chip(priv); 
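/*
 * [Editor's sketch, not part of the patch; all demo_* names are
 * hypothetical.] The refactor running through this file drops the
 * kvaser_is_leaf()/kvaser_is_usbcan()/kvaser_is_hydra() idProduct range
 * checks and instead hangs a const kvaser_usb_driver_info pointer off each
 * match-table entry's .driver_info field; probe() and callers like the one
 * above then fetch quirks and ops straight from the matched entry. A
 * minimal self-contained version of that pattern:
 */
#include <linux/module.h>
#include <linux/usb.h>

struct demo_ops {
	int (*start_chip)(struct usb_interface *intf);
};

struct demo_driver_info {
	u32 quirks;                 /* per-model quirk bits */
	const struct demo_ops *ops; /* per-family callbacks */
};

static int demo_start_chip(struct usb_interface *intf)
{
	return 0;
}

static const struct demo_ops demo_dev_ops = {
	.start_chip = demo_start_chip,
};

static const struct demo_driver_info demo_info = {
	.ops = &demo_dev_ops,
};

static const struct usb_device_id demo_table[] = {
	{ USB_DEVICE(0xffff, 0x0001), /* placeholder VID/PID */
	  .driver_info = (kernel_ulong_t)&demo_info },
	{ }
};
MODULE_DEVICE_TABLE(usb, demo_table);

static int demo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	const struct demo_driver_info *info =
		(const struct demo_driver_info *)id->driver_info;

	/* No driver_info means the entry is not a supported device. */
	if (!info)
		return -ENODEV;

	/* Model-specific behaviour comes from the table, not ID ranges. */
	return info->ops->start_chip(intf);
}

static void demo_disconnect(struct usb_interface *intf)
{
}

static struct usb_driver demo_driver = {
	.name = "demo",
	.probe = demo_probe,
	.disconnect = demo_disconnect,
	.id_table = demo_table,
};
module_usb_driver(demo_driver);
MODULE_LICENSE("GPL");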
if (err) { netdev_warn(netdev, "Cannot start device, error %d\n", err); goto error; @@ -467,22 +504,23 @@ static int kvaser_usb_close(struct net_device *netdev) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int err; netif_stop_queue(netdev); - err = dev->ops->dev_flush_queue(priv); + err = ops->dev_flush_queue(priv); if (err) netdev_warn(netdev, "Cannot flush queue, error %d\n", err); - if (dev->ops->dev_reset_chip) { - err = dev->ops->dev_reset_chip(dev, priv->channel); + if (ops->dev_reset_chip) { + err = ops->dev_reset_chip(dev, priv->channel); if (err) netdev_warn(netdev, "Cannot reset card, error %d\n", err); } - err = dev->ops->dev_stop_chip(priv); + err = ops->dev_stop_chip(priv); if (err) netdev_warn(netdev, "Cannot stop device, error %d\n", err); @@ -521,6 +559,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; struct net_device_stats *stats = &netdev->stats; struct kvaser_usb_tx_urb_context *context = NULL; struct urb *urb; @@ -563,8 +602,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, goto freeurb; } - buf = dev->ops->dev_frame_to_cmd(priv, skb, &cmd_len, - context->echo_index); + buf = ops->dev_frame_to_cmd(priv, skb, &cmd_len, context->echo_index); if (!buf) { stats->tx_dropped++; dev_kfree_skb(skb); @@ -648,15 +686,16 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) } } -static int kvaser_usb_init_one(struct kvaser_usb *dev, - const struct usb_device_id *id, int channel) +static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) { struct net_device *netdev; struct kvaser_usb_net_priv *priv; + const struct kvaser_usb_driver_info *driver_info = dev->driver_info; + const struct kvaser_usb_dev_ops *ops = driver_info->ops; int err; - if (dev->ops->dev_reset_chip) { - err = dev->ops->dev_reset_chip(dev, channel); + if (ops->dev_reset_chip) { + err = ops->dev_reset_chip(dev, channel); if (err) return err; } @@ -685,20 +724,19 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, priv->can.state = CAN_STATE_STOPPED; priv->can.clock.freq = dev->cfg->clock.freq; priv->can.bittiming_const = dev->cfg->bittiming_const; - priv->can.do_set_bittiming = dev->ops->dev_set_bittiming; - priv->can.do_set_mode = dev->ops->dev_set_mode; - if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) || + priv->can.do_set_bittiming = ops->dev_set_bittiming; + priv->can.do_set_mode = ops->dev_set_mode; + if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) || (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP)) - priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter; - if (id->driver_info & KVASER_USB_HAS_SILENT_MODE) + priv->can.do_get_berr_counter = ops->dev_get_berr_counter; + if (driver_info->quirks & KVASER_USB_QUIRK_HAS_SILENT_MODE) priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported; if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) { priv->can.data_bittiming_const = dev->cfg->data_bittiming_const; - priv->can.do_set_data_bittiming = - dev->ops->dev_set_data_bittiming; + priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming; } netdev->flags |= IFF_ECHO; @@ -729,29 +767,22 @@ static int kvaser_usb_probe(struct usb_interface *intf, struct kvaser_usb *dev; int err; int 
i; + const struct kvaser_usb_driver_info *driver_info; + const struct kvaser_usb_dev_ops *ops; + + driver_info = (const struct kvaser_usb_driver_info *)id->driver_info; + if (!driver_info) + return -ENODEV; dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; - if (kvaser_is_leaf(id)) { - dev->card_data.leaf.family = KVASER_LEAF; - dev->ops = &kvaser_usb_leaf_dev_ops; - } else if (kvaser_is_usbcan(id)) { - dev->card_data.leaf.family = KVASER_USBCAN; - dev->ops = &kvaser_usb_leaf_dev_ops; - } else if (kvaser_is_hydra(id)) { - dev->ops = &kvaser_usb_hydra_dev_ops; - } else { - dev_err(&intf->dev, - "Product ID (%d) is not a supported Kvaser USB device\n", - id->idProduct); - return -ENODEV; - } - dev->intf = intf; + dev->driver_info = driver_info; + ops = driver_info->ops; - err = dev->ops->dev_setup_endpoints(dev); + err = ops->dev_setup_endpoints(dev); if (err) { dev_err(&intf->dev, "Cannot get usb endpoint(s)"); return err; @@ -765,22 +796,22 @@ static int kvaser_usb_probe(struct usb_interface *intf, dev->card_data.ctrlmode_supported = 0; dev->card_data.capabilities = 0; - err = dev->ops->dev_init_card(dev); + err = ops->dev_init_card(dev); if (err) { dev_err(&intf->dev, "Failed to initialize card, error %d\n", err); return err; } - err = dev->ops->dev_get_software_info(dev); + err = ops->dev_get_software_info(dev); if (err) { dev_err(&intf->dev, "Cannot get software info, error %d\n", err); return err; } - if (dev->ops->dev_get_software_details) { - err = dev->ops->dev_get_software_details(dev); + if (ops->dev_get_software_details) { + err = ops->dev_get_software_details(dev); if (err) { dev_err(&intf->dev, "Cannot get software details, error %d\n", err); @@ -798,14 +829,14 @@ static int kvaser_usb_probe(struct usb_interface *intf, dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs); - err = dev->ops->dev_get_card_info(dev); + err = ops->dev_get_card_info(dev); if (err) { dev_err(&intf->dev, "Cannot get card info, error %d\n", err); return err; } - if (dev->ops->dev_get_capabilities) { - err = dev->ops->dev_get_capabilities(dev); + if (ops->dev_get_capabilities) { + err = ops->dev_get_capabilities(dev); if (err) { dev_err(&intf->dev, "Cannot get capabilities, error %d\n", err); @@ -815,7 +846,7 @@ static int kvaser_usb_probe(struct usb_interface *intf, } for (i = 0; i < dev->nchannels; i++) { - err = kvaser_usb_init_one(dev, id, i); + err = kvaser_usb_init_one(dev, i); if (err) { kvaser_usb_remove_interfaces(dev); return err; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index a26823c5b62a..5d70844ac030 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -375,7 +375,7 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = { .brp_inc = 1, }; -static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = { +const struct can_bittiming_const kvaser_usb_flexc_bittiming_const = { .name = "kvaser_usb_flex", .tseg1_min = 4, .tseg1_max = 16, @@ -2052,7 +2052,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = { .freq = 24 * MEGA /* Hz */, }, .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c, + .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = { diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c 
b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index c805b999c543..cc809ecd1e62 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -101,16 +101,6 @@ #define USBCAN_ERROR_STATE_RX_ERROR BIT(1) #define USBCAN_ERROR_STATE_BUSERROR BIT(2) -/* bittiming parameters */ -#define KVASER_USB_TSEG1_MIN 1 -#define KVASER_USB_TSEG1_MAX 16 -#define KVASER_USB_TSEG2_MIN 1 -#define KVASER_USB_TSEG2_MAX 8 -#define KVASER_USB_SJW_MAX 4 -#define KVASER_USB_BRP_MIN 1 -#define KVASER_USB_BRP_MAX 64 -#define KVASER_USB_BRP_INC 1 - /* ctrl modes */ #define KVASER_CTRL_MODE_NORMAL 1 #define KVASER_CTRL_MODE_SILENT 2 @@ -343,48 +333,68 @@ struct kvaser_usb_err_summary { }; }; -static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = { - .name = "kvaser_usb", - .tseg1_min = KVASER_USB_TSEG1_MIN, - .tseg1_max = KVASER_USB_TSEG1_MAX, - .tseg2_min = KVASER_USB_TSEG2_MIN, - .tseg2_max = KVASER_USB_TSEG2_MAX, - .sjw_max = KVASER_USB_SJW_MAX, - .brp_min = KVASER_USB_BRP_MIN, - .brp_max = KVASER_USB_BRP_MAX, - .brp_inc = KVASER_USB_BRP_INC, +static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = { + .name = "kvaser_usb_ucii", + .tseg1_min = 4, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 16, + .brp_inc = 1, +}; + +static const struct can_bittiming_const kvaser_usb_leaf_m32c_bittiming_const = { + .name = "kvaser_usb_leaf", + .tseg1_min = 3, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 2, + .brp_max = 128, + .brp_inc = 2, }; -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = { +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = { .clock = { .freq = 8 * MEGA /* Hz */, }, .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_leaf_bittiming_const, + .bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = { + .clock = { + .freq = 16 * MEGA /* Hz */, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const, }; -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = { +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = { .clock = { .freq = 16 * MEGA /* Hz */, }, .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_leaf_bittiming_const, + .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = { +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = { .clock = { .freq = 24 * MEGA /* Hz */, }, .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_leaf_bittiming_const, + .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = { +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = { .clock = { .freq = 32 * MEGA /* Hz */, }, .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_leaf_bittiming_const, + .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; static void * @@ -404,7 +414,7 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, sizeof(struct kvaser_cmd_tx_can); cmd->u.tx_can.channel = priv->channel; - switch (dev->card_data.leaf.family) { + switch (dev->driver_info->family) { case KVASER_LEAF: cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags; break; @@ -524,16 +534,23 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct 
kvaser_usb *dev, dev->fw_version = le32_to_cpu(softinfo->fw_version); dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx); - switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { - case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: - dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz; - break; - case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: - dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz; - break; - case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: - dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz; - break; + if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) { + /* Firmware expects bittiming parameters calculated for 16MHz + * clock, regardless of the actual clock + */ + dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg; + } else { + switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { + case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_16mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_24mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_32mhz; + break; + } } } @@ -550,7 +567,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) if (err) return err; - switch (dev->card_data.leaf.family) { + switch (dev->driver_info->family) { case KVASER_LEAF: kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo); break; @@ -558,7 +575,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version); dev->max_tx_urbs = le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx); - dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz; + dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg; break; } @@ -597,7 +614,7 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev) dev->nchannels = cmd.u.cardinfo.nchannels; if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES || - (dev->card_data.leaf.family == KVASER_USBCAN && + (dev->driver_info->family == KVASER_USBCAN && dev->nchannels > MAX_USBCAN_NET_DEVICES)) return -EINVAL; @@ -730,7 +747,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, new_state < CAN_STATE_BUS_OFF) priv->can.can_stats.restarts++; - switch (dev->card_data.leaf.family) { + switch (dev->driver_info->family) { case KVASER_LEAF: if (es->leaf.error_factor) { priv->can.can_stats.bus_error++; @@ -809,7 +826,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, } } - switch (dev->card_data.leaf.family) { + switch (dev->driver_info->family) { case KVASER_LEAF: if (es->leaf.error_factor) { cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; @@ -999,7 +1016,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, stats = &priv->netdev->stats; if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) && - (dev->card_data.leaf.family == KVASER_LEAF && + (dev->driver_info->family == KVASER_LEAF && cmd->id == CMD_LEAF_LOG_MESSAGE)) { kvaser_usb_leaf_leaf_rx_error(dev, cmd); return; @@ -1015,7 +1032,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, return; } - switch (dev->card_data.leaf.family) { + switch (dev->driver_info->family) { case KVASER_LEAF: rx_data = cmd->u.leaf.rx_can.data; break; @@ -1030,7 +1047,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, return; } - if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id == + if (dev->driver_info->family == KVASER_LEAF && cmd->id == CMD_LEAF_LOG_MESSAGE) { cf->can_id = 
le32_to_cpu(cmd->u.leaf.log_message.id); if (cf->can_id & KVASER_EXTENDED_FRAME) @@ -1128,14 +1145,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, break; case CMD_LEAF_LOG_MESSAGE: - if (dev->card_data.leaf.family != KVASER_LEAF) + if (dev->driver_info->family != KVASER_LEAF) goto warn; kvaser_usb_leaf_rx_can_msg(dev, cmd); break; case CMD_CHIP_STATE_EVENT: case CMD_CAN_ERROR_EVENT: - if (dev->card_data.leaf.family == KVASER_LEAF) + if (dev->driver_info->family == KVASER_LEAF) kvaser_usb_leaf_leaf_rx_error(dev, cmd); else kvaser_usb_leaf_usbcan_rx_error(dev, cmd); @@ -1147,12 +1164,12 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, /* Ignored commands */ case CMD_USBCAN_CLOCK_OVERFLOW_EVENT: - if (dev->card_data.leaf.family != KVASER_USBCAN) + if (dev->driver_info->family != KVASER_USBCAN) goto warn; break; case CMD_FLUSH_QUEUE_REPLY: - if (dev->card_data.leaf.family != KVASER_LEAF) + if (dev->driver_info->family != KVASER_LEAF) goto warn; break; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 8a3b7b103ca4..e179d311aa28 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -258,7 +258,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { .tseg2_min = 1, .tseg2_max = 128, .sjw_max = 128, - .brp_min = 2, + .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; @@ -271,7 +271,7 @@ static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = { .tseg2_min = 1, .tseg2_max = 16, .sjw_max = 16, - .brp_min = 2, + .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 87e81c636339..be0edfa093d0 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -878,6 +878,11 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, if (duplex == DUPLEX_FULL) reg |= DUPLX_MODE; + if (tx_pause) + reg |= TXFLOW_CNTL; + if (rx_pause) + reg |= RXFLOW_CNTL; + core_writel(priv, reg, offset); } diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c index 2572c6087bb5..b28baab6d56a 100644 --- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c +++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c @@ -300,6 +300,7 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek) const char *label, *state; int ret = -EINVAL; + of_node_get(hellcreek->dev->of_node); leds = of_find_node_by_name(hellcreek->dev->of_node, "leds"); if (!leds) { dev_err(hellcreek->dev, "No LEDs specified in device tree!\n"); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 9ca8c8d7740f..92a500e1ccd2 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -1038,18 +1038,21 @@ int ksz_switch_register(struct ksz_device *dev, ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports"); if (!ports) ports = of_get_child_by_name(dev->dev->of_node, "ports"); - if (ports) + if (ports) { for_each_available_child_of_node(ports, port) { if (of_property_read_u32(port, "reg", &port_num)) continue; if (!(dev->port_mask & BIT(port_num))) { of_node_put(port); + of_node_put(ports); return -EINVAL; } of_get_phy_mode(port, &dev->ports[port_num].interface); } + of_node_put(ports); + } dev->synclko_125 = of_property_read_bool(dev->dev->of_node, "microchip,synclko-125"); dev->synclko_disable = of_property_read_bool(dev->dev->of_node, diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c 
b/drivers/net/dsa/ocelot/felix_vsc9959.c index 570d0204b7be..9c27b9b0128d 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1886,6 +1886,8 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot, static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index, struct felix_stream_filter_counters *counters) { + mutex_lock(&ocelot->stats_lock); + ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index), SYS_STAT_CFG_STAT_VIEW_M, SYS_STAT_CFG); @@ -1900,6 +1902,8 @@ static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index, SYS_STAT_CFG_STAT_VIEW(index) | SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10), SYS_STAT_CFG); + + mutex_unlock(&ocelot->stats_lock); } static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port, diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 72b6fc1932b5..698c7d1fb45c 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -3382,12 +3382,28 @@ static const struct of_device_id sja1105_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, sja1105_dt_ids); +static const struct spi_device_id sja1105_spi_ids[] = { + { "sja1105e" }, + { "sja1105t" }, + { "sja1105p" }, + { "sja1105q" }, + { "sja1105r" }, + { "sja1105s" }, + { "sja1110a" }, + { "sja1110b" }, + { "sja1110c" }, + { "sja1110d" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, sja1105_spi_ids); + static struct spi_driver sja1105_driver = { .driver = { .name = "sja1105", .owner = THIS_MODULE, .of_match_table = of_match_ptr(sja1105_dt_ids), }, + .id_table = sja1105_spi_ids, .probe = sja1105_probe, .remove = sja1105_remove, .shutdown = sja1105_shutdown, diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c index 3110895358d8..97a92e6da60d 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-spi.c +++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c @@ -205,10 +205,20 @@ static const struct of_device_id vsc73xx_of_match[] = { }; MODULE_DEVICE_TABLE(of, vsc73xx_of_match); +static const struct spi_device_id vsc73xx_spi_ids[] = { + { "vsc7385" }, + { "vsc7388" }, + { "vsc7395" }, + { "vsc7398" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, vsc73xx_spi_ids); + static struct spi_driver vsc73xx_spi_driver = { .probe = vsc73xx_spi_probe, .remove = vsc73xx_spi_remove, .shutdown = vsc73xx_spi_shutdown, + .id_table = vsc73xx_spi_ids, .driver = { .name = "vsc73xx-spi", .of_match_table = vsc73xx_of_match, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 831833911a52..8647125d60ae 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -379,7 +379,7 @@ static void aq_pci_shutdown(struct pci_dev *pdev) } } -static int aq_suspend_common(struct device *dev, bool deep) +static int aq_suspend_common(struct device *dev) { struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev)); @@ -392,17 +392,15 @@ static int aq_suspend_common(struct device *dev, bool deep) if (netif_running(nic->ndev)) aq_nic_stop(nic); - if (deep) { - aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); - aq_nic_set_power(nic); - } + aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); + aq_nic_set_power(nic); rtnl_unlock(); return 0; } -static int atl_resume_common(struct device *dev, bool deep) +static int atl_resume_common(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct aq_nic_s *nic; @@ -415,11 +413,6 @@ static int atl_resume_common(struct device *dev, 
bool deep) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - if (deep) { - /* Reinitialize Nic/Vecs objects */ - aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); - } - if (netif_running(nic->ndev)) { ret = aq_nic_init(nic); if (ret) @@ -444,22 +437,22 @@ err_exit: static int aq_pm_freeze(struct device *dev) { - return aq_suspend_common(dev, true); + return aq_suspend_common(dev); } static int aq_pm_suspend_poweroff(struct device *dev) { - return aq_suspend_common(dev, true); + return aq_suspend_common(dev); } static int aq_pm_thaw(struct device *dev) { - return atl_resume_common(dev, true); + return atl_resume_common(dev); } static int aq_pm_resume_restore(struct device *dev) { - return atl_resume_common(dev, true); + return atl_resume_common(dev); } static const struct dev_pm_ops aq_pm_ops = { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 56b46b8206a7..cf9b00576ed3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7790,7 +7790,7 @@ hwrm_dbg_qcaps_exit: static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); -static int bnxt_hwrm_func_qcaps(struct bnxt *bp) +int bnxt_hwrm_func_qcaps(struct bnxt *bp) { int rc; @@ -10065,7 +10065,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) resc_reinit = true; - if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) + if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE || + test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) fw_reset = true; else bnxt_remap_fw_health_regs(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index a1dca8c58f54..075c6206325c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -2314,6 +2314,7 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset); int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp); int bnxt_hwrm_free_wol_fltr(struct bnxt *bp); int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all); +int bnxt_hwrm_func_qcaps(struct bnxt *bp); int bnxt_hwrm_fw_set_time(struct bnxt *); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 3528ce9849e6..6b3d4f4c2a75 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -979,9 +979,11 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, if (rc) return rc; - rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH); - if (rc) - return rc; + if (BNXT_CHIP_P5(bp)) { + rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH); + if (rc) + return rc; + } return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 562f8f68a47d..7f3c0875b6f5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -76,14 +76,23 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts, u64 *ns) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u32 high_before, high_now, low; if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) return -EIO; + high_before = readl(bp->bar0 + ptp->refclk_mapped_regs[1]); ptp_read_system_prets(sts); - *ns = readl(bp->bar0 + 
ptp->refclk_mapped_regs[0]); + low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]); ptp_read_system_postts(sts); - *ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32; + high_now = readl(bp->bar0 + ptp->refclk_mapped_regs[1]); + if (high_now != high_before) { + ptp_read_system_prets(sts); + low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]); + ptp_read_system_postts(sts); + } + *ns = ((u64)high_now << 32) | low; + return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index ddf2f3963abe..a1a2c7a64fd5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -823,8 +823,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) goto err_out2; rc = pci_enable_sriov(bp->pdev, *num_vfs); - if (rc) + if (rc) { + bnxt_ulp_sriov_cfg(bp, 0); goto err_out2; + } return 0; @@ -832,6 +834,9 @@ err_out2: /* Free the resources reserved for various VF's */ bnxt_hwrm_func_vf_resource_free(bp, *num_vfs); + /* Restore the max resources */ + bnxt_hwrm_func_qcaps(bp); + err_out1: bnxt_free_vf_resources(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index f02fe906dedb..f53387ed0167 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -28,7 +28,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct xdp_buff *xdp) { struct skb_shared_info *sinfo; - struct bnxt_sw_tx_bd *tx_buf, *first_buf; + struct bnxt_sw_tx_bd *tx_buf; struct tx_bd *txbd; int num_frags = 0; u32 flags; @@ -43,13 +43,14 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, /* fill up the first buffer */ prod = txr->tx_prod; tx_buf = &txr->tx_buf_ring[prod]; - first_buf = tx_buf; tx_buf->nr_frags = num_frags; if (xdp) tx_buf->page = virt_to_head_page(xdp->data); txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; - flags = ((len) << TX_BD_LEN_SHIFT) | ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT); + flags = (len << TX_BD_LEN_SHIFT) | + ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) | + bnxt_lhint_arr[len >> 9]; txbd->tx_bd_len_flags_type = cpu_to_le32(flags); txbd->tx_bd_opaque = prod; txbd->tx_bd_haddr = cpu_to_le64(mapping); @@ -82,7 +83,6 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, flags = frag_len << TX_BD_LEN_SHIFT; txbd->tx_bd_len_flags_type = cpu_to_le32(flags); - txbd->tx_bd_opaque = prod; txbd->tx_bd_haddr = cpu_to_le64(frag_mapping); len = frag_len; @@ -96,7 +96,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, prod = NEXT_TX(prod); txr->tx_prod = prod; - return first_buf; + return tx_buf; } static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr, diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c index 4af5561cbfc5..ddfe9208529a 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c @@ -1236,8 +1236,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk, csk->sndbuf = newsk->sk_sndbuf; csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx; RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk), - sock_net(newsk)-> - ipv4.sysctl_tcp_window_scaling, + READ_ONCE(sock_net(newsk)-> + ipv4.sysctl_tcp_window_scaling), tp->window_clamp); neigh_release(n); inet_inherit_port(&tcp_hashinfo, lsk, newsk); @@ -1384,7 +1384,7 @@ static void 
chtls_pass_accept_request(struct sock *sk, #endif } if (req->tcpopt.wsf <= 14 && - sock_net(sk)->ipv4.sysctl_tcp_window_scaling) { + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) { inet_rsk(oreq)->wscale_ok = 1; inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf; } @@ -1392,7 +1392,7 @@ static void chtls_pass_accept_request(struct sock *sk, th_ecn = tcph->ece && tcph->cwr; if (th_ecn) { ect = !INET_ECN_is_not_ect(ip_dsfield); - ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn; + ecn_ok = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn); if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk)) inet_rsk(oreq)->ecn_ok = 1; } diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 528eb0f223b1..b4f5e57d0285 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -2287,7 +2287,7 @@ err: /* Uses sync mcc */ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, - u8 page_num, u8 *data) + u8 page_num, u32 off, u32 len, u8 *data) { struct be_dma_mem cmd; struct be_mcc_wrb *wrb; @@ -2321,10 +2321,10 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, req->port = cpu_to_le32(adapter->hba_port_num); req->page_num = cpu_to_le32(page_num); status = be_mcc_notify_wait(adapter); - if (!status) { + if (!status && len > 0) { struct be_cmd_resp_port_type *resp = cmd.va; - memcpy(data, resp->page_data, PAGE_DATA_LEN); + memcpy(data, resp->page_data + off, len); } err: mutex_unlock(&adapter->mcc_lock); @@ -2415,7 +2415,7 @@ int be_cmd_query_cable_type(struct be_adapter *adapter) int status; status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, - page_data); + 0, PAGE_DATA_LEN, page_data); if (!status) { switch (adapter->phy.interface_type) { case PHY_TYPE_QSFP: @@ -2440,7 +2440,7 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter) int status; status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, - page_data); + 0, PAGE_DATA_LEN, page_data); if (!status) { strlcpy(adapter->phy.vendor_name, page_data + SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index db1f3b908582..e2085c68c0ee 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -2427,7 +2427,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon, int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state); int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, - u8 page_num, u8 *data); + u8 page_num, u32 off, u32 len, u8 *data); int be_cmd_query_cable_type(struct be_adapter *adapter); int be_cmd_query_sfp_info(struct be_adapter *adapter); int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index dfa784339781..bd0df189d871 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1344,7 +1344,7 @@ static int be_get_module_info(struct net_device *netdev, return -EOPNOTSUPP; status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, - page_data); + 0, PAGE_DATA_LEN, page_data); if (!status) { if (!page_data[SFP_PLUS_SFF_8472_COMP]) { modinfo->type = ETH_MODULE_SFF_8079; @@ -1362,25 +1362,32 @@ static int be_get_module_eeprom(struct net_device *netdev, { struct be_adapter *adapter = netdev_priv(netdev); 
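/*
 * [Editor's sketch, not part of the patch; demo_* names are hypothetical.]
 * The be_cmd_read_port_transceiver_data() change above adds an (off, len)
 * window to the firmware page read, and be_get_module_eeprom() in the hunk
 * that follows uses it to serve partial EEPROM reads that may straddle the
 * 256-byte A0 page and the A2 page. The split is plain interval arithmetic;
 * a self-contained version:
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_PAGE_LEN 256	/* bytes per SFP EEPROM page (A0/A2) */

static int demo_eeprom_read(u32 offset, u32 len, u8 *data,
			    int (*page_read)(u8 page, u32 off, u32 len,
					     u8 *buf))
{
	u32 begin = offset, end = offset + len;
	int status;

	/* Slice that falls inside the lower (A0) page, if any. */
	if (begin < DEMO_PAGE_LEN) {
		u32 n = min_t(u32, end, DEMO_PAGE_LEN) - begin;

		status = page_read(0xa0, begin, n, data);
		if (status)
			return status;
		data += n;
		begin = DEMO_PAGE_LEN;
	}

	/* Remainder, rebased onto the upper (A2) page. */
	if (end > DEMO_PAGE_LEN) {
		status = page_read(0xa2, begin - DEMO_PAGE_LEN,
				   end - begin, data);
		if (status)
			return status;
	}

	return 0;
}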
int status; + u32 begin, end; if (!check_privilege(adapter, MAX_PRIVILEGES)) return -EOPNOTSUPP; - status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, - data); - if (status) - goto err; + begin = eeprom->offset; + end = eeprom->offset + eeprom->len; + + if (begin < PAGE_DATA_LEN) { + status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin, + min_t(u32, end, PAGE_DATA_LEN) - begin, + data); + if (status) + goto err; + + data += PAGE_DATA_LEN - begin; + begin = PAGE_DATA_LEN; + } - if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) { - status = be_cmd_read_port_transceiver_data(adapter, - TR_PAGE_A2, - data + - PAGE_DATA_LEN); + if (end > PAGE_DATA_LEN) { + status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2, + begin - PAGE_DATA_LEN, + end - begin, data); if (status) goto err; } - if (eeprom->offset) - memcpy(data, data + eeprom->offset, eeprom->len); err: return be_cmd_status(status); } diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 5231818943c6..c03663785a8d 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1764,6 +1764,19 @@ cleanup_clk: return rc; } +static bool ftgmac100_has_child_node(struct device_node *np, const char *name) +{ + struct device_node *child_np = of_get_child_by_name(np, name); + bool ret = false; + + if (child_np) { + ret = true; + of_node_put(child_np); + } + + return ret; +} + static int ftgmac100_probe(struct platform_device *pdev) { struct resource *res; @@ -1883,7 +1896,7 @@ static int ftgmac100_probe(struct platform_device *pdev) /* Display what we found */ phy_attached_info(phy); - } else if (np && !of_get_child_by_name(np, "mdio")) { + } else if (np && !ftgmac100_has_child_node(np, "mdio")) { /* Support legacy ASPEED devicetree descriptions that decribe a * MAC with an embedded MDIO controller but have no "mdio" * child node. 
Automatically scan the MDIO bus for available diff --git a/drivers/net/ethernet/fungible/funeth/funeth_rx.c b/drivers/net/ethernet/fungible/funeth/funeth_rx.c index 0f6a549b9f67..29a6c2ede43a 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_rx.c +++ b/drivers/net/ethernet/fungible/funeth/funeth_rx.c @@ -142,6 +142,7 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va, int ref_ok, struct funeth_txq *xdp_q) { struct bpf_prog *xdp_prog; + struct xdp_frame *xdpf; struct xdp_buff xdp; u32 act; @@ -163,7 +164,9 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va, case XDP_TX: if (unlikely(!ref_ok)) goto pass; - if (!fun_xdp_tx(xdp_q, xdp.data, xdp.data_end - xdp.data)) + + xdpf = xdp_convert_buff_to_frame(&xdp); + if (!xdpf || !fun_xdp_tx(xdp_q, xdpf)) goto xdp_error; FUN_QSTAT_INC(q, xdp_tx); q->xdp_flush |= FUN_XDP_FLUSH_TX; diff --git a/drivers/net/ethernet/fungible/funeth/funeth_tx.c b/drivers/net/ethernet/fungible/funeth/funeth_tx.c index ff6e29237253..2f6698b98b03 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_tx.c +++ b/drivers/net/ethernet/fungible/funeth/funeth_tx.c @@ -466,7 +466,7 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget) do { fun_xdp_unmap(q, reclaim_idx); - page_frag_free(q->info[reclaim_idx].vaddr); + xdp_return_frame(q->info[reclaim_idx].xdpf); trace_funeth_tx_free(q, reclaim_idx, 1, head); @@ -479,11 +479,11 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget) return npkts; } -bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len) +bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf) { struct fun_eth_tx_req *req; struct fun_dataop_gl *gle; - unsigned int idx; + unsigned int idx, len; dma_addr_t dma; if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES) @@ -494,7 +494,8 @@ bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len) return false; } - dma = dma_map_single(q->dma_dev, data, len, DMA_TO_DEVICE); + len = xdpf->len; + dma = dma_map_single(q->dma_dev, xdpf->data, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(q->dma_dev, dma))) { FUN_QSTAT_INC(q, tx_map_err); return false; @@ -514,7 +515,7 @@ bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len) gle = (struct fun_dataop_gl *)req->dataop.imm; fun_dataop_gl_init(gle, 0, 0, len, dma); - q->info[idx].vaddr = data; + q->info[idx].xdpf = xdpf; u64_stats_update_begin(&q->syncp); q->stats.tx_bytes += len; @@ -545,12 +546,9 @@ int fun_xdp_xmit_frames(struct net_device *dev, int n, if (unlikely(q_idx >= fp->num_xdpqs)) return -ENXIO; - for (q = xdpqs[q_idx], i = 0; i < n; i++) { - const struct xdp_frame *xdpf = frames[i]; - - if (!fun_xdp_tx(q, xdpf->data, xdpf->len)) + for (q = xdpqs[q_idx], i = 0; i < n; i++) + if (!fun_xdp_tx(q, frames[i])) break; - } if (unlikely(flags & XDP_XMIT_FLUSH)) fun_txq_wr_db(q); @@ -577,7 +575,7 @@ static void fun_xdpq_purge(struct funeth_txq *q) unsigned int idx = q->cons_cnt & q->mask; fun_xdp_unmap(q, idx); - page_frag_free(q->info[idx].vaddr); + xdp_return_frame(q->info[idx].xdpf); q->cons_cnt++; } } diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h index 04c9f91b7489..8708e2895946 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h +++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h @@ -95,8 +95,8 @@ struct funeth_txq_stats { /* per Tx queue SW counters */ struct funeth_tx_info { /* per Tx descriptor state */ union { - struct 
sk_buff *skb; /* associated packet */ - void *vaddr; /* start address for XDP */ + struct sk_buff *skb; /* associated packet (sk_buff path) */ + struct xdp_frame *xdpf; /* associated XDP frame (XDP path) */ }; }; @@ -245,7 +245,7 @@ static inline int fun_irq_node(const struct fun_irq *p) int fun_rxq_napi_poll(struct napi_struct *napi, int budget); int fun_txq_napi_poll(struct napi_struct *napi, int budget); netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev); -bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len); +bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf); int fun_xdp_xmit_frames(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 7e7fe5bdf1f8..5ab7c0f81e9a 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -5981,6 +5981,15 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) release_sub_crqs(adapter, 0); rc = init_sub_crqs(adapter); } else { + /* no need to reinitialize completely, but we do + * need to clean up transmits that were in flight + * when we processed the reset. Failure to do so + * will confound the upper layer, usually TCP, by + * creating the illusion of transmits that are + * awaiting completion. + */ + clean_tx_pools(adapter); + rc = reset_sub_crq_queues(adapter); } } else { diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 13382df2f2ef..bcf680e83811 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -630,7 +630,6 @@ struct e1000_phy_info { bool disable_polarity_correction; bool is_mdix; bool polarity_correction; - bool reset_disable; bool speed_downgraded; bool autoneg_wait_to_complete; }; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index e6c8e6d5234f..9466f65a6da7 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -2050,10 +2050,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) bool blocked = false; int i = 0; - /* Check the PHY (LCD) reset flag */ - if (hw->phy.reset_disable) - return true; - while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && (i++ < 30)) usleep_range(10000, 11000); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 638a3ddd7ada..2504b11c3169 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -271,7 +271,6 @@ #define I217_CGFREG_ENABLE_MTA_RESET 0x0002 #define I217_MEMPWR PHY_REG(772, 26) #define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 -#define I217_MEMPWR_MOEM 0x1000 /* Receive Address Initial CRC Calculation */ #define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4)) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index fa06f68c8c80..f1729940e46c 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6494,6 +6494,10 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter) if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && hw->mac.type >= e1000_pch_adp) { + /* Keep the GPT clock enabled for CSME */ + mac_data = er32(FEXTNVM); + mac_data |= BIT(3); + ew32(FEXTNVM, mac_data); /* Request ME unconfigure the device from S0ix */ mac_data = er32(H2ME); mac_data &= 
~E1000_H2ME_START_DPG; @@ -6987,21 +6991,8 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev) struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = to_pci_dev(dev); - struct e1000_hw *hw = &adapter->hw; - u16 phy_data; int rc; - if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && - hw->mac.type >= e1000_pch_adp) { - /* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */ - e1e_rphy(hw, I217_MEMPWR, &phy_data); - phy_data |= I217_MEMPWR_MOEM; - e1e_wphy(hw, I217_MEMPWR, phy_data); - - /* Disable LCD reset */ - hw->phy.reset_disable = true; - } - e1000e_flush_lpic(pdev); e1000e_pm_freeze(dev); @@ -7023,8 +7014,6 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev) struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = to_pci_dev(dev); - struct e1000_hw *hw = &adapter->hw; - u16 phy_data; int rc; /* Introduce S0ix implementation */ @@ -7035,17 +7024,6 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev) if (rc) return rc; - if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && - hw->mac.type >= e1000_pch_adp) { - /* Unmask OEM Bits / Gig Disable / Restart AN 772_26[12] = 0 */ - e1e_rphy(hw, I217_MEMPWR, &phy_data); - phy_data &= ~I217_MEMPWR_MOEM; - e1e_wphy(hw, I217_MEMPWR, phy_data); - - /* Enable LCD reset */ - hw->phy.reset_disable = false; - } - return e1000e_pm_thaw(dev); } diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 18558a019353..407fe8f340a0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -37,6 +37,7 @@ #include <net/tc_act/tc_mirred.h> #include <net/udp_tunnel.h> #include <net/xdp_sock.h> +#include <linux/bitfield.h> #include "i40e_type.h" #include "i40e_prototype.h" #include <linux/net/intel/i40e_client.h> @@ -1092,6 +1093,21 @@ static inline void i40e_write_fd_input_set(struct i40e_pf *pf, (u32)(val & 0xFFFFFFFFULL)); } +/** + * i40e_get_pf_count - get PCI PF count. + * @hw: pointer to a hw. + * + * Reports the function number of the highest PCI physical + * function plus 1 as it is loaded from the NVM. + * + * Return: PCI PF count. + **/ +static inline u32 i40e_get_pf_count(struct i40e_hw *hw) +{ + return FIELD_GET(I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK, + rd32(hw, I40E_GLGEN_PCIFCNCNT)); +} + /* needed by i40e_ethtool.c */ int i40e_up(struct i40e_vsi *vsi); void i40e_down(struct i40e_vsi *vsi); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 72576bb3e94d..685556e968f2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -551,6 +551,47 @@ void i40e_pf_reset_stats(struct i40e_pf *pf) } /** + * i40e_compute_pci_to_hw_id - compute index from PCI function. + * @vsi: ptr to the VSI to read from. + * @hw: ptr to the hardware info. + **/ +static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw) +{ + int pf_count = i40e_get_pf_count(hw); + + if (vsi->type == I40E_VSI_SRIOV) + return (hw->port * BIT(7)) / pf_count + vsi->vf_id; + + return hw->port + BIT(7); +} + +/** + * i40e_stat_update64 - read and update a 64 bit stat from the chip. + * @hw: ptr to the hardware info. + * @hireg: the high 32 bit reg to read. + * @loreg: the low 32 bit reg to read. + * @offset_loaded: has the initial offset been loaded yet. + * @offset: ptr to current offset value.
+ * @stat: ptr to the stat. + * + * Since the device stats are not reset at PFReset, they will not + * be zeroed when the driver starts. We'll save the first values read + * and use them as offsets to be subtracted from the raw values in order + * to report stats that count from zero. + **/ +static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg, + bool offset_loaded, u64 *offset, u64 *stat) +{ + u64 new_data; + + new_data = rd64(hw, loreg); + + if (!offset_loaded || new_data < *offset) + *offset = new_data; + *stat = new_data - *offset; +} + +/** * i40e_stat_update48 - read and update a 48 bit stat from the chip * @hw: ptr to the hardware info * @hireg: the high 32 bit reg to read @@ -622,6 +663,34 @@ static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat) } /** + * i40e_stats_update_rx_discards - update rx_discards. + * @vsi: ptr to the VSI to be updated. + * @hw: ptr to the hardware info. + * @stat_idx: VSI's stat_counter_idx. + * @offset_loaded: ptr to the VSI's stat_offsets_loaded. + * @stat_offset: ptr to stat_offset to store first read of specific register. + * @stat: ptr to VSI's stat to be updated. + **/ +static void +i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw, + int stat_idx, bool offset_loaded, + struct i40e_eth_stats *stat_offset, + struct i40e_eth_stats *stat) +{ + u64 rx_rdpc, rx_rxerr; + + i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded, + &stat_offset->rx_discards, &rx_rdpc); + i40e_stat_update64(hw, + I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)), + I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)), + offset_loaded, &stat_offset->rx_discards_other, + &rx_rxerr); + + stat->rx_discards = rx_rdpc + rx_rxerr; +} + +/** * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters. * @vsi: the VSI to be updated **/ @@ -680,6 +749,10 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi) I40E_GLV_BPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); + + i40e_stats_update_rx_discards(vsi, hw, stat_idx, + vsi->stat_offsets_loaded, oes, es); + vsi->stat_offsets_loaded = true; } @@ -1852,11 +1925,15 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, * non-zero req_queue_pairs says that user requested a new * queue count via ethtool's set_channels, so use this * value for queues distribution across traffic classes + * We need at least one queue pair for the interface + * to be usable, as handled by the else branch below.
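i40e_stat_update64() above is the usual baseline-on-first-read scheme for hardware counters that survive PFReset. The arithmetic in isolation, runnable, with the loaded flag folded into the helper for brevity (the driver instead sets stat_offsets_loaded once per VSI):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The first read becomes the offset so reported stats count from zero;
 * a raw value below the saved offset (counter reset under us) simply
 * re-baselines, as in the helper above. */
static uint64_t stat_delta(uint64_t raw, bool *loaded, uint64_t *offset)
{
	if (!*loaded || raw < *offset) {
		*offset = raw;
		*loaded = true;
	}
	return raw - *offset;
}

int main(void)
{
	bool loaded = false;
	uint64_t off = 0;

	printf("%llu\n", (unsigned long long)stat_delta(1000, &loaded, &off)); /* 0 */
	printf("%llu\n", (unsigned long long)stat_delta(1500, &loaded, &off)); /* 500 */
	return 0;
}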
*/ if (vsi->req_queue_pairs > 0) vsi->num_queue_pairs = vsi->req_queue_pairs; else if (pf->flags & I40E_FLAG_MSIX_ENABLED) vsi->num_queue_pairs = pf->num_lan_msix; + else + vsi->num_queue_pairs = 1; } /* Number of queues per enabled TC */ @@ -10577,7 +10654,7 @@ static int i40e_reset(struct i40e_pf *pf) **/ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { - int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state); + const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf); struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; i40e_status ret; @@ -10585,13 +10662,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) int v; if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && - i40e_check_recovery_mode(pf)) { + is_recovery_mode_reported) i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); - } if (test_bit(__I40E_DOWN, pf->state) && - !test_bit(__I40E_RECOVERY_MODE, pf->state) && - !old_recovery_mode_bit) + !test_bit(__I40E_RECOVERY_MODE, pf->state)) goto clear_recovery; dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); @@ -10618,13 +10693,12 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) * accordingly with regard to resources initialization * and deinitialization */ - if (test_bit(__I40E_RECOVERY_MODE, pf->state) || - old_recovery_mode_bit) { + if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { if (i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities)) goto end_unlock; - if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { + if (is_recovery_mode_reported) { /* we're staying in recovery mode so we'll reinitialize * misc vector here */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 1908eed4fa5e..7339003aa17c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -211,6 +211,11 @@ #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 #define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16 #define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT) +#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */ +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0 +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT) +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16 +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT) #define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */ #define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0 #define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT) @@ -643,6 +648,14 @@ #define I40E_VFQF_HKEY1_MAX_INDEX 12 #define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */ #define I40E_VFQF_HLUT1_MAX_INDEX 15 +#define I40E_GL_RXERR1H(_i) (0x00318004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_RXERR1H_MAX_INDEX 143 +#define I40E_GL_RXERR1H_RXERR1H_SHIFT 0 +#define I40E_GL_RXERR1H_RXERR1H_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1H_RXERR1H_SHIFT) +#define I40E_GL_RXERR1L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_RXERR1L_MAX_INDEX 143 +#define I40E_GL_RXERR1L_RXERR1L_SHIFT 0 +#define I40E_GL_RXERR1L_RXERR1L_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1L_RXERR1L_SHIFT) #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define 
I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 36a4ca1ffb1a..7b3f30beb757 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1172,6 +1172,7 @@ struct i40e_eth_stats { u64 tx_broadcast; /* bptc */ u64 tx_discards; /* tdpc */ u64 tx_errors; /* tepc */ + u64 rx_discards_other; /* rxerr1 */ }; /* Statistics collected per VEB per TC */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 033ea71763e3..86b0f21287dc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2147,6 +2147,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) /* VFs only use TC 0 */ vfres->vsi_res[0].qset_handle = le16_to_cpu(vsi->info.qs_handle[0]); + if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) { + i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); + eth_zero_addr(vf->default_lan_addr.addr); + } ether_addr_copy(vfres->vsi_res[0].default_mac_addr, vf->default_lan_addr.addr); } diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 49aed3e506a6..0ea0361cd86b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -64,7 +64,6 @@ struct iavf_vsi { u16 id; DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__); int base_vector; - u16 work_limit; u16 qs_handle; void *priv; /* client driver data reference. */ }; @@ -159,8 +158,12 @@ struct iavf_vlan { struct iavf_vlan_filter { struct list_head list; struct iavf_vlan vlan; - bool remove; /* filter needs to be removed */ - bool add; /* filter needs to be added */ + struct { + u8 is_new_vlan:1; /* filter is new, wait for PF answer */ + u8 remove:1; /* filter needs to be removed */ + u8 add:1; /* filter needs to be added */ + u8 padding:5; + }; }; #define IAVF_MAX_TRAFFIC_CLASS 4 @@ -461,6 +464,10 @@ static inline const char *iavf_state_str(enum iavf_state_t state) return "__IAVF_INIT_VERSION_CHECK"; case __IAVF_INIT_GET_RESOURCES: return "__IAVF_INIT_GET_RESOURCES"; + case __IAVF_INIT_EXTENDED_CAPS: + return "__IAVF_INIT_EXTENDED_CAPS"; + case __IAVF_INIT_CONFIG_ADAPTER: + return "__IAVF_INIT_CONFIG_ADAPTER"; case __IAVF_INIT_SW: return "__IAVF_INIT_SW"; case __IAVF_INIT_FAILED: @@ -520,6 +527,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter); int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter); int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter); void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter); +u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter); void iavf_irq_enable(struct iavf_adapter *adapter, bool flush); void iavf_configure_queues(struct iavf_adapter *adapter); void iavf_deconfigure_queues(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 3bb56714beb0..e535d4c3da49 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -692,12 +692,8 @@ static int __iavf_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, int queue) { struct iavf_adapter *adapter = netdev_priv(netdev); - struct iavf_vsi *vsi = 
&adapter->vsi; struct iavf_ring *rx_ring, *tx_ring; - ec->tx_max_coalesced_frames = vsi->work_limit; - ec->rx_max_coalesced_frames = vsi->work_limit; - /* Rx and Tx usecs per queue value. If user doesn't specify the * queue, return queue 0's value to represent. */ @@ -825,12 +821,8 @@ static int __iavf_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, int queue) { struct iavf_adapter *adapter = netdev_priv(netdev); - struct iavf_vsi *vsi = &adapter->vsi; int i; - if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) - vsi->work_limit = ec->tx_max_coalesced_frames_irq; - if (ec->rx_coalesce_usecs == 0) { if (ec->use_adaptive_rx_coalesce) netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); @@ -1969,8 +1961,6 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, static const struct ethtool_ops iavf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES | - ETHTOOL_COALESCE_MAX_FRAMES_IRQ | ETHTOOL_COALESCE_USE_ADAPTIVE, .get_drvinfo = iavf_get_drvinfo, .get_link = ethtool_op_get_link, diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index f3ecb3bca33d..2e2c153ce46a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -843,7 +843,7 @@ static void iavf_restore_filters(struct iavf_adapter *adapter) * iavf_get_num_vlans_added - get number of VLANs added * @adapter: board private structure */ -static u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter) +u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter) { return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) + bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID); @@ -906,11 +906,6 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev, if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)))) return -ENOMEM; - if (proto == cpu_to_be16(ETH_P_8021Q)) - set_bit(vid, adapter->vsi.active_cvlans); - else - set_bit(vid, adapter->vsi.active_svlans); - return 0; } @@ -2245,7 +2240,6 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter) adapter->vsi.back = adapter; adapter->vsi.base_vector = 1; - adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; vsi->netdev = adapter->netdev; vsi->qs_handle = adapter->vsi_res->qset_handle; if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { @@ -2956,6 +2950,9 @@ continue_reset: adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; iavf_misc_irq_enable(adapter); + bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID); + bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID); + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); /* We were running when the reset started, so we need to restore some diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 978f651c6b09..06d18797d25a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -194,7 +194,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi, struct iavf_tx_buffer *tx_buf; struct iavf_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; - unsigned int budget = vsi->work_limit; + unsigned int budget = IAVF_DEFAULT_IRQ_WORK; tx_buf = &tx_ring->tx_bi[i]; tx_desc = IAVF_TX_DESC(tx_ring, i); @@ -1285,11 +1285,10 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, { struct iavf_rx_buffer *rx_buffer; - if 
(!size) - return NULL; - rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; prefetchw(rx_buffer->page); + if (!size) + return rx_buffer; /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 782450d5c12f..1603e99bae4a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -627,6 +627,33 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter) } /** + * iavf_vlan_add_reject + * @adapter: adapter structure + * + * Remove VLAN filters from list based on PF response. + **/ +static void iavf_vlan_add_reject(struct iavf_adapter *adapter) +{ + struct iavf_vlan_filter *f, *ftmp; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->is_new_vlan) { + if (f->vlan.tpid == ETH_P_8021Q) + clear_bit(f->vlan.vid, + adapter->vsi.active_cvlans); + else + clear_bit(f->vlan.vid, + adapter->vsi.active_svlans); + + list_del(&f->list); + kfree(f); + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +/** * iavf_add_vlans * @adapter: adapter structure * @@ -683,6 +710,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter) vvfl->vlan_id[i] = f->vlan.vid; i++; f->add = false; + f->is_new_vlan = true; if (i == count) break; } @@ -695,10 +723,18 @@ void iavf_add_vlans(struct iavf_adapter *adapter) iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); kfree(vvfl); } else { + u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters; + u16 current_vlans = iavf_get_num_vlans_added(adapter); struct virtchnl_vlan_filter_list_v2 *vvfl_v2; adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2; + if ((count + current_vlans) > max_vlans && + current_vlans < max_vlans) { + count = max_vlans - iavf_get_num_vlans_added(adapter); + more = true; + } + len = sizeof(*vvfl_v2) + ((count - 1) * sizeof(struct virtchnl_vlan_filter)); if (len > IAVF_MAX_AQ_BUF_SIZE) { @@ -725,6 +761,9 @@ void iavf_add_vlans(struct iavf_adapter *adapter) &adapter->vlan_v2_caps.filtering.filtering_support; struct virtchnl_vlan *vlan; + if (i == count) + break; + /* give priority over outer if it's enabled */ if (filtering_support->outer) vlan = &vvfl_v2->filters[i].outer; @@ -736,8 +775,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter) i++; f->add = false; - if (i == count) - break; + f->is_new_vlan = true; } } @@ -2080,6 +2118,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, */ iavf_netdev_features_vlan_strip_set(netdev, true); break; + case VIRTCHNL_OP_ADD_VLAN_V2: + iavf_vlan_add_reject(adapter); + dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", + iavf_stat_str(&adapter->hw, v_retval)); + break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", v_retval, iavf_stat_str(&adapter->hw, v_retval), @@ -2332,6 +2375,24 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, spin_unlock_bh(&adapter->adv_rss_lock); } break; + case VIRTCHNL_OP_ADD_VLAN_V2: { + struct iavf_vlan_filter *f; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->is_new_vlan) { + f->is_new_vlan = false; + if (f->vlan.tpid == ETH_P_8021Q) + set_bit(f->vlan.vid, + adapter->vsi.active_cvlans); + else + set_bit(f->vlan.vid, + adapter->vsi.active_svlans); + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + } + break; 
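The V2 path of iavf_add_vlans() above now caps one message at the PF's advertised max_filters and sets `more` so the remainder goes out in a later pass, while the new is_new_vlan bit lets iavf_vlan_add_reject() unwind anything the PF refuses. The clamping rule on its own (helper name hypothetical):

#include <stdbool.h>

/* Cap a batch so (current + batch) never exceeds the PF limit; report
 * whether a follow-up message is still needed for the remainder. */
static unsigned int clamp_vlan_batch(unsigned int count,
				     unsigned int current_vlans,
				     unsigned int max_vlans, bool *more)
{
	if (count + current_vlans > max_vlans && current_vlans < max_vlans) {
		count = max_vlans - current_vlans;
		*more = true;
	}
	return count;
}

int main(void)
{
	bool more = false;
	unsigned int n = clamp_vlan_batch(10, 45, 50, &more);

	/* n == 5, more == true: only 5 slots remain under the cap */
	return (n == 5 && more) ? 0 : 1;
}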
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: /* PF enabled vlan strip on this VF. * Update netdev->features if needed to be in sync with ethtool. diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h index 61dd2f18dee8..b41bc3dc1745 100644 --- a/drivers/net/ethernet/intel/ice/ice_devids.h +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -5,6 +5,7 @@ #define _ICE_DEVIDS_H_ /* Device IDs */ +#define ICE_DEV_ID_E822_SI_DFLT 0x1888 /* Intel(R) Ethernet Connection E823-L for backplane */ #define ICE_DEV_ID_E823L_BACKPLANE 0x124C /* Intel(R) Ethernet Connection E823-L for SFP */ diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 3991d62473bf..3337314a7b35 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -814,6 +814,8 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf) devlink_port_unregister(devlink_port); } +#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024) + /** * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents * @devlink: the devlink instance @@ -840,8 +842,9 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink, struct ice_pf *pf = devlink_priv(devlink); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - void *nvm_data; - u32 nvm_size; + u8 *nvm_data, *tmp, i; + u32 nvm_size, left; + s8 num_blks; int status; nvm_size = hw->flash.flash_size; @@ -849,26 +852,44 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink, if (!nvm_data) return -ENOMEM; - status = ice_acquire_nvm(hw, ICE_RES_READ); - if (status) { - dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", - status, hw->adminq.sq_last_status); - NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); - vfree(nvm_data); - return status; - } - status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false); - if (status) { - dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n", - nvm_size, status, hw->adminq.sq_last_status); - NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); + num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE); + tmp = nvm_data; + left = nvm_size; + + /* Some systems take longer to read the NVM than others which causes the + * FW to reclaim the NVM lock before the entire NVM has been read. Fix + * this by breaking the reads of the NVM into smaller chunks that will + * probably not take as long. 
This has some overhead since we are + * increasing the number of AQ commands, but it should always work + */ + for (i = 0; i < num_blks; i++) { + u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left); + + status = ice_acquire_nvm(hw, ICE_RES_READ); + if (status) { + dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", + status, hw->adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); + vfree(nvm_data); + return -EIO; + } + + status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE, + &read_sz, tmp, false); + if (status) { + dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n", + read_sz, status, hw->adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); + ice_release_nvm(hw); + vfree(nvm_data); + return -EIO; + } ice_release_nvm(hw); - vfree(nvm_data); - return status; - } - ice_release_nvm(hw); + tmp += read_sz; + left -= read_sz; + } *data = nvm_data; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 70335f6e8524..4efa5e5846e0 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -658,7 +658,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring) rx_desc = ICE_RX_DESC(rx_ring, i); if (!(rx_desc->wb.status_error0 & - cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS))) + (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) | + cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))))) continue; rx_buf = &rx_ring->rx_buf[i]; diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c index 665a344fb9c0..3dc5662d62a6 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.c +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -736,7 +736,87 @@ static int ice_finalize_update(struct pldmfw *context) return 0; } -static const struct pldmfw_ops ice_fwu_ops = { +struct ice_pldm_pci_record_id { + u32 vendor; + u32 device; + u32 subsystem_vendor; + u32 subsystem_device; +}; + +/** + * ice_op_pci_match_record - Check if a PCI device matches the record + * @context: PLDM fw update structure + * @record: list of records extracted from the PLDM image + * + * Determine if the PCI device associated with this device matches the record + * data provided. + * + * Searches the descriptor TLVs and extracts the relevant descriptor data into + * a pldm_pci_record_id. This is then compared against the PCI device ID + * information. + * + * Returns: true if the device matches the record, false otherwise. 
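The snapshot loop in ice_devlink_nvm_snapshot() above trades one long flash read for ICE_DEVLINK_READ_BLK_SIZE chunks, re-taking the NVM semaphore per chunk so the firmware's ownership timeout cannot expire mid-transfer. The chunk walk by itself, runnable (the acquire/read/release calls are only indicated by a comment):

#include <stdint.h>
#include <stdio.h>

#define BLK_SIZE (1024 * 1024)	/* mirrors ICE_DEVLINK_READ_BLK_SIZE */

static void read_in_blocks(uint32_t flash_size)
{
	uint32_t off = 0, left = flash_size;

	while (left) {
		uint32_t n = left < BLK_SIZE ? left : BLK_SIZE;

		/* acquire NVM -> read [off, off + n) -> release NVM */
		printf("block: off=%u len=%u\n", off, n);
		off += n;
		left -= n;
	}
}

int main(void)
{
	read_in_blocks(5 * BLK_SIZE / 2);	/* 2.5 MB -> 1 MB, 1 MB, 0.5 MB */
	return 0;
}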
+ */ +static bool +ice_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record) +{ + struct pci_dev *pdev = to_pci_dev(context->dev); + struct ice_pldm_pci_record_id id = { + .vendor = PCI_ANY_ID, + .device = PCI_ANY_ID, + .subsystem_vendor = PCI_ANY_ID, + .subsystem_device = PCI_ANY_ID, + }; + struct pldmfw_desc_tlv *desc; + + list_for_each_entry(desc, &record->descs, entry) { + u16 value; + int *ptr; + + switch (desc->type) { + case PLDM_DESC_ID_PCI_VENDOR_ID: + ptr = &id.vendor; + break; + case PLDM_DESC_ID_PCI_DEVICE_ID: + ptr = &id.device; + break; + case PLDM_DESC_ID_PCI_SUBVENDOR_ID: + ptr = &id.subsystem_vendor; + break; + case PLDM_DESC_ID_PCI_SUBDEV_ID: + ptr = &id.subsystem_device; + break; + default: + /* Skip unrelated TLVs */ + continue; + } + + value = get_unaligned_le16(desc->data); + /* A value of zero for one of the descriptors is sometimes + * used when the record should ignore this field when matching + * device. For example if the record applies to any subsystem + * device or vendor. + */ + if (value) + *ptr = value; + else + *ptr = PCI_ANY_ID; + } + + /* the E822 device can have a generic device ID so check for that */ + if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) && + (id.device == PCI_ANY_ID || id.device == pdev->device || + id.device == ICE_DEV_ID_E822_SI_DFLT) && + (id.subsystem_vendor == PCI_ANY_ID || + id.subsystem_vendor == pdev->subsystem_vendor) && + (id.subsystem_device == PCI_ANY_ID || + id.subsystem_device == pdev->subsystem_device)) + return true; + + return false; +} + +static const struct pldmfw_ops ice_fwu_ops_e810 = { .match_record = &pldmfw_op_pci_match_record, .send_package_data = &ice_send_package_data, .send_component_table = &ice_send_component_table, @@ -744,6 +824,14 @@ static const struct pldmfw_ops ice_fwu_ops = { .finalize_update = &ice_finalize_update, }; +static const struct pldmfw_ops ice_fwu_ops_e822 = { + .match_record = &ice_op_pci_match_record, + .send_package_data = &ice_send_package_data, + .send_component_table = &ice_send_component_table, + .flash_component = &ice_flash_component, + .finalize_update = &ice_finalize_update, +}; + /** * ice_get_pending_updates - Check if the component has a pending update * @pf: the PF driver structure @@ -921,7 +1009,11 @@ int ice_devlink_flash_update(struct devlink *devlink, memset(&priv, 0, sizeof(priv)); - priv.context.ops = &ice_fwu_ops; + /* the E822 device needs a slightly different ops */ + if (hw->mac_type == ICE_MAC_GENERIC) + priv.context.ops = &ice_fwu_ops_e822; + else + priv.context.ops = &ice_fwu_ops_e810; priv.context.dev = dev; priv.extack = extack; priv.pf = pf; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index c1ac2f746714..9f02b60459f1 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4656,6 +4656,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ice_set_safe_mode_caps(hw); } + hw->ucast_shared = true; + err = ice_init_pf(pf); if (err) { dev_err(dev, "ice_init_pf failed: %d\n", err); @@ -5413,6 +5415,7 @@ static const struct pci_device_id ice_pci_tbl[] = { { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 }, /* required last entry */ { 0, } }; @@ -6010,10 +6013,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi) if (vsi->netdev) { 
ice_set_rx_mode(vsi->netdev); - err = ice_vsi_vlan_setup(vsi); + if (vsi->type != ICE_VSI_LB) { + err = ice_vsi_vlan_setup(vsi); - if (err) - return err; + if (err) + return err; + } } ice_vsi_cfg_dcb_rings(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index bb1721f1321d..f4907a3c2d19 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -1310,39 +1310,6 @@ out_put_vf: } /** - * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch - * @pf: PF used to reference the switch's rules - * @umac: unicast MAC to compare against existing switch rules - * - * Return true on the first/any match, else return false - */ -static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac) -{ - struct ice_sw_recipe *mac_recipe_list = - &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC]; - struct ice_fltr_mgmt_list_entry *list_itr; - struct list_head *rule_head; - struct mutex *rule_lock; /* protect MAC filter list access */ - - rule_head = &mac_recipe_list->filt_rules; - rule_lock = &mac_recipe_list->filt_rule_lock; - - mutex_lock(rule_lock); - list_for_each_entry(list_itr, rule_head, list_entry) { - u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0]; - - if (ether_addr_equal(existing_mac, umac)) { - mutex_unlock(rule_lock); - return true; - } - } - - mutex_unlock(rule_lock); - - return false; -} - -/** * ice_set_vf_mac * @netdev: network interface device structure * @vf_id: VF identifier @@ -1376,13 +1343,6 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) if (ret) goto out_put_vf; - if (ice_unicast_mac_exists(pf, mac)) { - netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n", - mac, vf_id, mac); - ret = -EINVAL; - goto out_put_vf; - } - mutex_lock(&vf->cfg_lock); /* VF is notified of its new MAC via the PF's response to the diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 3f8b7274ed2f..836dce840712 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1751,11 +1751,13 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) protocol = vlan_get_protocol(skb); - if (eth_p_mpls(protocol)) + if (eth_p_mpls(protocol)) { ip.hdr = skb_inner_network_header(skb); - else + l4.hdr = skb_checksum_start(skb); + } else { ip.hdr = skb_network_header(skb); - l4.hdr = skb_checksum_start(skb); + l4.hdr = skb_transport_header(skb); + } /* compute outer L2 header size */ l2_len = ip.hdr - skb->data; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 4547bc1f7cee..24188ec594d5 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -2948,7 +2948,8 @@ ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi, struct virtchnl_vlan_filtering_caps *vfc, struct virtchnl_vlan_filter_list_v2 *vfl) { - u16 num_requested_filters = vsi->num_vlan + vfl->num_elements; + u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) + + vfl->num_elements; if (num_requested_filters > vfc->max_filters) return false; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index ae17af44fe02..a5ebee7df4a8 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6171,6 +6171,9 @@ u32 
igc_rd32(struct igc_hw *hw, u32 reg) u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); u32 value = 0; + if (IGC_REMOVED(hw_addr)) + return ~value; + value = readl(&hw_addr[reg]); /* reads should not return all F's */ diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index e197a33d93a0..026c3b65fc37 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -306,7 +306,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg); #define wr32(reg, val) \ do { \ u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ - writel((val), &hw_addr[(reg)]); \ + if (!IGC_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ } while (0) #define rd32(reg) (igc_rd32(hw, reg)) @@ -318,4 +319,6 @@ do { \ #define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2))) +#define IGC_REMOVED(h) unlikely(!(h)) + #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 921a4d977d65..8813b4dd6872 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -779,6 +779,7 @@ struct ixgbe_adapter { #ifdef CONFIG_IXGBE_IPSEC struct ixgbe_ipsec *ipsec; #endif /* CONFIG_IXGBE_IPSEC */ + spinlock_t vfs_lock; }; static inline int ixgbe_determine_xdp_q_idx(int cpu) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 77c2e70b0860..55f91c9ff047 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6403,6 +6403,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, /* n-tuple support exists, always init our spinlock */ spin_lock_init(&adapter->fdir_perfect_lock); + /* init spinlock to avoid concurrency of VF resources */ + spin_lock_init(&adapter->vfs_lock); + #ifdef CONFIG_IXGBE_DCB ixgbe_init_dcb(adapter); #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d4e63f0644c3..a1e69c734863 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -205,10 +205,13 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs) int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { unsigned int num_vfs = adapter->num_vfs, vf; + unsigned long flags; int rss; + spin_lock_irqsave(&adapter->vfs_lock, flags); /* set num VFs to 0 to prevent access to vfinfo */ adapter->num_vfs = 0; + spin_unlock_irqrestore(&adapter->vfs_lock, flags); /* put the reference to all of the vf devices */ for (vf = 0; vf < num_vfs; ++vf) { @@ -1355,8 +1358,10 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) void ixgbe_msg_task(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; + unsigned long flags; u32 vf; + spin_lock_irqsave(&adapter->vfs_lock, flags); for (vf = 0; vf < adapter->num_vfs; vf++) { /* process any reset requests */ if (!ixgbe_check_for_rst(hw, vf)) @@ -1370,6 +1375,7 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter) if (!ixgbe_check_for_ack(hw, vf)) ixgbe_rcv_ack_from_vf(adapter, vf); } + spin_unlock_irqrestore(&adapter->vfs_lock, flags); } static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h index cc51149790ff..3d5d39a52fe6 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h +++ 
b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h @@ -52,7 +52,7 @@ #define CN93_SDP_EPF_RINFO_SRN(val) ((val) & 0xFF) #define CN93_SDP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF) -#define CN93_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) && 0xFF) +#define CN93_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0xFF) /* SDP Function select */ #define CN93_SDP_FUNC_SEL_EPF_BIT_POS 8 diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 28b19945d716..e64318c110fd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -28,6 +28,9 @@ #define MAX_RATE_EXPONENT 0x0FULL #define MAX_RATE_MANTISSA 0xFFULL +#define CN10K_MAX_BURST_MANTISSA 0x7FFFULL +#define CN10K_MAX_BURST_SIZE 8453888ULL + /* Bitfields in NIX_TLX_PIR register */ #define TLX_RATE_MANTISSA GENMASK_ULL(8, 1) #define TLX_RATE_EXPONENT GENMASK_ULL(12, 9) @@ -35,6 +38,9 @@ #define TLX_BURST_MANTISSA GENMASK_ULL(36, 29) #define TLX_BURST_EXPONENT GENMASK_ULL(40, 37) +#define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29) +#define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44) + struct otx2_tc_flow_stats { u64 bytes; u64 pkts; @@ -77,33 +83,42 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic) } EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap); -static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp, - u32 *burst_mantissa) +static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst, + u32 *burst_exp, u32 *burst_mantissa) { + int max_burst, max_mantissa; unsigned int tmp; + if (is_dev_otx2(nic->pdev)) { + max_burst = MAX_BURST_SIZE; + max_mantissa = MAX_BURST_MANTISSA; + } else { + max_burst = CN10K_MAX_BURST_SIZE; + max_mantissa = CN10K_MAX_BURST_MANTISSA; + } + /* Burst is calculated as * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256 * Max supported burst size is 130,816 bytes. */ - burst = min_t(u32, burst, MAX_BURST_SIZE); + burst = min_t(u32, burst, max_burst); if (burst) { *burst_exp = ilog2(burst) ? 
ilog2(burst) - 1 : 0; tmp = burst - rounddown_pow_of_two(burst); - if (burst < MAX_BURST_MANTISSA) + if (burst < max_mantissa) *burst_mantissa = tmp * 2; else *burst_mantissa = tmp / (1ULL << (*burst_exp - 7)); } else { *burst_exp = MAX_BURST_EXPONENT; - *burst_mantissa = MAX_BURST_MANTISSA; + *burst_mantissa = max_mantissa; } } -static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp, +static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp, u32 *mantissa, u32 *div_exp) { - unsigned int tmp; + u64 tmp; /* Rate calculation by hardware * @@ -132,21 +147,44 @@ static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp, } } -static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate) +static u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic, + u64 maxrate, u32 burst) { - struct otx2_hw *hw = &nic->hw; - struct nix_txschq_config *req; u32 burst_exp, burst_mantissa; u32 exp, mantissa, div_exp; + u64 regval = 0; + + /* Get exponent and mantissa values from the desired rate */ + otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa); + otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp); + + if (is_dev_otx2(nic->pdev)) { + regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) | + FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) | + FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | + FIELD_PREP(TLX_RATE_EXPONENT, exp) | + FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); + } else { + regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) | + FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) | + FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | + FIELD_PREP(TLX_RATE_EXPONENT, exp) | + FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); + } + + return regval; +} + +static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, + u32 burst, u64 maxrate) +{ + struct otx2_hw *hw = &nic->hw; + struct nix_txschq_config *req; int txschq, err; /* All SQs share the same TL4, so pick the first scheduler */ txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; - /* Get exponent and mantissa values from the desired rate */ - otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa); - otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp); - mutex_lock(&nic->mbox.lock); req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox); if (!req) { @@ -157,11 +195,7 @@ static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 ma req->lvl = NIX_TXSCH_LVL_TL4; req->num_regs = 1; req->reg[0] = NIX_AF_TL4X_PIR(txschq); - req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) | - FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) | - FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | - FIELD_PREP(TLX_RATE_EXPONENT, exp) | - FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); + req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst); err = otx2_sync_mbox_msg(&nic->mbox); mutex_unlock(&nic->mbox.lock); @@ -230,7 +264,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, struct netlink_ext_ack *extack = cls->common.extack; struct flow_action *actions = &cls->rule->action; struct flow_action_entry *entry; - u32 rate; + u64 rate; int err; err = otx2_tc_validate_flow(nic, actions, extack); @@ -256,7 +290,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, } /* Convert bytes per second to Mbps */ rate = entry->police.rate_bytes_ps * 8; - rate = max_t(u32, rate / 1000000, 1); + rate = max_t(u64, rate / 1000000, 1); err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate); if 
(err) return err; @@ -614,21 +648,27 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, flow_spec->dport = match.key->dst; flow_mask->dport = match.mask->dst; - if (ip_proto == IPPROTO_UDP) - req->features |= BIT_ULL(NPC_DPORT_UDP); - else if (ip_proto == IPPROTO_TCP) - req->features |= BIT_ULL(NPC_DPORT_TCP); - else if (ip_proto == IPPROTO_SCTP) - req->features |= BIT_ULL(NPC_DPORT_SCTP); + + if (flow_mask->dport) { + if (ip_proto == IPPROTO_UDP) + req->features |= BIT_ULL(NPC_DPORT_UDP); + else if (ip_proto == IPPROTO_TCP) + req->features |= BIT_ULL(NPC_DPORT_TCP); + else if (ip_proto == IPPROTO_SCTP) + req->features |= BIT_ULL(NPC_DPORT_SCTP); + } flow_spec->sport = match.key->src; flow_mask->sport = match.mask->src; - if (ip_proto == IPPROTO_UDP) - req->features |= BIT_ULL(NPC_SPORT_UDP); - else if (ip_proto == IPPROTO_TCP) - req->features |= BIT_ULL(NPC_SPORT_TCP); - else if (ip_proto == IPPROTO_SCTP) - req->features |= BIT_ULL(NPC_SPORT_SCTP); + + if (flow_mask->sport) { + if (ip_proto == IPPROTO_UDP) + req->features |= BIT_ULL(NPC_SPORT_UDP); + else if (ip_proto == IPPROTO_TCP) + req->features |= BIT_ULL(NPC_SPORT_TCP); + else if (ip_proto == IPPROTO_SCTP) + req->features |= BIT_ULL(NPC_SPORT_SCTP); + } } return otx2_tc_parse_actions(nic, &rule->action, req, f, node); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c index d43e503c644f..4d93ad6a284c 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c @@ -167,12 +167,12 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule, } port = netdev_priv(ingress_dev); - mask = htons(0x1FFF); - key = htons(port->hw_id); + mask = htons(0x1FFF << 3); + key = htons(port->hw_id << 3); rule_match_set(r_match->key, SYS_PORT, key); rule_match_set(r_match->mask, SYS_PORT, mask); - mask = htons(0x1FF); + mask = htons(0x3FF); key = htons(port->dev_id); rule_match_set(r_match->key, SYS_DEV, key); rule_match_set(r_match->mask, SYS_DEV, mask); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c index 3754d8aec76d..3c8116f16b4d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c @@ -588,6 +588,7 @@ err_router_lib_init: void prestera_router_fini(struct prestera_switch *sw) { + unregister_fib_notifier(&init_net, &sw->router->fib_nb); unregister_inetaddr_notifier(&sw->router->inetaddr_nb); unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb); rhashtable_destroy(&sw->router->kern_fib_cache_ht); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index 90e7dfd011c9..5d457bc9acc1 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -93,6 +93,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i }; struct net_device_path path = {}; + if (!ctx.dev) + return -ENODEV; + memcpy(ctx.daddr, addr, sizeof(ctx.daddr)); if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index 8f0cd3196aac..29be2fcafea3 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -651,7 +651,7 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device 
*dev, int idx, void __iomem *regs) * WDMA RX. */ - BUG_ON(idx > ARRAY_SIZE(dev->tx_ring)); + BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring)); if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE)) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 25f51f80a9b4..ba171c7f0a67 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -76,6 +76,7 @@ struct mlx5_tc_ct_priv { struct mlx5_ct_fs *fs; struct mlx5_ct_fs_ops *fs_ops; spinlock_t ht_lock; /* protects ft entries */ + struct workqueue_struct *wq; struct mlx5_tc_ct_debugfs debugfs; }; @@ -941,14 +942,11 @@ static void mlx5_tc_ct_entry_del_work(struct work_struct *work) static void __mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry) { - struct mlx5e_priv *priv; - if (!refcount_dec_and_test(&entry->refcnt)) return; - priv = netdev_priv(entry->ct_priv->netdev); INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work); - queue_work(priv->wq, &entry->work); + queue_work(entry->ct_priv->wq, &entry->work); } static struct mlx5_ct_counter * @@ -1759,19 +1757,16 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg) static void mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) { - struct mlx5e_priv *priv; - if (!refcount_dec_and_test(&ft->refcount)) return; + flush_workqueue(ct_priv->wq); nf_flow_table_offload_del_cb(ft->nf_ft, mlx5_tc_ct_block_flow_offload, ft); rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params); rhashtable_free_and_destroy(&ft->ct_entries_ht, mlx5_tc_ct_flush_ft_entry, ct_priv); - priv = netdev_priv(ct_priv->netdev); - flush_workqueue(priv->wq); mlx5_tc_ct_free_pre_ct_tables(ft); mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id); kfree(ft); @@ -2176,6 +2171,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params)) goto err_ct_tuples_nat_ht; + ct_priv->wq = alloc_ordered_workqueue("mlx5e_ct_priv_wq", 0); + if (!ct_priv->wq) { + err = -ENOMEM; + goto err_wq; + } + err = mlx5_tc_ct_fs_init(ct_priv); if (err) goto err_init_fs; @@ -2184,6 +2185,8 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, return ct_priv; err_init_fs: + destroy_workqueue(ct_priv->wq); +err_wq: rhashtable_destroy(&ct_priv->ct_tuples_nat_ht); err_ct_tuples_nat_ht: rhashtable_destroy(&ct_priv->ct_tuples_ht); @@ -2213,6 +2216,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv) if (!ct_priv) return; + destroy_workqueue(ct_priv->wq); mlx5_ct_tc_remove_dbgfs(ct_priv); chains = ct_priv->chains; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 0bb0633b7542..27483aa7be8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -231,8 +231,7 @@ mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx, struct mlx5e_ktls_offload_context_rx **ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); - BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) > - TLS_OFFLOAD_CONTEXT_SIZE_RX); + BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX); *ctx = priv_rx; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 4b6f0d1ea59a..f239fb2e832f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c 
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -68,8 +68,7 @@ mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx, struct mlx5e_ktls_offload_context_tx **ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); - BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) > - TLS_OFFLOAD_CONTEXT_SIZE_TX); + BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX); *ctx = priv_tx; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 57fa0489eeb8..1e87bb2b7541 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -688,7 +688,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env) u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {}; struct mlx5_core_dev *mdev = priv->mdev; - if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) + if (!mlx5e_stats_grp_vnic_env_num_stats(priv)) return; MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 34bf11cdf90f..9ca2c8763237 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3793,7 +3793,7 @@ static bool is_lag_dev(struct mlx5e_priv *priv, static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev) { - if (mlx5e_eswitch_uplink_rep(out_dev) && + if (same_hw_reps(priv, out_dev) && MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) && MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up)) return true; @@ -4529,13 +4529,6 @@ static int mlx5e_policer_validate(const struct flow_action *action, return -EOPNOTSUPP; } - if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && - act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { - NL_SET_ERR_MSG_MOD(extack, - "Offload not supported when conform action is not pipe or ok"); - return -EOPNOTSUPP; - } - if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && !flow_action_is_last_entry(action, act)) { NL_SET_ERR_MSG_MOD(extack, @@ -4586,6 +4579,12 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_POLICE: + if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not continue"); + return -EOPNOTSUPP; + } + err = mlx5e_policer_validate(flow_action, act, extack); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 50d14cec4894..9a7250be229f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -341,6 +341,26 @@ static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq) } } +static void mlx5e_tx_flush(struct mlx5e_txqsq *sq) +{ + struct mlx5e_tx_wqe_info *wi; + struct mlx5e_tx_wqe *wqe; + u16 pi; + + /* Must not be called when an MPWQE session is active but empty.
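The two ktls BUILD_BUG_ON changes above compare the stored pointer against the bytes actually reserved for driver state (TLS_DRIVER_STATE_SIZE_*) rather than the whole offload-context size, since the reservation is what bounds the write. The same compile-time guard in plain C11, with an illustrative reservation size:

#include <assert.h>

#define DRIVER_STATE_SIZE 16	/* illustrative, not the TLS core value */

struct offload_ctx {
	/* ... generic offload state ... */
	unsigned char driver_state[DRIVER_STATE_SIZE];
};

/* A driver that parks a pointer inside driver_state[] must prove at
 * compile time that the pointer fits the reservation. */
static_assert(sizeof(void *) <= sizeof(((struct offload_ctx *)0)->driver_state),
	      "driver context pointer must fit the reserved state");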
*/ + mlx5e_tx_mpwqe_ensure_complete(sq); + + pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); + wi = &sq->db.wqe_info[pi]; + + *wi = (struct mlx5e_tx_wqe_info) { + .num_wqebbs = 1, + }; + + wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl); +} + static inline void mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, const struct mlx5e_tx_attr *attr, @@ -459,6 +479,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, err_drop: stats->dropped++; dev_kfree_skb_any(skb); + mlx5e_tx_flush(sq); } static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr) @@ -560,6 +581,13 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_ctrl_seg *cseg; struct mlx5e_xmit_data txd; + txd.data = skb->data; + txd.len = skb->len; + + txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr))) + goto err_unmap; + if (!mlx5e_tx_mpwqe_session_is_active(sq)) { mlx5e_tx_mpwqe_session_start(sq, eseg); } else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) { @@ -569,18 +597,9 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, sq->stats->xmit_more += xmit_more; - txd.data = skb->data; - txd.len = skb->len; - - txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr))) - goto err_unmap; mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE); - mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb); - mlx5e_tx_mpwqe_add_dseg(sq, &txd); - mlx5e_tx_skb_update_hwts_flags(skb); if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) { @@ -602,6 +621,7 @@ err_unmap: mlx5e_dma_unmap_wqe_err(sq, 1); sq->stats->dropped++; dev_kfree_skb_any(skb); + mlx5e_tx_flush(sq); } void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq) @@ -1006,5 +1026,6 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, err_drop: stats->dropped++; dev_kfree_skb_any(skb); + mlx5e_tx_flush(sq); } #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c index 9d17206d1625..fabe49a35a5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c @@ -11,6 +11,7 @@ #include "mlx5_core.h" #include "eswitch.h" #include "fs_core.h" +#include "fs_ft_pool.h" #include "esw/qos.h" enum { @@ -95,8 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) if (!flow_group_in) return -ENOMEM; - table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); - ft_attr.max_fte = table_size; + ft_attr.max_fte = POOL_NEXT_SIZE; ft_attr.prio = LEGACY_FDB_PRIO; fdb = mlx5_create_flow_table(root_ns, &ft_attr); if (IS_ERR(fdb)) { @@ -105,6 +105,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) goto out; } esw->fdb_table.legacy.fdb = fdb; + table_size = fdb->max_fte; /* Addresses group : Full match unicast/multicast addresses */ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c index 15e41dc84d53..b8feaf0f5c4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c @@ -44,7 +44,7 @@ static int port_sel_mode_show(struct seq_file *file, void *priv) ldev = dev->priv.lag; 
mutex_lock(&ldev->lock); if (__mlx5_lag_is_active(ldev)) - mode = mlx5_get_str_port_sel_mode(ldev); + mode = mlx5_get_str_port_sel_mode(ldev->mode, ldev->mode_flags); else ret = -EINVAL; mutex_unlock(&ldev->lock); @@ -72,6 +72,7 @@ static int state_show(struct seq_file *file, void *priv) static int flags_show(struct seq_file *file, void *priv) { struct mlx5_core_dev *dev = file->private; + bool fdb_sel_mode_native; struct mlx5_lag *ldev; bool shared_fdb; bool lag_active; @@ -79,14 +80,21 @@ static int flags_show(struct seq_file *file, void *priv) ldev = dev->priv.lag; mutex_lock(&ldev->lock); lag_active = __mlx5_lag_is_active(ldev); - if (lag_active) - shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags); + if (!lag_active) + goto unlock; + + shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags); + fdb_sel_mode_native = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, + &ldev->mode_flags); +unlock: mutex_unlock(&ldev->lock); if (!lag_active) return -EINVAL; seq_printf(file, "%s:%s\n", "shared_fdb", shared_fdb ? "on" : "off"); + seq_printf(file, "%s:%s\n", "fdb_selection_mode", + fdb_sel_mode_native ? "native" : "affinity"); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 2a8fc547eb37..5d41e19378e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -68,14 +68,15 @@ static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags) static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode, unsigned long flags) { - bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags); + bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, + &flags); int port_sel_mode = get_port_sel_mode(mode, flags); u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {}; void *lag_ctx; lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx); MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG); - MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb); + MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode); if (port_sel_mode == MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY) { MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]); MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]); @@ -471,8 +472,13 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode, bool roce_lag = mode == MLX5_LAG_MODE_ROCE; *flags = 0; - if (shared_fdb) + if (shared_fdb) { set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags); + set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags); + } + + if (mode == MLX5_LAG_MODE_MPESW) + set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags); if (roce_lag) return mlx5_lag_set_port_sel_mode_roce(ldev, flags); @@ -481,9 +487,9 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode, return 0; } -char *mlx5_get_str_port_sel_mode(struct mlx5_lag *ldev) +char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags) { - int port_sel_mode = get_port_sel_mode(ldev->mode, ldev->mode_flags); + int port_sel_mode = get_port_sel_mode(mode, flags); switch (port_sel_mode) { case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY: return "queue_affinity"; @@ -507,7 +513,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev, if (tracker) mlx5_lag_print_mapping(dev0, ldev, tracker, flags); mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n", - shared_fdb, mlx5_get_str_port_sel_mode(ldev)); + shared_fdb, mlx5_get_str_port_sel_mode(mode, flags)); err = 
mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index c81b173156d2..ce2ce8ccbd70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -24,6 +24,7 @@ enum { enum { MLX5_LAG_MODE_FLAG_HASH_BASED, MLX5_LAG_MODE_FLAG_SHARED_FDB, + MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, }; enum mlx5_lag_mode { @@ -114,7 +115,7 @@ bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev); void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev); int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev); -char *mlx5_get_str_port_sel_mode(struct mlx5_lag *ldev); +char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags); void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports, u8 *ports, int *num_enabled); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c index ee4b25a50315..f643202b29c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c @@ -41,7 +41,6 @@ void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev) int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev = dev->priv.lag; - bool shared_fdb; int err = 0; if (!ldev) @@ -55,8 +54,8 @@ int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev) err = -EINVAL; goto out; } - shared_fdb = mlx5_shared_fdb_supported(ldev); - err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, shared_fdb); + + err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false); if (err) mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 9dbb573d53ea..ce33dbde124d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -4415,6 +4415,8 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, return 0; err_nexthop_neigh_init: + list_del(&nh->router_list_node); + mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); mlxsw_sp_nexthop_remove(mlxsw_sp, nh); return err; } @@ -5382,7 +5384,7 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp, { const struct fib_nh *nh = fib_info_nh(fi, 0); - return nh->fib_nh_scope == RT_SCOPE_LINK || + return nh->fib_nh_gw_family || mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL); } @@ -6740,6 +6742,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, const struct fib6_info *rt) { struct net_device *dev = rt->fib6_nh->fib_nh_dev; + int err; nh->nhgi = nh_grp->nhgi; nh->nh_weight = rt->fib6_nh->fib_nh_weight; @@ -6755,7 +6758,16 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, return 0; nh->ifindex = dev->ifindex; - return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev); + err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev); + if (err) + goto err_nexthop_type_init; + + return 0; + +err_nexthop_type_init: + list_del(&nh->router_list_node); + mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); + return err; } static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp, @@ -10312,7 +10324,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, unsigned long *fields = config->fields; u32 hash_fields; - switch (net->ipv4.sysctl_fib_multipath_hash_policy) { + switch 
(READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) { case 0: mlxsw_sp_mp4_hash_outer_addr(config); break; @@ -10330,7 +10342,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_mp_hash_inner_l3(config); break; case 3: - hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields; + hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields); /* Outer */ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP); MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP); @@ -10511,13 +10523,14 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp) static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) { struct net *net = mlxsw_sp_net(mlxsw_sp); - bool usp = net->ipv4.sysctl_ip_fwd_update_priority; char rgcr_pl[MLXSW_REG_RGCR_LEN]; u64 max_rifs; + bool usp; if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS)) return -EIO; max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); + usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority); mlxsw_reg_rgcr_pack(rgcr_pl, true, true); mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c index 005e56ea5da1..5893770bfd94 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c @@ -75,6 +75,9 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid, unsigned int vid, enum macaccess_entry_type type) { + int ret; + + spin_lock(&lan966x->mac_lock); lan966x_mac_select(lan966x, mac, vid); /* Issue a write command */ @@ -86,7 +89,10 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid, ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN), lan966x, ANA_MACACCESS); - return lan966x_mac_wait_for_completion(lan966x); + ret = lan966x_mac_wait_for_completion(lan966x); + spin_unlock(&lan966x->mac_lock); + + return ret; } /* The mask of the front ports is encoded inside the mac parameter via a call @@ -113,11 +119,13 @@ int lan966x_mac_learn(struct lan966x *lan966x, int port, return __lan966x_mac_learn(lan966x, port, false, mac, vid, type); } -int lan966x_mac_forget(struct lan966x *lan966x, - const unsigned char mac[ETH_ALEN], - unsigned int vid, - enum macaccess_entry_type type) +static int lan966x_mac_forget_locked(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) { + lockdep_assert_held(&lan966x->mac_lock); + lan966x_mac_select(lan966x, mac, vid); /* Issue a forget command */ @@ -128,6 +136,20 @@ int lan966x_mac_forget(struct lan966x *lan966x, return lan966x_mac_wait_for_completion(lan966x); } +int lan966x_mac_forget(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + int ret; + + spin_lock(&lan966x->mac_lock); + ret = lan966x_mac_forget_locked(lan966x, mac, vid, type); + spin_unlock(&lan966x->mac_lock); + + return ret; +} + int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid) { return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED); @@ -161,7 +183,7 @@ static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *ma { struct lan966x_mac_entry *mac_entry; - mac_entry = kzalloc(sizeof(*mac_entry), GFP_KERNEL); + mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC); if (!mac_entry) return NULL; @@ -179,7 +201,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x, struct 
lan966x_mac_entry *res = NULL; struct lan966x_mac_entry *mac_entry; - spin_lock(&lan966x->mac_lock); list_for_each_entry(mac_entry, &lan966x->mac_entries, list) { if (mac_entry->vid == vid && ether_addr_equal(mac, mac_entry->mac) && @@ -188,7 +209,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x, break; } } - spin_unlock(&lan966x->mac_lock); return res; } @@ -231,8 +251,11 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port, { struct lan966x_mac_entry *mac_entry; - if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) + spin_lock(&lan966x->mac_lock); + if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) { + spin_unlock(&lan966x->mac_lock); return 0; + } /* In case the entry already exists, don't add it again to SW, * just update HW, but we need to look in the actual HW because @@ -241,21 +264,25 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port, * add the entry but without the extern_learn flag. */ mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port); - if (mac_entry) - return lan966x_mac_learn(lan966x, port->chip_port, - addr, vid, ENTRYTYPE_LOCKED); + if (mac_entry) { + spin_unlock(&lan966x->mac_lock); + goto mac_learn; + } mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port); - if (!mac_entry) + if (!mac_entry) { + spin_unlock(&lan966x->mac_lock); return -ENOMEM; + } - spin_lock(&lan966x->mac_lock); list_add_tail(&mac_entry->list, &lan966x->mac_entries); spin_unlock(&lan966x->mac_lock); - lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED); lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev); +mac_learn: + lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED); + return 0; } @@ -269,8 +296,9 @@ int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr, list) { if (mac_entry->vid == vid && ether_addr_equal(addr, mac_entry->mac)) { - lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid, - ENTRYTYPE_LOCKED); + lan966x_mac_forget_locked(lan966x, mac_entry->mac, + mac_entry->vid, + ENTRYTYPE_LOCKED); list_del(&mac_entry->list); kfree(mac_entry); @@ -288,8 +316,8 @@ void lan966x_mac_purge_entries(struct lan966x *lan966x) spin_lock(&lan966x->mac_lock); list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) { - lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid, - ENTRYTYPE_LOCKED); + lan966x_mac_forget_locked(lan966x, mac_entry->mac, + mac_entry->vid, ENTRYTYPE_LOCKED); list_del(&mac_entry->list); kfree(mac_entry); @@ -325,10 +353,13 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, { struct lan966x_mac_entry *mac_entry, *tmp; unsigned char mac[ETH_ALEN] __aligned(2); + struct list_head mac_deleted_entries; u32 dest_idx; u32 column; u16 vid; + INIT_LIST_HEAD(&mac_deleted_entries); + spin_lock(&lan966x->mac_lock); list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) { bool found = false; @@ -362,20 +393,26 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, } if (!found) { - /* Notify the bridge that the entry doesn't exist - * anymore in the HW and remove the entry from the SW - * list - */ - lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, - mac_entry->mac, mac_entry->vid, - lan966x->ports[mac_entry->port_index]->dev); - list_del(&mac_entry->list); - kfree(mac_entry); + /* Move the entry from SW list to a tmp list such that + * it would be deleted later + */ + 
list_add_tail(&mac_entry->list, &mac_deleted_entries); } } spin_unlock(&lan966x->mac_lock); + list_for_each_entry_safe(mac_entry, tmp, &mac_deleted_entries, list) { + /* Notify the bridge that the entry doesn't exist + * anymore in the HW + */ + lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, + mac_entry->mac, mac_entry->vid, + lan966x->ports[mac_entry->port_index]->dev); + list_del(&mac_entry->list); + kfree(mac_entry); + } + /* Now go over the list of columns and see if any entry was not in the SW * list; that means the entry is new, so the bridge needs to be notified about * it. @@ -396,13 +433,20 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, if (WARN_ON(dest_idx >= lan966x->num_phys_ports)) continue; + spin_lock(&lan966x->mac_lock); + mac_entry = lan966x_mac_find_entry(lan966x, mac, vid, dest_idx); + if (mac_entry) { + spin_unlock(&lan966x->mac_lock); + continue; + } + mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx); - if (!mac_entry) + if (!mac_entry) { + spin_unlock(&lan966x->mac_lock); return; + } mac_entry->row = row; - - spin_lock(&lan966x->mac_lock); list_add_tail(&mac_entry->list, &lan966x->mac_entries); spin_unlock(&lan966x->mac_lock); @@ -424,6 +468,7 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x) lan966x, ANA_MACTINDX); while (1) { + spin_lock(&lan966x->mac_lock); lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT), ANA_MACACCESS_MAC_TABLE_CMD, lan966x, ANA_MACACCESS); @@ -447,12 +492,15 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x) stop = false; if (column == LAN966X_MAC_COLUMNS - 1 && - index == 0 && stop) + index == 0 && stop) { + spin_unlock(&lan966x->mac_lock); break; + } entry[column].mach = lan_rd(lan966x, ANA_MACHDATA); entry[column].macl = lan_rd(lan966x, ANA_MACLDATA); entry[column].maca = lan_rd(lan966x, ANA_MACACCESS); + spin_unlock(&lan966x->mac_lock); /* Once all the columns are read process them */ if (column == LAN966X_MAC_COLUMNS - 1) { diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 5784c4161e5e..1d6e3b641b2e 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -994,7 +994,7 @@ static int lan966x_probe(struct platform_device *pdev) struct fwnode_handle *ports, *portnp; struct lan966x *lan966x; u8 mac_addr[ETH_ALEN]; - int err, i; + int err; lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL); if (!lan966x) @@ -1025,11 +1025,7 @@ static int lan966x_probe(struct platform_device *pdev) if (err) return dev_err_probe(&pdev->dev, err, "Reset failed"); - i = 0; - fwnode_for_each_available_child_node(ports, portnp) - ++i; - - lan966x->num_phys_ports = i; + lan966x->num_phys_ports = NUM_PHYS_PORTS; lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports, sizeof(struct lan966x_port *), GFP_KERNEL); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index 3b86ddddc756..2787055c1847 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -34,6 +34,7 @@ /* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */ #define QSYS_Q_RSRV 95 +#define NUM_PHYS_PORTS 8 #define CPU_PORT 8 /* Reserved PGIDs */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c index 3429660cd2e5..5edc8b7176c8 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c @@ -396,6 +396,9 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev, u32 mact_entry; int res, err; + if (!sparx5_netdevice_check(dev)) + return -EOPNOTSUPP; + if (netif_is_bridge_master(v->obj.orig_dev)) { sparx5_mact_learn(spx5, PGID_CPU, v->addr, v->vid); return 0; @@ -466,6 +469,9 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev, u32 mact_entry, res, pgid_entry[3]; int err; + if (!sparx5_netdevice_check(dev)) + return -EOPNOTSUPP; + if (netif_is_bridge_master(v->obj.orig_dev)) { sparx5_mact_forget(spx5, v->addr, v->vid); return 0; diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c index 083fddd263ec..8e3894cf5f7c 100644 --- a/drivers/net/ethernet/mscc/ocelot_fdma.c +++ b/drivers/net/ethernet/mscc/ocelot_fdma.c @@ -94,19 +94,18 @@ static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma, ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan)); } +static u32 ocelot_fdma_read_ch_safe(struct ocelot *ocelot) +{ + return ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE); +} + static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan) { - unsigned long timeout; u32 safe; - timeout = jiffies + usecs_to_jiffies(OCELOT_FDMA_CH_SAFE_TIMEOUT_US); - do { - safe = ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE); - if (safe & BIT(chan)) - return 0; - } while (time_after(jiffies, timeout)); - - return -ETIMEDOUT; + return readx_poll_timeout_atomic(ocelot_fdma_read_ch_safe, ocelot, safe, + safe & BIT(chan), 0, + OCELOT_FDMA_CH_SAFE_TIMEOUT_US); } static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index e31f8fbbc696..df2ab5cbd49b 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -4233,7 +4233,7 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog) } /* If the chain is ended by an load/store pair then this - * could serve as the new head of the the next chain. + * could serve as the new head of the next chain. */ if (curr_pair_is_memcpy(meta1, meta2)) { head_ld_meta = meta1; diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 0147de405365..ffb6f6d05a07 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -474,7 +474,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun, set_tun->ttl = ip4_dst_hoplimit(&rt->dst); ip_rt_put(rt); } else { - set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; + set_tun->ttl = READ_ONCE(net->ipv4.sysctl_ip_default_ttl); } } diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 6bf3ec448e7e..97dcf8db7ed2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -447,7 +447,8 @@ void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app, static void nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, - void *flow, struct neighbour *neigh, bool is_ipv6) + void *flow, struct neighbour *neigh, bool is_ipv6, + bool override) { bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead; size_t neigh_size = is_ipv6 ? 
sizeof(struct nfp_tun_neigh_v6) : @@ -546,6 +547,13 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, if (nn_entry->flow) list_del(&nn_entry->list_head); kfree(nn_entry); + } else if (nn_entry && !neigh_invalid && override) { + mtype = is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 : + NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; + nfp_tun_link_predt_entries(app, nn_entry); + nfp_flower_xmit_tun_conf(app, mtype, neigh_size, + nn_entry->payload, + GFP_ATOMIC); } spin_unlock_bh(&priv->predt_lock); @@ -610,7 +618,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, dst_release(dst); } - nfp_tun_write_neigh(n->dev, app, &flow6, n, true); + nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false); #else return NOTIFY_DONE; #endif /* CONFIG_IPV6 */ @@ -633,7 +641,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, ip_rt_put(rt); } - nfp_tun_write_neigh(n->dev, app, &flow4, n, false); + nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false); } #else return NOTIFY_DONE; @@ -676,7 +684,7 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb) ip_rt_put(rt); if (!n) goto fail_rcu_unlock; - nfp_tun_write_neigh(n->dev, app, &flow, n, false); + nfp_tun_write_neigh(n->dev, app, &flow, n, false, true); neigh_release(n); rcu_read_unlock(); return; @@ -718,7 +726,7 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb) if (!n) goto fail_rcu_unlock; - nfp_tun_write_neigh(n->dev, app, &flow, n, true); + nfp_tun_write_neigh(n->dev, app, &flow, n, true, true); neigh_release(n); rcu_read_unlock(); return; diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c index e509d6dcba5c..805071d64a20 100644 --- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c +++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c @@ -125,17 +125,18 @@ nfp_nfdk_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, static int nfp_nfdk_tx_maybe_close_block(struct nfp_net_tx_ring *tx_ring, - unsigned int nr_frags, struct sk_buff *skb) + struct sk_buff *skb) { unsigned int n_descs, wr_p, nop_slots; const skb_frag_t *frag, *fend; struct nfp_nfdk_tx_desc *txd; + unsigned int nr_frags; unsigned int wr_idx; int err; recount_descs: n_descs = nfp_nfdk_headlen_to_segs(skb_headlen(skb)); - + nr_frags = skb_shinfo(skb)->nr_frags; frag = skb_shinfo(skb)->frags; fend = frag + nr_frags; for (; frag < fend; frag++) @@ -281,10 +282,13 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev) if (unlikely((int)metadata < 0)) goto err_flush; - nr_frags = skb_shinfo(skb)->nr_frags; - if (nfp_nfdk_tx_maybe_close_block(tx_ring, nr_frags, skb)) + if (nfp_nfdk_tx_maybe_close_block(tx_ring, skb)) goto err_flush; + /* nr_frags will change after skb_linearize, so we get nr_frags only + * after the nfp_nfdk_tx_maybe_close_block() call + */ + nr_frags = skb_shinfo(skb)->nr_frags; /* DMA map all */ wr_idx = D_IDX(tx_ring, tx_ring->wr_p); txd = &tx_ring->ktxds[wr_idx]; @@ -310,7 +314,16 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev) /* FIELD_PREP() implicitly truncates to chunk */ dma_len -= 1; - dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) | + + /* We will do our best to pass as much data as we can in the descriptor, + * and we need to make sure the first descriptor includes the whole head, + * since there is a limitation on the firmware side. Sometimes the value of + * dma_len bitwise-ANDed with NFDK_DESC_TX_DMA_LEN_HEAD will be less than + * headlen.
+ */ + dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, + dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ? + NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) | FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type); txd->dma_len_type = cpu_to_le16(dlen_type); @@ -925,7 +938,9 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring, /* FIELD_PREP() implicitly truncates to chunk */ dma_len -= 1; - dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) | + dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, + dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ? + NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) | FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type); txd->dma_len_type = cpu_to_le16(dlen_type); @@ -1303,7 +1318,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, skb_push(skb, 4)); } - if (nfp_nfdk_tx_maybe_close_block(tx_ring, 0, skb)) + if (nfp_nfdk_tx_maybe_close_block(tx_ring, skb)) goto err_free; /* DMA map all */ @@ -1328,7 +1343,9 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, txbuf++; dma_len -= 1; - dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) | + dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, + dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ? + NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) | FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type); txd->dma_len_type = cpu_to_le16(dlen_type); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 3098d6672192..1b7fdb4f056b 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4190,7 +4190,6 @@ static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts) static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, struct sk_buff *skb, u32 *opts) { - u32 transport_offset = (u32)skb_transport_offset(skb); struct skb_shared_info *shinfo = skb_shinfo(skb); u32 mss = shinfo->gso_size; @@ -4207,7 +4206,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, WARN_ON_ONCE(1); } - opts[0] |= transport_offset << GTTCPHO_SHIFT; + opts[0] |= skb_transport_offset(skb) << GTTCPHO_SHIFT; opts[1] |= mss << TD1_MSS_SHIFT; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 ip_protocol; @@ -4235,7 +4234,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, else WARN_ON_ONCE(1); - opts[1] |= transport_offset << TCPHO_SHIFT; + opts[1] |= skb_transport_offset(skb) << TCPHO_SHIFT; } else { unsigned int padto = rtl_quirk_packet_padto(tp, skb); @@ -4402,14 +4401,13 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { - int transport_offset = skb_transport_offset(skb); struct rtl8169_private *tp = netdev_priv(dev); if (skb_is_gso(skb)) { if (tp->mac_version == RTL_GIGA_MAC_VER_34) features = rtl8168evl_fix_tso(skb, features); - if (transport_offset > GTTCPHO_MAX && + if (skb_transport_offset(skb) > GTTCPHO_MAX && rtl_chip_supports_csum_v2(tp)) features &= ~NETIF_F_ALL_TSO; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { @@ -4420,7 +4418,7 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb, if (rtl_quirk_packet_padto(tp, skb)) features &= ~NETIF_F_CSUM_MASK; - if (transport_offset > TCPHO_MAX && + if (skb_transport_offset(skb) > TCPHO_MAX && rtl_chip_supports_csum_v2(tp)) features &= ~NETIF_F_CSUM_MASK; } diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 186cb28c03bd..8b62ce21aff3 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1932,7 +1932,10 @@ static int 
efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) efx_update_sw_stats(efx, stats); out: + /* releasing a DMA coherent buffer with BH disabled can panic */ + spin_unlock_bh(&efx->stats_lock); efx_nic_free_buffer(efx, &stats_buf); + spin_lock_bh(&efx->stats_lock); return rc; } diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 7f5aa4a8c451..92550c7e85ce 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -408,8 +408,9 @@ fail1: static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) { struct pci_dev *dev = efx->pci_dev; + struct efx_ef10_nic_data *nic_data = efx->nic_data; unsigned int vfs_assigned = pci_vfs_assigned(dev); - int rc = 0; + int i, rc = 0; if (vfs_assigned && !force) { netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " @@ -417,10 +418,13 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) return -EBUSY; } - if (!vfs_assigned) + if (!vfs_assigned) { + for (i = 0; i < efx->vf_count; i++) + nic_data->vf[i].pci_dev = NULL; pci_disable_sriov(dev); - else + } else { rc = -EBUSY; + } efx_ef10_sriov_free_vf_vswitching(efx); efx->vf_count = 0; diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 4625f85acab2..10ad0b93d283 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1100,7 +1100,29 @@ static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb) tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type); if (tx_queue && tx_queue->timestamping) { + /* This code invokes normal driver TX code which is always + * protected from softirqs when called from generic TX code, + * which in turn disables preemption. Look at __dev_queue_xmit + * which uses rcu_read_lock_bh disabling preemption for RCU + * plus disabling softirqs. We do not need RCU reader + * protection here. + * + * Although it is theoretically safe for the current PTP TX/RX + * code to run without disabling softirqs, there are three good + * reasons for doing so: + * + * 1) The code invoked is mainly implemented for non-PTP + * packets and it is always executed with softirqs + * disabled. + * 2) This being a single PTP packet, better to not + * interrupt its processing by softirqs which can lead + * to high latencies. + * 3) netdev_xmit_more checks preemption is disabled and + * triggers a BUG_ON if not.
+ */ + local_bh_disable(); efx_enqueue_skb(tx_queue, skb); + local_bh_enable(); } else { WARN_ONCE(1, "PTP channel has no timestamped tx queue\n"); dev_kfree_skb_any(skb); diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index a0654e88444c..0329caf63279 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -1515,14 +1515,14 @@ static void epic_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct epic_private *ep = netdev_priv(dev); + unregister_netdev(dev); dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); - unregister_netdev(dev); pci_iounmap(pdev, ep->ioaddr); - pci_release_regions(pdev); free_netdev(dev); + pci_release_regions(pdev); pci_disable_device(pdev); /* pci_power_off(pdev, -1); */ } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index bc91fd867dcd..358fc26f8d1f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -361,6 +361,7 @@ bypass_clk_reset_gpio: data->fix_mac_speed = tegra_eqos_fix_speed; data->init = tegra_eqos_init; data->bsp_priv = eqos; + data->sph_disable = 1; err = tegra_eqos_init(pdev, eqos); if (err < 0) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c index 9a6d819b84ae..378b4dd826bb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c @@ -273,7 +273,8 @@ static int ingenic_mac_probe(struct platform_device *pdev) mac->tx_delay = tx_delay_ps * 1000; } else { dev_err(&pdev->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps); - return -EINVAL; + ret = -EINVAL; + goto err_remove_config_dt; } } @@ -283,7 +284,8 @@ static int ingenic_mac_probe(struct platform_device *pdev) mac->rx_delay = rx_delay_ps * 1000; } else { dev_err(&pdev->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps); - return -EINVAL; + ret = -EINVAL; + goto err_remove_config_dt; } } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 38fe77d1035e..3fe720c5dc9f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -298,6 +298,11 @@ static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr, *art_time = ns; } +static int stmmac_cross_ts_isr(struct stmmac_priv *priv) +{ + return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE); +} + static int intel_crosststamp(ktime_t *device, struct system_counterval_t *system, void *ctx) @@ -313,8 +318,6 @@ static int intel_crosststamp(ktime_t *device, u32 num_snapshot; u32 gpio_value; u32 acr_value; - int ret; - u32 v; int i; if (!boot_cpu_has(X86_FEATURE_ART)) @@ -328,6 +331,8 @@ static int intel_crosststamp(ktime_t *device, if (priv->plat->ext_snapshot_en) return -EBUSY; + priv->plat->int_snapshot_en = 1; + mutex_lock(&priv->aux_ts_lock); /* Enable Internal snapshot trigger */ acr_value = readl(ptpaddr + PTP_ACR); @@ -347,6 +352,7 @@ static int intel_crosststamp(ktime_t *device, break; default: mutex_unlock(&priv->aux_ts_lock); + priv->plat->int_snapshot_en = 0; return -EINVAL; } writel(acr_value, ptpaddr + PTP_ACR); @@ -368,13 +374,12 @@ static int intel_crosststamp(ktime_t *device, gpio_value |= GMAC_GPO1; 
writel(gpio_value, ioaddr + GMAC_GPIO_STATUS); - /* Poll for time sync operation done */ - ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v, - (v & GMAC_INT_TSIE), 100, 10000); - - if (ret == -ETIMEDOUT) { - pr_err("%s: Wait for time sync operation timeout\n", __func__); - return ret; + /* Time sync done Indication - Interrupt method */ + if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait, + stmmac_cross_ts_isr(priv), + HZ / 100)) { + priv->plat->int_snapshot_en = 0; + return -ETIMEDOUT; } num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) & @@ -392,6 +397,7 @@ static int intel_crosststamp(ktime_t *device, } system->cycles *= intel_priv->crossts_adj; + priv->plat->int_snapshot_en = 0; return 0; } @@ -576,6 +582,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, plat->has_crossts = true; plat->crosststamp = intel_crosststamp; + plat->int_snapshot_en = 0; /* Setup MSI vector offset specific to Intel mGbE controller */ plat->msi_mac_vec = 29; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index 6ff88df58767..d42e1afb6521 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -576,32 +576,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv) } } - ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks); - if (ret) { - dev_err(plat->dev, "failed to enable clks, err = %d\n", ret); - return ret; - } - - ret = clk_prepare_enable(plat->rmii_internal_clk); - if (ret) { - dev_err(plat->dev, "failed to enable rmii internal clk, err = %d\n", ret); - goto err_clk; - } - return 0; - -err_clk: - clk_bulk_disable_unprepare(variant->num_clks, plat->clks); - return ret; -} - -static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv) -{ - struct mediatek_dwmac_plat_data *plat = priv; - const struct mediatek_dwmac_variant *variant = plat->variant; - - clk_disable_unprepare(plat->rmii_internal_clk); - clk_bulk_disable_unprepare(variant->num_clks, plat->clks); } static int mediatek_dwmac_clks_config(void *priv, bool enabled) @@ -643,7 +618,6 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev, plat->addr64 = priv_plat->variant->dma_bit_mask; plat->bsp_priv = priv_plat; plat->init = mediatek_dwmac_init; - plat->exit = mediatek_dwmac_exit; plat->clks_config = mediatek_dwmac_clks_config; if (priv_plat->variant->dwmac_fix_mac_speed) plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed; @@ -712,13 +686,33 @@ static int mediatek_dwmac_probe(struct platform_device *pdev) mediatek_dwmac_common_data(pdev, plat_dat, priv_plat); mediatek_dwmac_init(pdev, priv_plat); + ret = mediatek_dwmac_clks_config(priv_plat, true); + if (ret) + goto err_remove_config_dt; + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) { - stmmac_remove_config_dt(pdev, plat_dat); - return ret; - } + if (ret) + goto err_drv_probe; return 0; + +err_drv_probe: + mediatek_dwmac_clks_config(priv_plat, false); +err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; +} + +static int mediatek_dwmac_remove(struct platform_device *pdev) +{ + struct mediatek_dwmac_plat_data *priv_plat = get_stmmac_bsp_priv(&pdev->dev); + int ret; + + ret = stmmac_pltfr_remove(pdev); + mediatek_dwmac_clks_config(priv_plat, false); + + return ret; } static const struct of_device_id mediatek_dwmac_match[] = { @@ -733,7 +727,7 @@ MODULE_DEVICE_TABLE(of, mediatek_dwmac_match); static struct 
platform_driver mediatek_dwmac_driver = { .probe = mediatek_dwmac_probe, - .remove = stmmac_pltfr_remove, + .remove = mediatek_dwmac_remove, .driver = { .name = "dwmac-mediatek", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 462ca7ed095a..71dad409f78b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -150,7 +150,8 @@ #define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \ GMAC_INT_PCS_ANE) -#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN) +#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN | \ + GMAC_INT_TSIE) enum dwmac4_irq_status { time_stamp_irq = 0x00001000, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index fd41db65fe1d..d8f1fbc25bdd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -23,6 +23,7 @@ static void dwmac4_core_init(struct mac_device_info *hw, struct net_device *dev) { + struct stmmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = hw->pcsr; u32 value = readl(ioaddr + GMAC_CONFIG); @@ -58,6 +59,9 @@ static void dwmac4_core_init(struct mac_device_info *hw, value |= GMAC_INT_FPE_EN; writel(value, ioaddr + GMAC_INT_EN); + + if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE) + init_waitqueue_head(&priv->tstamp_busy_wait); } static void dwmac4_rx_queue_enable(struct mac_device_info *hw, @@ -219,6 +223,9 @@ static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan) if (queue == 0 || queue == 4) { value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK; value |= MTL_RXQ_DMA_Q04MDMACH(chan); + } else if (queue > 4) { + value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4); + value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4); } else { value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue); value |= MTL_RXQ_DMA_QXMDMACH(chan, queue); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 57970ae2178d..f9e83964aa7e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -266,6 +266,7 @@ struct stmmac_priv { rwlock_t ptp_lock; /* Protects auxiliary snapshot registers from concurrent access. 
*/ struct mutex aux_ts_lock; + wait_queue_head_t tstamp_busy_wait; void __iomem *mmcaddr; void __iomem *ptpaddr; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index abfb3cd5958d..9c3055ee2608 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -803,14 +803,6 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, netdev_warn(priv->dev, "Setting EEE tx-lpi is not supported\n"); - if (priv->hw->xpcs) { - ret = xpcs_config_eee(priv->hw->xpcs, - priv->plat->mult_fact_100ns, - edata->eee_enabled); - if (ret) - return ret; - } - if (!edata->eee_enabled) stmmac_disable_eee_mode(priv); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 92d32940aff0..764832f4dae1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -179,6 +179,11 @@ static void timestamp_interrupt(struct stmmac_priv *priv) u64 ptp_time; int i; + if (priv->plat->int_snapshot_en) { + wake_up(&priv->tstamp_busy_wait); + return; + } + tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE; if (!tsync_int) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d1a7cf4567bc..c5f33630e771 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -834,19 +834,10 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) struct timespec64 now; u32 sec_inc = 0; u64 temp = 0; - int ret; if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; - ret = clk_prepare_enable(priv->plat->clk_ptp_ref); - if (ret < 0) { - netdev_warn(priv->dev, - "failed to enable PTP reference clock: %pe\n", - ERR_PTR(ret)); - return ret; - } - stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); priv->systime_flags = systime_flags; @@ -3270,6 +3261,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) stmmac_mmc_setup(priv); + if (ptp_register) { + ret = clk_prepare_enable(priv->plat->clk_ptp_ref); + if (ret < 0) + netdev_warn(priv->dev, + "failed to enable PTP reference clock: %pe\n", + ERR_PTR(ret)); + } + ret = stmmac_init_ptp(priv); if (ret == -EOPNOTSUPP) netdev_info(priv->dev, "PTP not supported by HW\n"); @@ -7213,8 +7212,6 @@ int stmmac_dvr_remove(struct device *dev) netdev_info(priv->dev, "%s: removing driver", __func__); pm_runtime_get_sync(dev); - pm_runtime_disable(dev); - pm_runtime_put_noidle(dev); stmmac_stop_all_dma(priv); stmmac_mac_set(priv, priv->ioaddr, false); @@ -7241,6 +7238,9 @@ int stmmac_dvr_remove(struct device *dev) mutex_destroy(&priv->lock); bitmap_free(priv->af_xdp_zc_qps); + pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + return 0; } EXPORT_SYMBOL_GPL(stmmac_dvr_remove); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 11e1055e8260..9f5cac4000da 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -815,7 +815,13 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev) if (ret) return ret; - stmmac_init_tstamp_counter(priv, priv->systime_flags); + ret = clk_prepare_enable(priv->plat->clk_ptp_ref); + if (ret < 0) { + 
netdev_warn(priv->dev, + "failed to enable PTP reference clock: %pe\n", + ERR_PTR(ret)); + return ret; + } } return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index e45fb191d8e6..4d11980dcd64 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -175,11 +175,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp, struct stmmac_priv *priv = container_of(ptp, struct stmmac_priv, ptp_clock_ops); void __iomem *ptpaddr = priv->ptpaddr; - void __iomem *ioaddr = priv->hw->pcsr; struct stmmac_pps_cfg *cfg; - u32 intr_value, acr_value; int ret = -EOPNOTSUPP; unsigned long flags; + u32 acr_value; switch (rq->type) { case PTP_CLK_REQ_PEROUT: @@ -213,19 +212,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp, netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n", priv->plat->ext_snapshot_num >> PTP_ACR_ATSEN_SHIFT); - /* Enable Timestamp Interrupt */ - intr_value = readl(ioaddr + GMAC_INT_EN); - intr_value |= GMAC_INT_TSIE; - writel(intr_value, ioaddr + GMAC_INT_EN); - } else { netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n", priv->plat->ext_snapshot_num >> PTP_ACR_ATSEN_SHIFT); - /* Disable Timestamp Interrupt */ - intr_value = readl(ioaddr + GMAC_INT_EN); - intr_value &= ~GMAC_INT_TSIE; - writel(intr_value, ioaddr + GMAC_INT_EN); } writel(acr_value, ptpaddr + PTP_ACR); mutex_unlock(&priv->aux_ts_lock); diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 77e5dffb558f..8594ee839628 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -545,43 +545,24 @@ static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs) static void display_link_mode(struct happy_meal *hp, void __iomem *tregs) { - printk(KERN_INFO "%s: Link is up using ", hp->dev->name); - if (hp->tcvr_type == external) - printk("external "); - else - printk("internal "); - printk("transceiver at "); hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA); - if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) { - if (hp->sw_lpa & LPA_100FULL) - printk("100Mb/s, Full Duplex.\n"); - else - printk("100Mb/s, Half Duplex.\n"); - } else { - if (hp->sw_lpa & LPA_10FULL) - printk("10Mb/s, Full Duplex.\n"); - else - printk("10Mb/s, Half Duplex.\n"); - } + + netdev_info(hp->dev, + "Link is up using %s transceiver at %dMb/s, %s Duplex.\n", + hp->tcvr_type == external ? "external" : "internal", + hp->sw_lpa & (LPA_100HALF | LPA_100FULL) ? 100 : 10, + hp->sw_lpa & (LPA_100FULL | LPA_10FULL) ? "Full" : "Half"); } static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs) { - printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name); - if (hp->tcvr_type == external) - printk("external "); - else - printk("internal "); - printk("transceiver at "); hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); - if (hp->sw_bmcr & BMCR_SPEED100) - printk("100Mb/s, "); - else - printk("10Mb/s, "); - if (hp->sw_bmcr & BMCR_FULLDPLX) - printk("Full Duplex.\n"); - else - printk("Half Duplex.\n"); + + netdev_info(hp->dev, + "Link has been forced up using %s transceiver at %dMb/s, %s Duplex.\n", + hp->tcvr_type == external ? "external" : "internal", + hp->sw_bmcr & BMCR_SPEED100 ? 100 : 10, + hp->sw_bmcr & BMCR_FULLDPLX ? 
"Full" : "Half"); } static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index fb92d4c1547d..f4a6b590a1e3 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2467,7 +2467,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) port->port_id, ret); goto dl_port_unreg; } - devlink_port_type_eth_set(dl_port, port->ndev); } devlink_register(common->devlink); return ret; @@ -2511,6 +2510,7 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common) static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common) { struct device *dev = common->dev; + struct devlink_port *dl_port; struct am65_cpsw_port *port; int ret = 0, i; @@ -2527,6 +2527,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common) return ret; } + ret = am65_cpsw_nuss_register_devlink(common); + if (ret) + return ret; + for (i = 0; i < common->port_num; i++) { port = &common->ports[i]; @@ -2539,25 +2543,24 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common) i, ret); goto err_cleanup_ndev; } + + dl_port = &port->devlink_port; + devlink_port_type_eth_set(dl_port, port->ndev); } ret = am65_cpsw_register_notifiers(common); if (ret) goto err_cleanup_ndev; - ret = am65_cpsw_nuss_register_devlink(common); - if (ret) - goto clean_unregister_notifiers; - /* can't auto unregister ndev using devm_add_action() due to * devres release sequence in DD core for DMA */ return 0; -clean_unregister_notifiers: - am65_cpsw_unregister_notifiers(common); + err_cleanup_ndev: am65_cpsw_nuss_cleanup_ndev(common); + am65_cpsw_unregister_devlink(common); return ret; } diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h index 3233d145fd87..495e85abe50b 100644 --- a/drivers/net/ipa/ipa_qmi_msg.h +++ b/drivers/net/ipa/ipa_qmi_msg.h @@ -214,7 +214,7 @@ struct ipa_init_modem_driver_req { /* The response to a IPA_QMI_INIT_DRIVER request begins with a standard * QMI response, but contains other information as well. Currently we - * simply wait for the the INIT_DRIVER transaction to complete and + * simply wait for the INIT_DRIVER transaction to complete and * ignore any other data that might be returned. */ struct ipa_init_modem_driver_rsp { diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 817577e713d7..f354fad05714 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -243,6 +243,7 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb) #define DEFAULT_SEND_SCI true #define DEFAULT_ENCRYPT false #define DEFAULT_ENCODING_SA 0 +#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1)) static bool send_sci(const struct macsec_secy *secy) { @@ -1697,7 +1698,7 @@ static bool validate_add_rxsa(struct nlattr **attrs) return false; if (attrs[MACSEC_SA_ATTR_PN] && - *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0) + nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) return false; if (attrs[MACSEC_SA_ATTR_ACTIVE]) { @@ -1753,7 +1754,8 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) } pn_len = secy->xpn ? 
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; - if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { + if (tb_sa[MACSEC_SA_ATTR_PN] && + nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n", nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); rtnl_unlock(); @@ -1769,7 +1771,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) { pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n", nla_len(tb_sa[MACSEC_SA_ATTR_SALT]), - MACSEC_SA_ATTR_SALT); + MACSEC_SALT_LEN); rtnl_unlock(); return -EINVAL; } @@ -1842,7 +1844,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) return 0; cleanup: - kfree(rx_sa); + macsec_rxsa_put(rx_sa); rtnl_unlock(); return err; } @@ -1939,7 +1941,7 @@ static bool validate_add_txsa(struct nlattr **attrs) if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) return false; - if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) + if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) return false; if (attrs[MACSEC_SA_ATTR_ACTIVE]) { @@ -2011,7 +2013,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) { pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n", nla_len(tb_sa[MACSEC_SA_ATTR_SALT]), - MACSEC_SA_ATTR_SALT); + MACSEC_SALT_LEN); rtnl_unlock(); return -EINVAL; } @@ -2085,7 +2087,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) cleanup: secy->operational = was_operational; - kfree(tx_sa); + macsec_txsa_put(tx_sa); rtnl_unlock(); return err; } @@ -2293,7 +2295,7 @@ static bool validate_upd_sa(struct nlattr **attrs) if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) return false; - if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) + if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) return false; if (attrs[MACSEC_SA_ATTR_ACTIVE]) { @@ -3745,9 +3747,6 @@ static int macsec_changelink_common(struct net_device *dev, secy->operational = tx_sa && tx_sa->active; } - if (data[IFLA_MACSEC_WINDOW]) - secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); - if (data[IFLA_MACSEC_ENCRYPT]) tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); @@ -3793,6 +3792,16 @@ static int macsec_changelink_common(struct net_device *dev, } } + if (data[IFLA_MACSEC_WINDOW]) { + secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); + + /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window + * for XPN cipher suites */ + if (secy->xpn && + secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW) + return -EINVAL; + } + return 0; } @@ -3822,7 +3831,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], ret = macsec_changelink_common(dev, data); if (ret) - return ret; + goto cleanup; /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(macsec)) { diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 4cfd05c15aee..d25fbb9caeba 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -896,7 +896,7 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs, */ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS); if (ret < 0) - return false; + return ret; if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) { int speed_value; diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c index 457896337505..0f1e617a26c9 100644 --- a/drivers/net/phy/ax88796b.c +++ 
b/drivers/net/phy/ax88796b.c @@ -88,8 +88,10 @@ static void asix_ax88772a_link_change_notify(struct phy_device *phydev) /* Reset PHY, otherwise MII_LPA will provide outdated information. * This issue is reproducible only with some link partner PHYs */ - if (phydev->state == PHY_NOLINK && phydev->drv->soft_reset) - phydev->drv->soft_reset(phydev); + if (phydev->state == PHY_NOLINK) { + phy_init_hw(phydev); + phy_start_aneg(phydev); + } } static struct phy_driver asix_driver[] = { diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index e6ad3a494d32..8549e0e356c9 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -229,9 +229,7 @@ static int dp83822_config_intr(struct phy_device *phydev) if (misr_status < 0) return misr_status; - misr_status |= (DP83822_RX_ERR_HF_INT_EN | - DP83822_FALSE_CARRIER_HF_INT_EN | - DP83822_LINK_STAT_INT_EN | + misr_status |= (DP83822_LINK_STAT_INT_EN | DP83822_ENERGY_DET_INT_EN | DP83822_LINK_QUAL_INT_EN); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index ef62f357b76d..8d3ee3a6495b 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -31,6 +31,7 @@ #include <linux/io.h> #include <linux/uaccess.h> #include <linux/atomic.h> +#include <linux/suspend.h> #include <net/netlink.h> #include <net/genetlink.h> #include <net/sock.h> @@ -976,6 +977,28 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) struct phy_driver *drv = phydev->drv; irqreturn_t ret; + /* Wakeup interrupts may occur during a system sleep transition. + * Postpone handling until the PHY has resumed. + */ + if (IS_ENABLED(CONFIG_PM_SLEEP) && phydev->irq_suspended) { + struct net_device *netdev = phydev->attached_dev; + + if (netdev) { + struct device *parent = netdev->dev.parent; + + if (netdev->wol_enabled) + pm_system_wakeup(); + else if (device_may_wakeup(&netdev->dev)) + pm_wakeup_dev_event(&netdev->dev, 0, true); + else if (parent && device_may_wakeup(parent)) + pm_wakeup_dev_event(parent, 0, true); + } + + phydev->irq_rerun = 1; + disable_irq_nosync(irq); + return IRQ_HANDLED; + } + mutex_lock(&phydev->lock); ret = drv->handle_interrupt(phydev); mutex_unlock(&phydev->lock); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 431a8719c635..46acddd865a7 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -278,6 +278,15 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev) if (phydev->mac_managed_pm) return 0; + /* Wakeup interrupts may occur during the system sleep transition when + * the PHY is inaccessible. Set flag to postpone handling until the PHY + * has resumed. Wait for concurrent interrupt handler to complete. + */ + if (phy_interrupt_is_valid(phydev)) { + phydev->irq_suspended = 1; + synchronize_irq(phydev->irq); + } + /* We must stop the state machine manually, otherwise it stops out of * control, possibly with the phydev->lock held. Upon resume, netdev * may call phy routines that try to grab the same lock, and that may @@ -315,6 +324,20 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev) if (ret < 0) return ret; no_resume: + if (phy_interrupt_is_valid(phydev)) { + phydev->irq_suspended = 0; + synchronize_irq(phydev->irq); + + /* Rerun interrupts which were postponed by phy_interrupt() + * because they occurred during the system sleep transition. 
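+	 * (Editorial note: phy_interrupt() disabled the line with
+	 * disable_irq_nosync() when it set irq_rerun, so the wakeup
+	 * event is not lost; it is replayed below via irq_wake_thread().)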
+ */ + if (phydev->irq_rerun) { + phydev->irq_rerun = 0; + enable_irq(phydev->irq); + irq_wake_thread(phydev->irq, phydev); + } + } + if (phydev->attached_dev && phydev->adjust_link) phy_start_machine(phydev); diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 9a5d5a10560f..e7b0e12cc75b 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -2516,7 +2516,7 @@ static int sfp_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sfp); - err = devm_add_action(sfp->dev, sfp_cleanup, sfp); + err = devm_add_action_or_reset(sfp->dev, sfp_cleanup, sfp); if (err < 0) return err; diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c index ff22b6b1c686..36803d932dff 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c @@ -450,6 +450,7 @@ static int bcm5421_init(struct mii_phy* phy) int can_low_power = 1; if (np == NULL || of_get_property(np, "no-autolowpower", NULL)) can_low_power = 0; + of_node_put(np); if (can_low_power) { /* Enable automatic low-power */ sungem_phy_write(phy, 0x1c, 0x9002); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 87a635aac008..259b2b84b2b3 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -273,6 +273,12 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, } } +static void tun_napi_enable(struct tun_file *tfile) +{ + if (tfile->napi_enabled) + napi_enable(&tfile->napi); +} + static void tun_napi_disable(struct tun_file *tfile) { if (tfile->napi_enabled) @@ -634,7 +640,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun = rtnl_dereference(tfile->tun); if (tun && clean) { - tun_napi_disable(tfile); + if (!tfile->detached) + tun_napi_disable(tfile); tun_napi_del(tfile); } @@ -653,8 +660,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean) if (clean) { RCU_INIT_POINTER(tfile->tun, NULL); sock_put(&tfile->sk); - } else + } else { tun_disable_queue(tun, tfile); + tun_napi_disable(tfile); + } synchronize_net(); tun_flow_delete_by_queue(tun, tun->numqueues + 1); @@ -727,6 +736,7 @@ static void tun_detach_all(struct net_device *dev) sock_put(&tfile->sk); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { + tun_napi_del(tfile); tun_enable_queue(tfile); tun_queue_purge(tfile); xdp_rxq_info_unreg(&tfile->xdp_rxq); @@ -807,6 +817,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, if (tfile->detached) { tun_enable_queue(tfile); + tun_napi_enable(tfile); } else { sock_hold(&tfile->sk); tun_napi_init(tun, tfile, napi, napi_frags); diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h index 2c81236c6c7c..45d3cc5cc355 100644 --- a/drivers/net/usb/asix.h +++ b/drivers/net/usb/asix.h @@ -126,8 +126,7 @@ AX_MEDIUM_RE) #define AX88772_MEDIUM_DEFAULT \ - (AX_MEDIUM_FD | AX_MEDIUM_RFC | \ - AX_MEDIUM_TFC | AX_MEDIUM_PS | \ + (AX_MEDIUM_FD | AX_MEDIUM_PS | \ AX_MEDIUM_AC | AX_MEDIUM_RE) /* AX88772 & AX88178 RX_CTL values */ diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 632fa6c1d5e3..b4a1b7abcfc9 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -431,6 +431,7 @@ void asix_adjust_link(struct net_device *netdev) asix_write_medium_mode(dev, mode, 0); phy_print_status(phydev); + usbnet_link_change(dev, phydev->link, 0); } int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm) diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 4704ed6f00ef..ac2d400d1d6c 100644 --- a/drivers/net/usb/ax88179_178a.c +++ 
b/drivers/net/usb/ax88179_178a.c @@ -1472,6 +1472,42 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) * are bundled into this buffer and where we can find an array of * per-packet metadata (which contains elements encoded into u16). */ + + /* SKB contents for current firmware: + * <packet 1> <padding> + * ... + * <packet N> <padding> + * <per-packet metadata entry 1> <dummy header> + * ... + * <per-packet metadata entry N> <dummy header> + * <padding2> <rx_hdr> + * + * where: + * <packet N> contains pkt_len bytes: + * 2 bytes of IP alignment pseudo header + * packet received + * <per-packet metadata entry N> contains 4 bytes: + * pkt_len and fields AX_RXHDR_* + * <padding> 0-7 bytes to terminate at + * 8 bytes boundary (64-bit). + * <padding2> 4 bytes to make rx_hdr terminate at + * 8 bytes boundary (64-bit) + * <dummy-header> contains 4 bytes: + * pkt_len=0 and AX_RXHDR_DROP_ERR + * <rx-hdr> contains 4 bytes: + * pkt_cnt and hdr_off (offset of + * <per-packet metadata entry 1>) + * + * pkt_cnt is number of entrys in the per-packet metadata. + * In current firmware there is 2 entrys per packet. + * The first points to the packet and the + * second is a dummy header. + * This was done probably to align fields in 64-bit and + * maintain compatibility with old firmware. + * This code assumes that <dummy header> and <padding2> are + * optional. + */ + if (skb->len < 4) return 0; skb_trim(skb, skb->len - 4); @@ -1485,51 +1521,66 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) /* Make sure that the bounds of the metadata array are inside the SKB * (and in front of the counter at the end). */ - if (pkt_cnt * 2 + hdr_off > skb->len) + if (pkt_cnt * 4 + hdr_off > skb->len) return 0; pkt_hdr = (u32 *)(skb->data + hdr_off); /* Packets must not overlap the metadata array */ skb_trim(skb, hdr_off); - for (; ; pkt_cnt--, pkt_hdr++) { + for (; pkt_cnt > 0; pkt_cnt--, pkt_hdr++) { + u16 pkt_len_plus_padd; u16 pkt_len; le32_to_cpus(pkt_hdr); pkt_len = (*pkt_hdr >> 16) & 0x1fff; + pkt_len_plus_padd = (pkt_len + 7) & 0xfff8; - if (pkt_len > skb->len) + /* Skip dummy header used for alignment + */ + if (pkt_len == 0) + continue; + + if (pkt_len_plus_padd > skb->len) return 0; /* Check CRC or runt packet */ - if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) && - pkt_len >= 2 + ETH_HLEN) { - bool last = (pkt_cnt == 0); - - if (last) { - ax_skb = skb; - } else { - ax_skb = skb_clone(skb, GFP_ATOMIC); - if (!ax_skb) - return 0; - } - ax_skb->len = pkt_len; - /* Skip IP alignment pseudo header */ - skb_pull(ax_skb, 2); - skb_set_tail_pointer(ax_skb, ax_skb->len); - ax_skb->truesize = pkt_len + sizeof(struct sk_buff); - ax88179_rx_checksum(ax_skb, pkt_hdr); + if ((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) || + pkt_len < 2 + ETH_HLEN) { + dev->net->stats.rx_errors++; + skb_pull(skb, pkt_len_plus_padd); + continue; + } - if (last) - return 1; + /* last packet */ + if (pkt_len_plus_padd == skb->len) { + skb_trim(skb, pkt_len); - usbnet_skb_return(dev, ax_skb); + /* Skip IP alignment pseudo header */ + skb_pull(skb, 2); + + skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd); + ax88179_rx_checksum(skb, pkt_hdr); + return 1; } - /* Trim this packet away from the SKB */ - if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8)) + ax_skb = skb_clone(skb, GFP_ATOMIC); + if (!ax_skb) return 0; + skb_trim(ax_skb, pkt_len); + + /* Skip IP alignment pseudo header */ + skb_pull(ax_skb, 2); + + skb->truesize = pkt_len_plus_padd + + SKB_DATA_ALIGN(sizeof(struct sk_buff)); + 
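+		/* Editorial note: pkt_len_plus_padd = (pkt_len + 7) & 0xfff8
+		 * rounds each packet up to the 8-byte boundary described in
+		 * the layout comment above, so the skb_pull() below advances
+		 * past exactly one packet plus its padding. */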
ax88179_rx_checksum(ax_skb, pkt_hdr); + usbnet_skb_return(dev, ax_skb); + + skb_pull(skb, pkt_len_plus_padd); } + + return 0; } static struct sk_buff * diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index e7fe9c0f63a9..1a376ed45d7a 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -781,7 +781,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id intf->altsetting->desc.bInterfaceNumber, 1)) { dev_err(dev, "Can't set altsetting 1.\n"); ret = -EIO; - goto fail_mem;; + goto fail_mem; } netdev = alloc_etherdev(sizeof(struct catc)); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 7389d6ef8569..0f6efaabaa32 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -32,7 +32,7 @@ #define NETNEXT_VERSION "12" /* Information for net */ -#define NET_VERSION "12" +#define NET_VERSION "13" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" @@ -2156,7 +2156,7 @@ static inline void rtl_rx_vlan_tag(struct rx_desc *desc, struct sk_buff *skb) } static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, - struct sk_buff *skb, u32 len, u32 transport_offset) + struct sk_buff *skb, u32 len) { u32 mss = skb_shinfo(skb)->gso_size; u32 opts1, opts2 = 0; @@ -2167,6 +2167,8 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, opts1 = len | TX_FS | TX_LS; if (mss) { + u32 transport_offset = (u32)skb_transport_offset(skb); + if (transport_offset > GTTCPHO_MAX) { netif_warn(tp, tx_err, tp->netdev, "Invalid transport offset 0x%x for TSO\n", @@ -2197,6 +2199,7 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, opts1 |= transport_offset << GTTCPHO_SHIFT; opts2 |= min(mss, MSS_MAX) << MSS_SHIFT; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u32 transport_offset = (u32)skb_transport_offset(skb); u8 ip_protocol; if (transport_offset > TCPHO_MAX) { @@ -2260,7 +2263,6 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) struct tx_desc *tx_desc; struct sk_buff *skb; unsigned int len; - u32 offset; skb = __skb_dequeue(&skb_head); if (!skb) @@ -2276,9 +2278,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) tx_data = tx_agg_align(tx_data); tx_desc = (struct tx_desc *)tx_data; - offset = (u32)skb_transport_offset(skb); - - if (r8152_tx_csum(tp, tx_desc, skb, skb->len, offset)) { + if (r8152_tx_csum(tp, tx_desc, skb, skb->len)) { r8152_csum_workaround(tp, skb, &skb_head); continue; } @@ -2759,9 +2759,9 @@ rtl8152_features_check(struct sk_buff *skb, struct net_device *dev, { u32 mss = skb_shinfo(skb)->gso_size; int max_offset = mss ? 
GTTCPHO_MAX : TCPHO_MAX; - int offset = skb_transport_offset(skb); - if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset) + if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && + skb_transport_offset(skb) > max_offset) features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz) features &= ~NETIF_F_GSO_MASK; @@ -5917,7 +5917,8 @@ static void r8153_enter_oob(struct r8152 *tp) wait_oob_link_list_ready(tp); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT); switch (tp->version) { case RTL_VER_03: @@ -5953,6 +5954,10 @@ static void r8153_enter_oob(struct r8152 *tp) ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); + ocp_data |= MCU_BORW_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); + rxdy_gated_en(tp, false); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); @@ -6555,6 +6560,9 @@ static void rtl8156_down(struct r8152 *tp) rtl_disable(tp); rtl_reset_bmu(tp); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT); + /* Clear teredo wake event. bit[15:8] is the teredo wakeup * type. Set it to zero. bits[7:0] are the W1C bits about * the events. Set them to all 1 to clear them. @@ -6565,6 +6573,10 @@ static void rtl8156_down(struct r8152 *tp) ocp_data |= NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); + ocp_data |= MCU_BORW_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); + rtl_rx_vlan_en(tp, true); rxdy_gated_en(tp, false); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 1cb6dab3e2d0..78a92751ce4c 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -2004,7 +2004,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, cmd, reqtype, value, index, size); if (size) { - buf = kmalloc(size, GFP_KERNEL); + buf = kmalloc(size, GFP_NOIO); if (!buf) goto out; } @@ -2036,7 +2036,7 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, cmd, reqtype, value, index, size); if (data) { - buf = kmemdup(data, size, GFP_KERNEL); + buf = kmemdup(data, size, GFP_NOIO); if (!buf) goto out; } else { @@ -2137,7 +2137,7 @@ static void usbnet_async_cmd_cb(struct urb *urb) int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, u16 value, u16 index, const void *data, u16 size) { - struct usb_ctrlrequest *req = NULL; + struct usb_ctrlrequest *req; struct urb *urb; int err = -ENOMEM; void *buf = NULL; @@ -2155,7 +2155,7 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, if (!buf) { netdev_err(dev->net, "Error allocating buffer" " in %s!\n", __func__); - goto fail_free; + goto fail_free_urb; } } @@ -2179,14 +2179,21 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, if (err < 0) { netdev_err(dev->net, "Error submitting the control" " message: status=%d\n", err); - goto fail_free; + goto fail_free_all; } return 0; +fail_free_all: + kfree(req); fail_free_buf: kfree(buf); -fail_free: - kfree(req); + /* + * avoid a double free + * needed because the flag can be set only + * after filling the URB + */ + urb->transfer_flags = 0; +fail_free_urb: usb_free_urb(urb); fail: return err; diff --git a/drivers/net/virtio_net.c 
b/drivers/net/virtio_net.c index 969a67970e71..ec8e1b3108c3 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -242,9 +242,15 @@ struct virtnet_info { /* Packet virtio header size */ u8 hdr_len; - /* Work struct for refilling if we run low on memory. */ + /* Work struct for delayed refilling if we run low on memory. */ struct delayed_work refill; + /* Is delayed refill enabled? */ + bool refill_enabled; + + /* The lock to synchronize the access to refill_enabled */ + spinlock_t refill_lock; + /* Work struct for config space updates */ struct work_struct config_work; @@ -348,6 +354,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) return p; } +static void enable_delayed_refill(struct virtnet_info *vi) +{ + spin_lock_bh(&vi->refill_lock); + vi->refill_enabled = true; + spin_unlock_bh(&vi->refill_lock); +} + +static void disable_delayed_refill(struct virtnet_info *vi) +{ + spin_lock_bh(&vi->refill_lock); + vi->refill_enabled = false; + spin_unlock_bh(&vi->refill_lock); +} + static void virtqueue_napi_schedule(struct napi_struct *napi, struct virtqueue *vq) { @@ -1527,8 +1547,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget, } if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { - if (!try_fill_recv(vi, rq, GFP_ATOMIC)) - schedule_delayed_work(&vi->refill, 0); + if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { + spin_lock(&vi->refill_lock); + if (vi->refill_enabled) + schedule_delayed_work(&vi->refill, 0); + spin_unlock(&vi->refill_lock); + } } u64_stats_update_begin(&rq->stats.syncp); @@ -1651,6 +1675,8 @@ static int virtnet_open(struct net_device *dev) struct virtnet_info *vi = netdev_priv(dev); int i, err; + enable_delayed_refill(vi); + for (i = 0; i < vi->max_queue_pairs; i++) { if (i < vi->curr_queue_pairs) /* Make sure we have some buffers: if oom use wq. */ @@ -2033,6 +2059,8 @@ static int virtnet_close(struct net_device *dev) struct virtnet_info *vi = netdev_priv(dev); int i; + /* Make sure NAPI doesn't schedule refill work */ + disable_delayed_refill(vi); /* Make sure refill_work doesn't re-enable napi! */ cancel_delayed_work_sync(&vi->refill); @@ -2792,6 +2820,8 @@ static int virtnet_restore_up(struct virtio_device *vdev) virtio_device_ready(vdev); + enable_delayed_refill(vi); + if (netif_running(vi->dev)) { err = virtnet_open(vi->dev); if (err) @@ -3535,6 +3565,7 @@ static int virtnet_probe(struct virtio_device *vdev) vdev->priv = vi; INIT_WORK(&vi->config_work, virtnet_config_changed_work); + spin_lock_init(&vi->refill_lock); /* If we can receive ANY GSO packets, we must allocate large ones. 
*/ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || @@ -3642,14 +3673,20 @@ static int virtnet_probe(struct virtio_device *vdev) if (vi->has_rss || vi->has_rss_hash_report) virtnet_init_default_rss(vi); - err = register_netdev(dev); + /* serialize netdev register + virtio_device_ready() with ndo_open() */ + rtnl_lock(); + + err = register_netdevice(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); + rtnl_unlock(); goto free_failover; } virtio_device_ready(vdev); + rtnl_unlock(); + err = virtnet_cpu_notif_add(vi); if (err) { pr_debug("virtio_net: registering cpu notifier failed\n"); diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 84d1c7054013..7b1dc19c565e 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -3822,7 +3822,8 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk switch (ev->evt_type) { case WMI_BSS_COLOR_COLLISION_DETECTION: - ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap); + ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap, + GFP_KERNEL); ath11k_dbg(ab, ATH11K_DBG_WMI, "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n", ev->vdev_id, ev->evt_type, ev->obss_color_bitmap); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 2f746eb64507..6f83af849f2e 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -4912,6 +4912,8 @@ static int hwsim_virtio_probe(struct virtio_device *vdev) if (err) return err; + virtio_device_ready(vdev); + err = fill_vq(hwsim_vqs[HWSIM_VQ_RX]); if (err) goto out_remove; diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index c02be4ac159e..7db627fc26be 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -1233,9 +1233,6 @@ struct rtw_chip_info { const struct wiphy_wowlan_support *wowlan_stub; const u8 max_sched_scan_ssids; - /* for 8821c set channel */ - u32 ch_param[3]; - /* coex paras */ u32 coex_para_ver; u8 bt_desired_ver; @@ -1937,6 +1934,9 @@ struct rtw_hal { enum rtw_sar_bands sar_band; struct rtw_sar sar; + + /* for 8821c set channel */ + u32 ch_param[3]; }; struct rtw_path_div { diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index ffee39ea5df6..488a7ddd507c 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -125,6 +125,7 @@ static void rtw8821c_phy_bf_init(struct rtw_dev *rtwdev) static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev) { + struct rtw_hal *hal = &rtwdev->hal; u8 crystal_cap, val; /* power on BB/RF domain */ @@ -159,9 +160,9 @@ static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev) /* post init after header files config */ rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST); - rtwdev->chip->ch_param[0] = rtw_read32_mask(rtwdev, REG_TXSF2, MASKDWORD); - rtwdev->chip->ch_param[1] = rtw_read32_mask(rtwdev, REG_TXSF6, MASKDWORD); - rtwdev->chip->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD); + hal->ch_param[0] = rtw_read32_mask(rtwdev, REG_TXSF2, MASKDWORD); + hal->ch_param[1] = rtw_read32_mask(rtwdev, REG_TXSF6, MASKDWORD); + hal->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD); rtw_phy_init(rtwdev); rtwdev->dm_info.cck_pd_default = rtw_read8(rtwdev, REG_CSRATIO) & 0x1f; @@ 
-351,6 +352,7 @@ static void rtw8821c_set_channel_rxdfir(struct rtw_dev *rtwdev, u8 bw) static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw, u8 primary_ch_idx) { + struct rtw_hal *hal = &rtwdev->hal; u32 val32; if (channel <= 14) { @@ -367,11 +369,11 @@ static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw, rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, 0x00003667); } else { rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, - rtwdev->chip->ch_param[0]); + hal->ch_param[0]); rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, - rtwdev->chip->ch_param[1] & MASKLWORD); + hal->ch_param[1] & MASKLWORD); rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, - rtwdev->chip->ch_param[2]); + hal->ch_param[2]); } } else if (channel > 35) { rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x1); diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c index dbac4c03d21a..a0335407be42 100644 --- a/drivers/net/xen-netback/rx.c +++ b/drivers/net/xen-netback/rx.c @@ -495,6 +495,7 @@ void xenvif_rx_action(struct xenvif_queue *queue) queue->rx_copy.completed = &completed_skbs; while (xenvif_rx_ring_slots_available(queue) && + !skb_queue_empty(&queue->rx_queue) && work_done < RX_BATCH_SIZE) { xenvif_rx_skb(queue); work_done++; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8c0b9546d5a2..2409007f1fd9 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -66,6 +66,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface"); +static bool __read_mostly xennet_trusted = true; +module_param_named(trusted, xennet_trusted, bool, 0644); +MODULE_PARM_DESC(trusted, "Is the backend trusted"); + #define XENNET_TIMEOUT (5 * HZ) static const struct ethtool_ops xennet_ethtool_ops; @@ -173,6 +177,9 @@ struct netfront_info { /* Is device behaving sane? */ bool broken; + /* Should skbs be bounced into a zeroed buffer? */ + bool bounce; + atomic_t rx_gso_checksum_fixup; }; @@ -271,7 +278,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) if (unlikely(!skb)) return NULL; - page = page_pool_dev_alloc_pages(queue->page_pool); + page = page_pool_alloc_pages(queue->page_pool, + GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO); if (unlikely(!page)) { kfree_skb(skb); return NULL; @@ -665,6 +673,33 @@ static int xennet_xdp_xmit(struct net_device *dev, int n, return nxmit; } +struct sk_buff *bounce_skb(const struct sk_buff *skb) +{ + unsigned int headerlen = skb_headroom(skb); + /* Align size to allocate full pages and avoid contiguous data leaks */ + unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len, + XEN_PAGE_SIZE); + struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO); + + if (!n) + return NULL; + + if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) { + WARN_ONCE(1, "misaligned skb allocated\n"); + kfree_skb(n); + return NULL; + } + + /* Set the data pointer */ + skb_reserve(n, headerlen); + /* Set the tail pointer and length */ + skb_put(n, skb->len); + + BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); + + skb_copy_header(n, skb); + return n; +} #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) @@ -718,9 +753,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev /* The first req should be at least ETH_HLEN size or the packet will be * dropped by netback. 
+ * + * If the backend is not trusted bounce all data to zeroed pages to + * avoid exposing contiguous data on the granted page not belonging to + * the skb. */ - if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) { - nskb = skb_copy(skb, GFP_ATOMIC); + if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) { + nskb = bounce_skb(skb); if (!nskb) goto drop; dev_consume_skb_any(skb); @@ -1053,8 +1092,10 @@ static int xennet_get_responses(struct netfront_queue *queue, } } rcu_read_unlock(); -next: + __skb_queue_tail(list, skb); + +next: if (!(rx->flags & XEN_NETRXF_more_data)) break; @@ -2214,6 +2255,10 @@ static int talk_to_netback(struct xenbus_device *dev, info->netdev->irq = 0; + /* Check if backend is trusted. */ + info->bounce = !xennet_trusted || + !xenbus_read_unsigned(dev->nodename, "trusted", 1); + /* Check if backend supports multiple queues */ max_queues = xenbus_read_unsigned(info->xbdev->otherend, "multi-queue-max-queues", 1); @@ -2381,6 +2426,9 @@ static int xennet_connect(struct net_device *dev) return err; if (np->netback_has_xdp_headroom) pr_info("backend supports XDP headroom\n"); + if (np->bounce) + dev_info(&np->xbdev->dev, + "bouncing transmitted data to zeroed pages\n"); /* talk_to_netback() sets the correct number of queues */ num_queues = dev->real_num_tx_queues; diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c index ceef81d93ac9..01329b91d59d 100644 --- a/drivers/nfc/nfcmrvl/i2c.c +++ b/drivers/nfc/nfcmrvl/i2c.c @@ -167,9 +167,9 @@ static int nfcmrvl_i2c_parse_dt(struct device_node *node, pdata->irq_polarity = IRQF_TRIGGER_RISING; ret = irq_of_parse_and_map(node, 0); - if (ret < 0) { - pr_err("Unable to get irq, error: %d\n", ret); - return ret; + if (!ret) { + pr_err("Unable to get irq\n"); + return -EINVAL; } pdata->irq = ret; diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c index a38e2fcdfd39..ad3359a4942c 100644 --- a/drivers/nfc/nfcmrvl/spi.c +++ b/drivers/nfc/nfcmrvl/spi.c @@ -115,9 +115,9 @@ static int nfcmrvl_spi_parse_dt(struct device_node *node, } ret = irq_of_parse_and_map(node, 0); - if (ret < 0) { - pr_err("Unable to get irq, error: %d\n", ret); - return ret; + if (!ret) { + pr_err("Unable to get irq\n"); + return -EINVAL; } pdata->irq = ret; diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index 7e451c10985d..ae2ba08d8ac3 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -122,7 +122,9 @@ static int nxp_nci_i2c_fw_read(struct nxp_nci_i2c_phy *phy, skb_put_data(*skb, &header, NXP_NCI_FW_HDR_LEN); r = i2c_master_recv(client, skb_put(*skb, frame_len), frame_len); - if (r != frame_len) { + if (r < 0) { + goto fw_read_exit_free_skb; + } else if (r != frame_len) { nfc_err(&client->dev, "Invalid frame length: %u (expected %zu)\n", r, frame_len); @@ -162,8 +164,13 @@ static int nxp_nci_i2c_nci_read(struct nxp_nci_i2c_phy *phy, skb_put_data(*skb, (void *)&header, NCI_CTRL_HDR_SIZE); + if (!header.plen) + return 0; + r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen); - if (r != header.plen) { + if (r < 0) { + goto nci_read_exit_free_skb; + } else if (r != header.plen) { nfc_err(&client->dev, "Invalid frame payload length: %u (expected %u)\n", r, header.plen); diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index a4fc17db707c..b38d0355b0ac 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -176,8 +176,8 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data) ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1; /* make sure we 
are in the region */ - if (ctx->phys < nd_region->ndr_start - || (ctx->phys + ctx->cleared) > ndr_end) + if (ctx->phys < nd_region->ndr_start || + (ctx->phys + ctx->cleared - 1) > ndr_end) return 0; sector = (ctx->phys - nd_region->ndr_start) / 512; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index b3d9c29aba1e..6a12a906a11e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3786,7 +3786,7 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns) } static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, - unsigned nsid, struct nvme_ns_ids *ids) + unsigned nsid, struct nvme_ns_ids *ids, bool is_shared) { struct nvme_ns_head *head; size_t size = sizeof(*head); @@ -3810,6 +3810,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, head->subsys = ctrl->subsys; head->ns_id = nsid; head->ids = *ids; + head->shared = is_shared; kref_init(&head->ref); if (head->ids.csi) { @@ -3891,12 +3892,11 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, nsid); goto out_unlock; } - head = nvme_alloc_ns_head(ctrl, nsid, ids); + head = nvme_alloc_ns_head(ctrl, nsid, ids, is_shared); if (IS_ERR(head)) { ret = PTR_ERR(head); goto out_unlock; } - head->shared = is_shared; } else { ret = -EINVAL; if (!is_shared || !head->shared) { @@ -4595,6 +4595,8 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl) nvme_stop_failfast_work(ctrl); flush_work(&ctrl->async_event_work); cancel_work_sync(&ctrl->fw_act_work); + if (ctrl->ops->stop_ctrl) + ctrl->ops->stop_ctrl(ctrl); } EXPORT_SYMBOL_GPL(nvme_stop_ctrl); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 0da94b233fed..5558f8812157 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -502,6 +502,7 @@ struct nvme_ctrl_ops { void (*free_ctrl)(struct nvme_ctrl *ctrl); void (*submit_async_event)(struct nvme_ctrl *ctrl); void (*delete_ctrl)(struct nvme_ctrl *ctrl); + void (*stop_ctrl)(struct nvme_ctrl *ctrl); int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size); void (*print_device_info)(struct nvme_ctrl *ctrl); }; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d7b24ee17285..73d9fcba3b1c 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2690,8 +2690,13 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) struct pci_dev *pdev = to_pci_dev(dev->dev); mutex_lock(&dev->shutdown_lock); - if (pci_device_is_present(pdev) && pci_is_enabled(pdev)) { - u32 csts = readl(dev->bar + NVME_REG_CSTS); + if (pci_is_enabled(pdev)) { + u32 csts; + + if (pci_device_is_present(pdev)) + csts = readl(dev->bar + NVME_REG_CSTS); + else + csts = ~0; if (dev->ctrl.state == NVME_CTRL_LIVE || dev->ctrl.state == NVME_CTRL_RESETTING) { @@ -3465,12 +3470,16 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ - .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | + NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ - .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | + NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 
512GB */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, @@ -3506,6 +3515,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index f2a5e1ea508a..46c2dcf72f7e 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1048,6 +1048,14 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, } } +static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl) +{ + struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); + + cancel_work_sync(&ctrl->err_work); + cancel_delayed_work_sync(&ctrl->reconnect_work); +} + static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); @@ -2252,9 +2260,6 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = { static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) { - cancel_work_sync(&ctrl->err_work); - cancel_delayed_work_sync(&ctrl->reconnect_work); - nvme_rdma_teardown_io_queues(ctrl, shutdown); nvme_stop_admin_queue(&ctrl->ctrl); if (shutdown) @@ -2304,6 +2309,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { .submit_async_event = nvme_rdma_submit_async_event, .delete_ctrl = nvme_rdma_delete_ctrl, .get_address = nvmf_get_address, + .stop_ctrl = nvme_rdma_stop_ctrl, }; /* diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index bb67538d241b..7a9e6ffa2342 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1180,8 +1180,7 @@ done: } else if (ret < 0) { dev_err(queue->ctrl->ctrl.device, "failed to send request %d\n", ret); - if (ret != -EPIPE && ret != -ECONNRESET) - nvme_tcp_fail_request(queue->request); + nvme_tcp_fail_request(queue->request); nvme_tcp_done_send_req(queue); } return ret; @@ -2194,9 +2193,6 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work) static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) { - cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); - cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); - nvme_tcp_teardown_io_queues(ctrl, shutdown); nvme_stop_admin_queue(ctrl); if (shutdown) @@ -2236,6 +2232,12 @@ out_fail: nvme_tcp_reconnect_or_remove(ctrl); } +static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl) +{ + cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); + cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); +} + static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); @@ -2557,6 +2559,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = { .submit_async_event = nvme_tcp_submit_async_event, .delete_ctrl = nvme_tcp_delete_ctrl, .get_address = nvmf_get_address, + .stop_ctrl = nvme_tcp_stop_ctrl, }; static bool diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h index b5f85259461a..37c7f4c89f92 100644 --- a/drivers/nvme/host/trace.h +++ b/drivers/nvme/host/trace.h @@ -69,7 +69,7 @@ TRACE_EVENT(nvme_setup_cmd, __entry->metadata = !!blk_integrity_rq(req); __entry->fctype = cmd->fabrics.fctype; __assign_disk_name(__entry->disk, req->q->disk); - memcpy(__entry->cdw10, &cmd->common.cdw10, + memcpy(__entry->cdw10, 
&cmd->common.cdws, sizeof(__entry->cdw10)); ), TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%x, cmd=(%s %s)", diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index e44b2988759e..ff77c3d2354f 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -773,11 +773,31 @@ static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item, } CONFIGFS_ATTR(nvmet_passthru_, io_timeout); +static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item, + char *page) +{ + return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids); +} + +static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item, + const char *page, size_t count) +{ + struct nvmet_subsys *subsys = to_subsys(item->ci_parent); + unsigned int clear_ids; + + if (kstrtouint(page, 0, &clear_ids)) + return -EINVAL; + subsys->clear_ids = clear_ids; + return count; +} +CONFIGFS_ATTR(nvmet_passthru_, clear_ids); + static struct configfs_attribute *nvmet_passthru_attrs[] = { &nvmet_passthru_attr_device_path, &nvmet_passthru_attr_enable, &nvmet_passthru_attr_admin_timeout, &nvmet_passthru_attr_io_timeout, + &nvmet_passthru_attr_clear_ids, NULL, }; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 90e75324dae0..c27660a660d9 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -1374,6 +1374,12 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, ctrl->port = req->port; ctrl->ops = req->ops; +#ifdef CONFIG_NVME_TARGET_PASSTHRU + /* By default, set loop targets to clear IDS by default */ + if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP) + subsys->clear_ids = 1; +#endif + INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work); INIT_LIST_HEAD(&ctrl->async_events); INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL); diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 69818752a33a..2b3e5719f24e 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -249,6 +249,7 @@ struct nvmet_subsys { struct config_group passthru_group; unsigned int admin_timeout; unsigned int io_timeout; + unsigned int clear_ids; #endif /* CONFIG_NVME_TARGET_PASSTHRU */ #ifdef CONFIG_BLK_DEV_ZONED diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index b1f7efab3918..6f39a29828b1 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -30,6 +30,53 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl) ctrl->cap &= ~(1ULL << 43); } +static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + u16 status = NVME_SC_SUCCESS; + int pos, len; + bool csi_seen = false; + void *data; + u8 csi; + + if (!ctrl->subsys->clear_ids) + return status; + + data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); + if (!data) + return NVME_SC_INTERNAL; + + status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE); + if (status) + goto out_free; + + for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { + struct nvme_ns_id_desc *cur = data + pos; + + if (cur->nidl == 0) + break; + if (cur->nidt == NVME_NIDT_CSI) { + memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN); + csi_seen = true; + break; + } + len = sizeof(struct nvme_ns_id_desc) + cur->nidl; + } + + memset(data, 0, NVME_IDENTIFY_DATA_SIZE); + if (csi_seen) { + struct nvme_ns_id_desc *cur = data; + + cur->nidt = NVME_NIDT_CSI; + cur->nidl = NVME_NIDT_CSI_LEN; + memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN); + } + 
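+	/* Editorial note: at this point "data" holds at most the single
+	 * CSI descriptor rebuilt above; any NGUID/EUI64/UUID descriptors
+	 * reported by the passthru controller were wiped by the memset(),
+	 * which is what the clear_ids option is for. */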
status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE); +out_free: + kfree(data); + return status; +} + static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; @@ -152,6 +199,11 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req) */ id->mc = 0; + if (req->sq->ctrl->subsys->clear_ids) { + memset(id->nguid, 0, NVME_NIDT_NGUID_LEN); + memset(id->eui64, 0, NVME_NIDT_EUI64_LEN); + } + status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); out_free: @@ -176,6 +228,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w) case NVME_ID_CNS_NS: nvmet_passthru_override_id_ns(req); break; + case NVME_ID_CNS_NS_DESC_LIST: + nvmet_passthru_override_id_descs(req); + break; } } else if (status < 0) status = NVME_SC_INTERNAL; diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 2793554e622e..0a9542599ad1 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -405,7 +405,7 @@ err: return NVME_SC_INTERNAL; } -static void nvmet_tcp_send_ddgst(struct ahash_request *hash, +static void nvmet_tcp_calc_ddgst(struct ahash_request *hash, struct nvmet_tcp_cmd *cmd) { ahash_request_set_crypt(hash, cmd->req.sg, @@ -413,23 +413,6 @@ static void nvmet_tcp_send_ddgst(struct ahash_request *hash, crypto_ahash_digest(hash); } -static void nvmet_tcp_recv_ddgst(struct ahash_request *hash, - struct nvmet_tcp_cmd *cmd) -{ - struct scatterlist sg; - struct kvec *iov; - int i; - - crypto_ahash_init(hash); - for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) { - sg_init_one(&sg, iov->iov_base, iov->iov_len); - ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len); - crypto_ahash_update(hash); - } - ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0); - crypto_ahash_final(hash); -} - static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) { struct nvme_tcp_data_pdu *pdu = cmd->data_pdu; @@ -454,7 +437,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) if (queue->data_digest) { pdu->hdr.flags |= NVME_TCP_F_DDGST; - nvmet_tcp_send_ddgst(queue->snd_hash, cmd); + nvmet_tcp_calc_ddgst(queue->snd_hash, cmd); } if (cmd->queue->hdr_digest) { @@ -1137,7 +1120,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) { struct nvmet_tcp_queue *queue = cmd->queue; - nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd); + nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd); queue->offset = 0; queue->left = NVME_TCP_DIGEST_LENGTH; queue->rcv_state = NVMET_TCP_RECV_DDGST; diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c index 8d374cc552be..f2e58ddfaed2 100644 --- a/drivers/of/kexec.c +++ b/drivers/of/kexec.c @@ -9,6 +9,7 @@ * Copyright (C) 2016 IBM Corporation */ +#include <linux/ima.h> #include <linux/kernel.h> #include <linux/kexec.h> #include <linux/memblock.h> @@ -115,6 +116,7 @@ static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr, return 0; } +#ifdef CONFIG_HAVE_IMA_KEXEC /** * ima_get_kexec_buffer - get IMA buffer from the previous kernel * @addr: On successful return, set to point to the buffer contents. @@ -122,16 +124,13 @@ static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr, * * Return: 0 on success, negative errno on error. 
*/ -int ima_get_kexec_buffer(void **addr, size_t *size) +int __init ima_get_kexec_buffer(void **addr, size_t *size) { int ret, len; unsigned long tmp_addr; size_t tmp_size; const void *prop; - if (!IS_ENABLED(CONFIG_HAVE_IMA_KEXEC)) - return -ENOTSUPP; - prop = of_get_property(of_chosen, "linux,ima-kexec-buffer", &len); if (!prop) return -ENOENT; @@ -149,16 +148,13 @@ int ima_get_kexec_buffer(void **addr, size_t *size) /** * ima_free_kexec_buffer - free memory used by the IMA buffer */ -int ima_free_kexec_buffer(void) +int __init ima_free_kexec_buffer(void) { int ret; unsigned long addr; size_t size; struct property *prop; - if (!IS_ENABLED(CONFIG_HAVE_IMA_KEXEC)) - return -ENOTSUPP; - prop = of_find_property(of_chosen, "linux,ima-kexec-buffer", NULL); if (!prop) return -ENOENT; @@ -173,6 +169,7 @@ int ima_free_kexec_buffer(void) return memblock_phys_free(addr, size); } +#endif /** * remove_ima_buffer - remove the IMA buffer property and reservation from @fdt diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 8a3b0c3a1e92..3a8c98615634 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -677,7 +677,7 @@ static int iosapic_set_affinity_irq(struct irq_data *d, if (dest_cpu < 0) return -1; - cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(dest_cpu)); + irq_data_update_affinity(d, cpumask_of(dest_cpu)); vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu); spin_lock_irqsave(&iosapic_lock, flags); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index db814f7b93ba..e7c6f6629e7c 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -642,7 +642,7 @@ static void hv_arch_irq_unmask(struct irq_data *data) struct hv_retarget_device_interrupt *params; struct tran_int_desc *int_desc; struct hv_pcibus_device *hbus; - struct cpumask *dest; + const struct cpumask *dest; cpumask_var_t tmp; struct pci_bus *pbus; struct pci_dev *pdev; @@ -1613,7 +1613,7 @@ out: } static u32 hv_compose_msi_req_v1( - struct pci_create_interrupt *int_pkt, struct cpumask *affinity, + struct pci_create_interrupt *int_pkt, const struct cpumask *affinity, u32 slot, u8 vector, u8 vector_count) { int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; @@ -1635,13 +1635,13 @@ static u32 hv_compose_msi_req_v1( * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten * by subsequent retarget in hv_irq_unmask(). 
*/ -static int hv_compose_msi_req_get_cpu(struct cpumask *affinity) +static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity) { return cpumask_first_and(affinity, cpu_online_mask); } static u32 hv_compose_msi_req_v2( - struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity, + struct pci_create_interrupt2 *int_pkt, const struct cpumask *affinity, u32 slot, u8 vector, u8 vector_count) { int cpu; @@ -1660,7 +1660,7 @@ static u32 hv_compose_msi_req_v2( } static u32 hv_compose_msi_req_v3( - struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity, + struct pci_create_interrupt3 *int_pkt, const struct cpumask *affinity, u32 slot, u32 vector, u8 vector_count) { int cpu; @@ -1697,7 +1697,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct hv_pci_dev *hpdev; struct pci_bus *pbus; struct pci_dev *pdev; - struct cpumask *dest; + const struct cpumask *dest; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; struct msi_desc *msi_desc; diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 96e09fa40909..03b1309875ae 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -1139,7 +1139,7 @@ static void cci_pmu_start(struct perf_event *event, int pmu_flags) /* * To handle interrupt latency, we always reprogram the period - * regardlesss of PERF_EF_RELOAD. + * regardless of PERF_EF_RELOAD. */ if (pmu_flags & PERF_EF_RELOAD) WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); @@ -1261,7 +1261,7 @@ static int validate_group(struct perf_event *event) */ .used_mask = mask, }; - memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long)); + bitmap_zero(mask, cci_pmu->num_cntrs); if (!validate_event(event->pmu, &fake_pmu, leader)) return -EINVAL; @@ -1629,10 +1629,9 @@ static struct cci_pmu *cci_pmu_alloc(struct device *dev) GFP_KERNEL); if (!cci_pmu->hw_events.events) return ERR_PTR(-ENOMEM); - cci_pmu->hw_events.used_mask = devm_kcalloc(dev, - BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)), - sizeof(*cci_pmu->hw_events.used_mask), - GFP_KERNEL); + cci_pmu->hw_events.used_mask = devm_bitmap_zalloc(dev, + CCI_PMU_MAX_HW_CNTRS(model), + GFP_KERNEL); if (!cci_pmu->hw_events.used_mask) return ERR_PTR(-ENOMEM); diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index 40b352e8aa7f..728d13d8e98a 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c @@ -1250,7 +1250,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9); /* Get a convenient /sys/event_source/devices/ name */ - ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL); + ccn->dt.id = ida_alloc(&arm_ccn_pmu_ida, GFP_KERNEL); if (ccn->dt.id == 0) { name = "ccn"; } else { @@ -1312,7 +1312,7 @@ error_pmu_register: &ccn->dt.node); error_set_affinity: error_choose_name: - ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); + ida_free(&arm_ccn_pmu_ida, ccn->dt.id); for (i = 0; i < ccn->num_xps; i++) writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); writel(0, ccn->dt.base + CCN_DT_PMCR); @@ -1329,7 +1329,7 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); writel(0, ccn->dt.base + CCN_DT_PMCR); perf_pmu_unregister(&ccn->dt.pmu); - ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); + ida_free(&arm_ccn_pmu_ida, ccn->dt.id); } static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn, diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index db670b265897..b65a7d9640e1 100644 --- a/drivers/perf/arm_spe_pmu.c +++ 
b/drivers/perf/arm_spe_pmu.c @@ -39,6 +39,24 @@ #include <asm/mmu.h> #include <asm/sysreg.h> +/* + * Cache if the event is allowed to trace Context information. + * This allows us to perform the check, i.e, perfmon_capable(), + * in the context of the event owner, once, during the event_init(). + */ +#define SPE_PMU_HW_FLAGS_CX BIT(0) + +static void set_spe_event_has_cx(struct perf_event *event) +{ + if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable()) + event->hw.flags |= SPE_PMU_HW_FLAGS_CX; +} + +static bool get_spe_event_has_cx(struct perf_event *event) +{ + return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX); +} + #define ARM_SPE_BUF_PAD_BYTE 0 struct arm_spe_pmu_buf { @@ -272,7 +290,7 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event) if (!attr->exclude_kernel) reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT); - if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable()) + if (get_spe_event_has_cx(event)) reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT); return reg; @@ -709,10 +727,10 @@ static int arm_spe_pmu_event_init(struct perf_event *event) !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT)) return -EOPNOTSUPP; + set_spe_event_has_cx(event); reg = arm_spe_event_to_pmscr(event); if (!perfmon_capable() && (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) | - BIT(SYS_PMSCR_EL1_CX_SHIFT) | BIT(SYS_PMSCR_EL1_PCT_SHIFT)))) return -EACCES; diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index b1b2a55de77f..8e058e08fe81 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -611,7 +611,7 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base, .dev = dev, }; - pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL); + pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL); return pmu->id; } @@ -765,7 +765,7 @@ ddr_perf_err: cpuhp_instance_err: cpuhp_remove_multi_state(pmu->cpuhp_state); cpuhp_state_err: - ida_simple_remove(&ddr_ida, pmu->id); + ida_free(&ddr_ida, pmu->id); dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret); return ret; } @@ -779,7 +779,7 @@ static int ddr_perf_remove(struct platform_device *pdev) perf_pmu_unregister(&pmu->pmu); - ida_simple_remove(&ddr_ida, pmu->id); + ida_free(&ddr_ida, pmu->id); return 0; } diff --git a/drivers/perf/hisilicon/Kconfig b/drivers/perf/hisilicon/Kconfig index 5546218b5598..171bfc1b6bc2 100644 --- a/drivers/perf/hisilicon/Kconfig +++ b/drivers/perf/hisilicon/Kconfig @@ -14,3 +14,13 @@ config HISI_PCIE_PMU RCiEP devices. Adds the PCIe PMU into perf events system for monitoring latency, bandwidth etc. + +config HNS3_PMU + tristate "HNS3 PERF PMU" + depends on ARM64 || COMPILE_TEST + depends on PCI + help + Provide support for HNS3 performance monitoring unit (PMU) RCiEP + devices. + Adds the HNS3 PMU into perf events system for monitoring latency, + bandwidth etc. 
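(Editor's aside: the arm_spe_pmu hunk above is an instance of a pattern worth calling out. perfmon_capable() has to be evaluated once, during event_init() and therefore in the context of the task that owns the event, with the verdict cached in event->hw.flags; arm_spe_event_to_pmscr() is also reached from start()/IRQ paths where "current" is unrelated to the event owner, so checking the capability there would test the wrong task. Below is a minimal sketch of that pattern; the my_pmu_* names and the MY_PMU_HW_FLAGS_PRIV bit are hypothetical stand-ins, where the real patch uses SPE_PMU_HW_FLAGS_CX.)

#include <linux/bits.h>
#include <linux/capability.h>
#include <linux/perf_event.h>

/* Hypothetical flag bit stored in the per-event hw flags. */
#define MY_PMU_HW_FLAGS_PRIV	BIT(0)

/* Called from event_init(), i.e. in the opening task's context. */
static void my_pmu_cache_priv(struct perf_event *event)
{
	if (perfmon_capable())
		event->hw.flags |= MY_PMU_HW_FLAGS_PRIV;
}

/* Called wherever the control register is rebuilt, possibly in IRQ
 * context: consult only the cached verdict, never current's creds. */
static bool my_pmu_event_has_priv(struct perf_event *event)
{
	return !!(event->hw.flags & MY_PMU_HW_FLAGS_PRIV);
}

Caching the verdict also means a later privilege change of the owning task does not silently alter what an already-created event is allowed to record.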
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile index 6be83517acaa..4d2c9abe3372 100644 --- a/drivers/perf/hisilicon/Makefile +++ b/drivers/perf/hisilicon/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \ hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o +obj-$(CONFIG_HNS3_PMU) += hns3_pmu.o diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index 62299ab5a9be..50d0c0a2f1fe 100644 --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -516,21 +516,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev) "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id, ddrc_pmu->index_id); - ddrc_pmu->pmu = (struct pmu) { - .name = name, - .module = THIS_MODULE, - .task_ctx_nr = perf_invalid_context, - .event_init = hisi_uncore_pmu_event_init, - .pmu_enable = hisi_uncore_pmu_enable, - .pmu_disable = hisi_uncore_pmu_disable, - .add = hisi_uncore_pmu_add, - .del = hisi_uncore_pmu_del, - .start = hisi_uncore_pmu_start, - .stop = hisi_uncore_pmu_stop, - .read = hisi_uncore_pmu_read, - .attr_groups = ddrc_pmu->pmu_events.attr_groups, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, - }; + hisi_pmu_init(&ddrc_pmu->pmu, name, ddrc_pmu->pmu_events.attr_groups, THIS_MODULE); ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1); if (ret) { diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c index 393513150106..13017b3412a5 100644 --- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -519,21 +519,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev) name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u", hha_pmu->sccl_id, hha_pmu->index_id); - hha_pmu->pmu = (struct pmu) { - .name = name, - .module = THIS_MODULE, - .task_ctx_nr = perf_invalid_context, - .event_init = hisi_uncore_pmu_event_init, - .pmu_enable = hisi_uncore_pmu_enable, - .pmu_disable = hisi_uncore_pmu_disable, - .add = hisi_uncore_pmu_add, - .del = hisi_uncore_pmu_del, - .start = hisi_uncore_pmu_start, - .stop = hisi_uncore_pmu_stop, - .read = hisi_uncore_pmu_read, - .attr_groups = hha_pmu->pmu_events.attr_groups, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, - }; + hisi_pmu_init(&hha_pmu->pmu, name, hha_pmu->pmu_events.attr_groups, THIS_MODULE); ret = perf_pmu_register(&hha_pmu->pmu, name, -1); if (ret) { diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c index 560ab964c8b5..2995f3630d49 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -557,21 +557,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev) */ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u", l3c_pmu->sccl_id, l3c_pmu->ccl_id); - l3c_pmu->pmu = (struct pmu) { - .name = name, - .module = THIS_MODULE, - .task_ctx_nr = perf_invalid_context, - .event_init = hisi_uncore_pmu_event_init, - .pmu_enable = hisi_uncore_pmu_enable, - .pmu_disable = hisi_uncore_pmu_disable, - .add = hisi_uncore_pmu_add, - .del = hisi_uncore_pmu_del, - .start = hisi_uncore_pmu_start, - .stop = hisi_uncore_pmu_stop, - .read = hisi_uncore_pmu_read, - .attr_groups = l3c_pmu->pmu_events.attr_groups, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, - }; + hisi_pmu_init(&l3c_pmu->pmu, name, l3c_pmu->pmu_events.attr_groups, THIS_MODULE); ret = 
perf_pmu_register(&l3c_pmu->pmu, name, -1); if (ret) { diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c index a0ee84d97c41..47d3cc9b6eec 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c @@ -412,21 +412,7 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev) return ret; } - pa_pmu->pmu = (struct pmu) { - .module = THIS_MODULE, - .task_ctx_nr = perf_invalid_context, - .event_init = hisi_uncore_pmu_event_init, - .pmu_enable = hisi_uncore_pmu_enable, - .pmu_disable = hisi_uncore_pmu_disable, - .add = hisi_uncore_pmu_add, - .del = hisi_uncore_pmu_del, - .start = hisi_uncore_pmu_start, - .stop = hisi_uncore_pmu_stop, - .read = hisi_uncore_pmu_read, - .attr_groups = pa_pmu->pmu_events.attr_groups, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, - }; - + hisi_pmu_init(&pa_pmu->pmu, name, pa_pmu->pmu_events.attr_groups, THIS_MODULE); ret = perf_pmu_register(&pa_pmu->pmu, name, -1); if (ret) { dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 980b9ee6eb14..fbc8a93d5eac 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -531,4 +531,22 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) } EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu); +void hisi_pmu_init(struct pmu *pmu, const char *name, + const struct attribute_group **attr_groups, struct module *module) +{ + pmu->name = name; + pmu->module = module; + pmu->task_ctx_nr = perf_invalid_context; + pmu->event_init = hisi_uncore_pmu_event_init; + pmu->pmu_enable = hisi_uncore_pmu_enable; + pmu->pmu_disable = hisi_uncore_pmu_disable; + pmu->add = hisi_uncore_pmu_add; + pmu->del = hisi_uncore_pmu_del; + pmu->start = hisi_uncore_pmu_start; + pmu->stop = hisi_uncore_pmu_stop; + pmu->read = hisi_uncore_pmu_read; + pmu->attr_groups = attr_groups; +} +EXPORT_SYMBOL_GPL(hisi_pmu_init); + MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h index 96eeddad55ff..b59de33cd059 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.h +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h @@ -121,4 +121,6 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev, int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu, struct platform_device *pdev); +void hisi_pmu_init(struct pmu *pmu, const char *name, + const struct attribute_group **attr_groups, struct module *module); #endif /* __HISI_UNCORE_PMU_H__ */ diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c index 6aedc303ff56..b9c79f17230c 100644 --- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c @@ -445,20 +445,7 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev) return ret; } - sllc_pmu->pmu = (struct pmu) { - .module = THIS_MODULE, - .task_ctx_nr = perf_invalid_context, - .event_init = hisi_uncore_pmu_event_init, - .pmu_enable = hisi_uncore_pmu_enable, - .pmu_disable = hisi_uncore_pmu_disable, - .add = hisi_uncore_pmu_add, - .del = hisi_uncore_pmu_del, - .start = hisi_uncore_pmu_start, - .stop = hisi_uncore_pmu_stop, - .read = hisi_uncore_pmu_read, - .attr_groups = sllc_pmu->pmu_events.attr_groups, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, - }; + hisi_pmu_init(&sllc_pmu->pmu, name, sllc_pmu->pmu_events.attr_groups, 
THIS_MODULE); ret = perf_pmu_register(&sllc_pmu->pmu, name, -1); if (ret) { diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c new file mode 100644 index 000000000000..e0457d84af6b --- /dev/null +++ b/drivers/perf/hisilicon/hns3_pmu.c @@ -0,0 +1,1671 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This driver adds support for the HNS3 PMU iEP device. Related perf events are + * bandwidth, latency, packet rate, interrupt rate etc. + * + * Copyright (C) 2022 HiSilicon Limited + */ +#include <linux/bitfield.h> +#include <linux/bitmap.h> +#include <linux/bug.h> +#include <linux/cpuhotplug.h> +#include <linux/cpumask.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/io-64-nonatomic-hi-lo.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/pci-epf.h> +#include <linux/perf_event.h> +#include <linux/smp.h> + +/* registers offset address */ +#define HNS3_PMU_REG_GLOBAL_CTRL 0x0000 +#define HNS3_PMU_REG_CLOCK_FREQ 0x0020 +#define HNS3_PMU_REG_BDF 0x0fe0 +#define HNS3_PMU_REG_VERSION 0x0fe4 +#define HNS3_PMU_REG_DEVICE_ID 0x0fe8 + +#define HNS3_PMU_REG_EVENT_OFFSET 0x1000 +#define HNS3_PMU_REG_EVENT_SIZE 0x1000 +#define HNS3_PMU_REG_EVENT_CTRL_LOW 0x00 +#define HNS3_PMU_REG_EVENT_CTRL_HIGH 0x04 +#define HNS3_PMU_REG_EVENT_INTR_STATUS 0x08 +#define HNS3_PMU_REG_EVENT_INTR_MASK 0x0c +#define HNS3_PMU_REG_EVENT_COUNTER 0x10 +#define HNS3_PMU_REG_EVENT_EXT_COUNTER 0x18 +#define HNS3_PMU_REG_EVENT_QID_CTRL 0x28 +#define HNS3_PMU_REG_EVENT_QID_PARA 0x2c + +#define HNS3_PMU_FILTER_SUPPORT_GLOBAL BIT(0) +#define HNS3_PMU_FILTER_SUPPORT_PORT BIT(1) +#define HNS3_PMU_FILTER_SUPPORT_PORT_TC BIT(2) +#define HNS3_PMU_FILTER_SUPPORT_FUNC BIT(3) +#define HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE BIT(4) +#define HNS3_PMU_FILTER_SUPPORT_FUNC_INTR BIT(5) + +#define HNS3_PMU_FILTER_ALL_TC 0xf +#define HNS3_PMU_FILTER_ALL_QUEUE 0xffff + +#define HNS3_PMU_CTRL_SUBEVENT_S 4 +#define HNS3_PMU_CTRL_FILTER_MODE_S 24 + +#define HNS3_PMU_GLOBAL_START BIT(0) + +#define HNS3_PMU_EVENT_STATUS_RESET BIT(11) +#define HNS3_PMU_EVENT_EN BIT(12) +#define HNS3_PMU_EVENT_OVERFLOW_RESTART BIT(15) + +#define HNS3_PMU_QID_PARA_FUNC_S 0 +#define HNS3_PMU_QID_PARA_QUEUE_S 16 + +#define HNS3_PMU_QID_CTRL_REQ_ENABLE BIT(0) +#define HNS3_PMU_QID_CTRL_DONE BIT(1) +#define HNS3_PMU_QID_CTRL_MISS BIT(2) + +#define HNS3_PMU_INTR_MASK_OVERFLOW BIT(1) + +#define HNS3_PMU_MAX_HW_EVENTS 8 + +/* + * Each hardware event contains two registers (counter and ext_counter) for + * bandwidth, packet rate, latency and interrupt rate. These two registers will + * be triggered to run at the same time when a hardware event is enabled. 
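Both registers start and stop together, so the ratio of the two is + * always taken over the same sampling window.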
The meaning + * of counter and ext_counter differs between event types, as the following + * table shows: + * + * +----------------+------------------+---------------+ + * | event type | counter | ext_counter | + * +----------------+------------------+---------------+ + * | bandwidth | byte number | cycle number | + * +----------------+------------------+---------------+ + * | packet rate | packet number | cycle number | + * +----------------+------------------+---------------+ + * | latency | cycle number | packet number | + * +----------------+------------------+---------------+ + * | interrupt rate | interrupt number | cycle number | + * +----------------+------------------+---------------+ + * + * The cycle number is the increment of the hardware timer counter; the + * frequency of the hardware timer can be read from the hw_clk_freq file. + * + * Performance of each hardware event is calculated by: counter / ext_counter. + * + * Since processing of data is preferred to be done in userspace, we expose + * ext_counter as a separate event for userspace and use bit 16 to indicate it. + * For example, events 0x00001 and 0x10001 are actually one event for hardware + * because bits 0-15 are the same. If bit 16 of an event is 0, the counter + * register is read; otherwise the ext_counter register is read. + */ +/* bandwidth events */ +#define HNS3_PMU_EVT_BW_SSU_EGU_BYTE_NUM 0x00001 +#define HNS3_PMU_EVT_BW_SSU_EGU_TIME 0x10001 +#define HNS3_PMU_EVT_BW_SSU_RPU_BYTE_NUM 0x00002 +#define HNS3_PMU_EVT_BW_SSU_RPU_TIME 0x10002 +#define HNS3_PMU_EVT_BW_SSU_ROCE_BYTE_NUM 0x00003 +#define HNS3_PMU_EVT_BW_SSU_ROCE_TIME 0x10003 +#define HNS3_PMU_EVT_BW_ROCE_SSU_BYTE_NUM 0x00004 +#define HNS3_PMU_EVT_BW_ROCE_SSU_TIME 0x10004 +#define HNS3_PMU_EVT_BW_TPU_SSU_BYTE_NUM 0x00005 +#define HNS3_PMU_EVT_BW_TPU_SSU_TIME 0x10005 +#define HNS3_PMU_EVT_BW_RPU_RCBRX_BYTE_NUM 0x00006 +#define HNS3_PMU_EVT_BW_RPU_RCBRX_TIME 0x10006 +#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_BYTE_NUM 0x00008 +#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_TIME 0x10008 +#define HNS3_PMU_EVT_BW_WR_FBD_BYTE_NUM 0x00009 +#define HNS3_PMU_EVT_BW_WR_FBD_TIME 0x10009 +#define HNS3_PMU_EVT_BW_WR_EBD_BYTE_NUM 0x0000a +#define HNS3_PMU_EVT_BW_WR_EBD_TIME 0x1000a +#define HNS3_PMU_EVT_BW_RD_FBD_BYTE_NUM 0x0000b +#define HNS3_PMU_EVT_BW_RD_FBD_TIME 0x1000b +#define HNS3_PMU_EVT_BW_RD_EBD_BYTE_NUM 0x0000c +#define HNS3_PMU_EVT_BW_RD_EBD_TIME 0x1000c +#define HNS3_PMU_EVT_BW_RD_PAY_M0_BYTE_NUM 0x0000d +#define HNS3_PMU_EVT_BW_RD_PAY_M0_TIME 0x1000d +#define HNS3_PMU_EVT_BW_RD_PAY_M1_BYTE_NUM 0x0000e +#define HNS3_PMU_EVT_BW_RD_PAY_M1_TIME 0x1000e +#define HNS3_PMU_EVT_BW_WR_PAY_M0_BYTE_NUM 0x0000f +#define HNS3_PMU_EVT_BW_WR_PAY_M0_TIME 0x1000f +#define HNS3_PMU_EVT_BW_WR_PAY_M1_BYTE_NUM 0x00010 +#define HNS3_PMU_EVT_BW_WR_PAY_M1_TIME 0x10010 + +/* packet rate events */ +#define HNS3_PMU_EVT_PPS_IGU_SSU_PACKET_NUM 0x00100 +#define HNS3_PMU_EVT_PPS_IGU_SSU_TIME 0x10100 +#define HNS3_PMU_EVT_PPS_SSU_EGU_PACKET_NUM 0x00101 +#define HNS3_PMU_EVT_PPS_SSU_EGU_TIME 0x10101 +#define HNS3_PMU_EVT_PPS_SSU_RPU_PACKET_NUM 0x00102 +#define HNS3_PMU_EVT_PPS_SSU_RPU_TIME 0x10102 +#define HNS3_PMU_EVT_PPS_SSU_ROCE_PACKET_NUM 0x00103 +#define HNS3_PMU_EVT_PPS_SSU_ROCE_TIME 0x10103 +#define HNS3_PMU_EVT_PPS_ROCE_SSU_PACKET_NUM 0x00104 +#define HNS3_PMU_EVT_PPS_ROCE_SSU_TIME 0x10104 +#define HNS3_PMU_EVT_PPS_TPU_SSU_PACKET_NUM 0x00105 +#define HNS3_PMU_EVT_PPS_TPU_SSU_TIME 0x10105 +#define HNS3_PMU_EVT_PPS_RPU_RCBRX_PACKET_NUM 0x00106 +#define HNS3_PMU_EVT_PPS_RPU_RCBRX_TIME 
0x10106 +#define HNS3_PMU_EVT_PPS_RCBTX_TPU_PACKET_NUM 0x00107 +#define HNS3_PMU_EVT_PPS_RCBTX_TPU_TIME 0x10107 +#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_PACKET_NUM 0x00108 +#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_TIME 0x10108 +#define HNS3_PMU_EVT_PPS_WR_FBD_PACKET_NUM 0x00109 +#define HNS3_PMU_EVT_PPS_WR_FBD_TIME 0x10109 +#define HNS3_PMU_EVT_PPS_WR_EBD_PACKET_NUM 0x0010a +#define HNS3_PMU_EVT_PPS_WR_EBD_TIME 0x1010a +#define HNS3_PMU_EVT_PPS_RD_FBD_PACKET_NUM 0x0010b +#define HNS3_PMU_EVT_PPS_RD_FBD_TIME 0x1010b +#define HNS3_PMU_EVT_PPS_RD_EBD_PACKET_NUM 0x0010c +#define HNS3_PMU_EVT_PPS_RD_EBD_TIME 0x1010c +#define HNS3_PMU_EVT_PPS_RD_PAY_M0_PACKET_NUM 0x0010d +#define HNS3_PMU_EVT_PPS_RD_PAY_M0_TIME 0x1010d +#define HNS3_PMU_EVT_PPS_RD_PAY_M1_PACKET_NUM 0x0010e +#define HNS3_PMU_EVT_PPS_RD_PAY_M1_TIME 0x1010e +#define HNS3_PMU_EVT_PPS_WR_PAY_M0_PACKET_NUM 0x0010f +#define HNS3_PMU_EVT_PPS_WR_PAY_M0_TIME 0x1010f +#define HNS3_PMU_EVT_PPS_WR_PAY_M1_PACKET_NUM 0x00110 +#define HNS3_PMU_EVT_PPS_WR_PAY_M1_TIME 0x10110 +#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_PACKET_NUM 0x00111 +#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_TIME 0x10111 +#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_PACKET_NUM 0x00112 +#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_TIME 0x10112 + +/* latency events */ +#define HNS3_PMU_EVT_DLY_TX_PUSH_TIME 0x00202 +#define HNS3_PMU_EVT_DLY_TX_PUSH_PACKET_NUM 0x10202 +#define HNS3_PMU_EVT_DLY_TX_TIME 0x00204 +#define HNS3_PMU_EVT_DLY_TX_PACKET_NUM 0x10204 +#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_TIME 0x00206 +#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_PACKET_NUM 0x10206 +#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_TIME 0x00207 +#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_PACKET_NUM 0x10207 +#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_TIME 0x00208 +#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_PACKET_NUM 0x10208 +#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_TIME 0x00209 +#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_PACKET_NUM 0x10209 +#define HNS3_PMU_EVT_DLY_RPU_TIME 0x0020e +#define HNS3_PMU_EVT_DLY_RPU_PACKET_NUM 0x1020e +#define HNS3_PMU_EVT_DLY_TPU_TIME 0x0020f +#define HNS3_PMU_EVT_DLY_TPU_PACKET_NUM 0x1020f +#define HNS3_PMU_EVT_DLY_RPE_TIME 0x00210 +#define HNS3_PMU_EVT_DLY_RPE_PACKET_NUM 0x10210 +#define HNS3_PMU_EVT_DLY_TPE_TIME 0x00211 +#define HNS3_PMU_EVT_DLY_TPE_PACKET_NUM 0x10211 +#define HNS3_PMU_EVT_DLY_TPE_PUSH_TIME 0x00212 +#define HNS3_PMU_EVT_DLY_TPE_PUSH_PACKET_NUM 0x10212 +#define HNS3_PMU_EVT_DLY_WR_FBD_TIME 0x00213 +#define HNS3_PMU_EVT_DLY_WR_FBD_PACKET_NUM 0x10213 +#define HNS3_PMU_EVT_DLY_WR_EBD_TIME 0x00214 +#define HNS3_PMU_EVT_DLY_WR_EBD_PACKET_NUM 0x10214 +#define HNS3_PMU_EVT_DLY_RD_FBD_TIME 0x00215 +#define HNS3_PMU_EVT_DLY_RD_FBD_PACKET_NUM 0x10215 +#define HNS3_PMU_EVT_DLY_RD_EBD_TIME 0x00216 +#define HNS3_PMU_EVT_DLY_RD_EBD_PACKET_NUM 0x10216 +#define HNS3_PMU_EVT_DLY_RD_PAY_M0_TIME 0x00217 +#define HNS3_PMU_EVT_DLY_RD_PAY_M0_PACKET_NUM 0x10217 +#define HNS3_PMU_EVT_DLY_RD_PAY_M1_TIME 0x00218 +#define HNS3_PMU_EVT_DLY_RD_PAY_M1_PACKET_NUM 0x10218 +#define HNS3_PMU_EVT_DLY_WR_PAY_M0_TIME 0x00219 +#define HNS3_PMU_EVT_DLY_WR_PAY_M0_PACKET_NUM 0x10219 +#define HNS3_PMU_EVT_DLY_WR_PAY_M1_TIME 0x0021a +#define HNS3_PMU_EVT_DLY_WR_PAY_M1_PACKET_NUM 0x1021a +#define HNS3_PMU_EVT_DLY_MSIX_WRITE_TIME 0x0021c +#define HNS3_PMU_EVT_DLY_MSIX_WRITE_PACKET_NUM 0x1021c + +/* interrupt rate events */ +#define HNS3_PMU_EVT_PPS_MSIX_NIC_INTR_NUM 0x00300 +#define HNS3_PMU_EVT_PPS_MSIX_NIC_TIME 0x10300 + +/* filter mode supported by each bandwidth event */ +#define HNS3_PMU_FILTER_BW_SSU_EGU 0x07 +#define HNS3_PMU_FILTER_BW_SSU_RPU 0x1f +#define 
HNS3_PMU_FILTER_BW_SSU_ROCE 0x0f +#define HNS3_PMU_FILTER_BW_ROCE_SSU 0x0f +#define HNS3_PMU_FILTER_BW_TPU_SSU 0x1f +#define HNS3_PMU_FILTER_BW_RPU_RCBRX 0x11 +#define HNS3_PMU_FILTER_BW_RCBTX_TXSCH 0x11 +#define HNS3_PMU_FILTER_BW_WR_FBD 0x1b +#define HNS3_PMU_FILTER_BW_WR_EBD 0x11 +#define HNS3_PMU_FILTER_BW_RD_FBD 0x01 +#define HNS3_PMU_FILTER_BW_RD_EBD 0x1b +#define HNS3_PMU_FILTER_BW_RD_PAY_M0 0x01 +#define HNS3_PMU_FILTER_BW_RD_PAY_M1 0x01 +#define HNS3_PMU_FILTER_BW_WR_PAY_M0 0x01 +#define HNS3_PMU_FILTER_BW_WR_PAY_M1 0x01 + +/* filter mode supported by each packet rate event */ +#define HNS3_PMU_FILTER_PPS_IGU_SSU 0x07 +#define HNS3_PMU_FILTER_PPS_SSU_EGU 0x07 +#define HNS3_PMU_FILTER_PPS_SSU_RPU 0x1f +#define HNS3_PMU_FILTER_PPS_SSU_ROCE 0x0f +#define HNS3_PMU_FILTER_PPS_ROCE_SSU 0x0f +#define HNS3_PMU_FILTER_PPS_TPU_SSU 0x1f +#define HNS3_PMU_FILTER_PPS_RPU_RCBRX 0x11 +#define HNS3_PMU_FILTER_PPS_RCBTX_TPU 0x1f +#define HNS3_PMU_FILTER_PPS_RCBTX_TXSCH 0x11 +#define HNS3_PMU_FILTER_PPS_WR_FBD 0x1b +#define HNS3_PMU_FILTER_PPS_WR_EBD 0x11 +#define HNS3_PMU_FILTER_PPS_RD_FBD 0x01 +#define HNS3_PMU_FILTER_PPS_RD_EBD 0x1b +#define HNS3_PMU_FILTER_PPS_RD_PAY_M0 0x01 +#define HNS3_PMU_FILTER_PPS_RD_PAY_M1 0x01 +#define HNS3_PMU_FILTER_PPS_WR_PAY_M0 0x01 +#define HNS3_PMU_FILTER_PPS_WR_PAY_M1 0x01 +#define HNS3_PMU_FILTER_PPS_NICROH_TX_PRE 0x01 +#define HNS3_PMU_FILTER_PPS_NICROH_RX_PRE 0x01 + +/* filter mode supported by each latency event */ +#define HNS3_PMU_FILTER_DLY_TX_PUSH 0x01 +#define HNS3_PMU_FILTER_DLY_TX 0x01 +#define HNS3_PMU_FILTER_DLY_SSU_TX_NIC 0x07 +#define HNS3_PMU_FILTER_DLY_SSU_TX_ROCE 0x07 +#define HNS3_PMU_FILTER_DLY_SSU_RX_NIC 0x07 +#define HNS3_PMU_FILTER_DLY_SSU_RX_ROCE 0x07 +#define HNS3_PMU_FILTER_DLY_RPU 0x11 +#define HNS3_PMU_FILTER_DLY_TPU 0x1f +#define HNS3_PMU_FILTER_DLY_RPE 0x01 +#define HNS3_PMU_FILTER_DLY_TPE 0x0b +#define HNS3_PMU_FILTER_DLY_TPE_PUSH 0x1b +#define HNS3_PMU_FILTER_DLY_WR_FBD 0x1b +#define HNS3_PMU_FILTER_DLY_WR_EBD 0x11 +#define HNS3_PMU_FILTER_DLY_RD_FBD 0x01 +#define HNS3_PMU_FILTER_DLY_RD_EBD 0x1b +#define HNS3_PMU_FILTER_DLY_RD_PAY_M0 0x01 +#define HNS3_PMU_FILTER_DLY_RD_PAY_M1 0x01 +#define HNS3_PMU_FILTER_DLY_WR_PAY_M0 0x01 +#define HNS3_PMU_FILTER_DLY_WR_PAY_M1 0x01 +#define HNS3_PMU_FILTER_DLY_MSIX_WRITE 0x01 + +/* filter mode supported by each interrupt rate event */ +#define HNS3_PMU_FILTER_INTR_MSIX_NIC 0x01 + +enum hns3_pmu_hw_filter_mode { + HNS3_PMU_HW_FILTER_GLOBAL, + HNS3_PMU_HW_FILTER_PORT, + HNS3_PMU_HW_FILTER_PORT_TC, + HNS3_PMU_HW_FILTER_FUNC, + HNS3_PMU_HW_FILTER_FUNC_QUEUE, + HNS3_PMU_HW_FILTER_FUNC_INTR, +}; + +struct hns3_pmu_event_attr { + u32 event; + u16 filter_support; +}; + +struct hns3_pmu { + struct perf_event *hw_events[HNS3_PMU_MAX_HW_EVENTS]; + struct hlist_node node; + struct pci_dev *pdev; + struct pmu pmu; + void __iomem *base; + int irq; + int on_cpu; + u32 identifier; + u32 hw_clk_freq; /* hardware clock frequency of PMU */ + /* maximum and minimum bdf allowed by PMU */ + u16 bdf_min; + u16 bdf_max; +}; + +#define to_hns3_pmu(p) (container_of((p), struct hns3_pmu, pmu)) + +#define GET_PCI_DEVFN(bdf) ((bdf) & 0xff) + +#define FILTER_CONDITION_PORT(port) ((1 << (port)) & 0xff) +#define FILTER_CONDITION_PORT_TC(port, tc) (((port) << 3) | ((tc) & 0x07)) +#define FILTER_CONDITION_FUNC_INTR(func, intr) (((intr) << 8) | (func)) + +#define HNS3_PMU_FILTER_ATTR(_name, _config, _start, _end) \ + static inline u64 hns3_pmu_get_##_name(struct perf_event *event) \ + { \ + return FIELD_GET(GENMASK_ULL(_end, 
_start), \ + event->attr._config); \ + } + +HNS3_PMU_FILTER_ATTR(subevent, config, 0, 7); +HNS3_PMU_FILTER_ATTR(event_type, config, 8, 15); +HNS3_PMU_FILTER_ATTR(ext_counter_used, config, 16, 16); +HNS3_PMU_FILTER_ATTR(port, config1, 0, 3); +HNS3_PMU_FILTER_ATTR(tc, config1, 4, 7); +HNS3_PMU_FILTER_ATTR(bdf, config1, 8, 23); +HNS3_PMU_FILTER_ATTR(queue, config1, 24, 39); +HNS3_PMU_FILTER_ATTR(intr, config1, 40, 51); +HNS3_PMU_FILTER_ATTR(global, config1, 52, 52); + +#define HNS3_BW_EVT_BYTE_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_BW_##_name##_BYTE_NUM, \ + HNS3_PMU_FILTER_BW_##_name}) +#define HNS3_BW_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_BW_##_name##_TIME, \ + HNS3_PMU_FILTER_BW_##_name}) +#define HNS3_PPS_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_PACKET_NUM, \ + HNS3_PMU_FILTER_PPS_##_name}) +#define HNS3_PPS_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_TIME, \ + HNS3_PMU_FILTER_PPS_##_name}) +#define HNS3_DLY_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_DLY_##_name##_TIME, \ + HNS3_PMU_FILTER_DLY_##_name}) +#define HNS3_DLY_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_DLY_##_name##_PACKET_NUM, \ + HNS3_PMU_FILTER_DLY_##_name}) +#define HNS3_INTR_EVT_INTR_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_INTR_NUM, \ + HNS3_PMU_FILTER_INTR_##_name}) +#define HNS3_INTR_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_TIME, \ + HNS3_PMU_FILTER_INTR_##_name}) + +static ssize_t hns3_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sysfs_emit(buf, "%s\n", (char *)eattr->var); +} + +static ssize_t hns3_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns3_pmu_event_attr *event; + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + event = eattr->var; + + return sysfs_emit(buf, "config=0x%x\n", event->event); +} + +static ssize_t hns3_pmu_filter_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu_event_attr *event; + struct dev_ext_attribute *eattr; + int len; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + event = eattr->var; + + len = sysfs_emit_at(buf, 0, "filter mode supported: "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL) + len += sysfs_emit_at(buf, len, "global "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT) + len += sysfs_emit_at(buf, len, "port "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC) + len += sysfs_emit_at(buf, len, "port-tc "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC) + len += sysfs_emit_at(buf, len, "func "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE) + len += sysfs_emit_at(buf, len, "func-queue "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR) + len += sysfs_emit_at(buf, len, "func-intr "); + + len += sysfs_emit_at(buf, len, "\n"); + + return len; +} + +#define HNS3_PMU_ATTR(_name, _func, _config) \ + (&((struct dev_ext_attribute[]) { \ + { __ATTR(_name, 0444, _func, NULL), (void *)_config } \ + })[0].attr.attr) + +#define HNS3_PMU_FORMAT_ATTR(_name, _format) \ + HNS3_PMU_ATTR(_name, hns3_pmu_format_show, (void *)_format) +#define 
HNS3_PMU_EVENT_ATTR(_name, _event) \ + HNS3_PMU_ATTR(_name, hns3_pmu_event_show, (void *)_event) +#define HNS3_PMU_FLT_MODE_ATTR(_name, _event) \ + HNS3_PMU_ATTR(_name, hns3_pmu_filter_mode_show, (void *)_event) + +#define HNS3_PMU_BW_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro)) +#define HNS3_PMU_PPS_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro)) +#define HNS3_PMU_DLY_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro)) +#define HNS3_PMU_INTR_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro)) + +#define HNS3_PMU_BW_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro)) +#define HNS3_PMU_PPS_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro)) +#define HNS3_PMU_DLY_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro)) +#define HNS3_PMU_INTR_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro)) + +static u8 hns3_pmu_hw_filter_modes[] = { + HNS3_PMU_HW_FILTER_GLOBAL, + HNS3_PMU_HW_FILTER_PORT, + HNS3_PMU_HW_FILTER_PORT_TC, + HNS3_PMU_HW_FILTER_FUNC, + HNS3_PMU_HW_FILTER_FUNC_QUEUE, + HNS3_PMU_HW_FILTER_FUNC_INTR, +}; + +#define HNS3_PMU_SET_HW_FILTER(_hwc, _mode) \ + ((_hwc)->addr_filters = (void *)&hns3_pmu_hw_filter_modes[(_mode)]) + +static ssize_t identifier_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "0x%x\n", hns3_pmu->identifier); +} +static DEVICE_ATTR_RO(identifier); + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%d\n", hns3_pmu->on_cpu); +} +static DEVICE_ATTR_RO(cpumask); + +static ssize_t bdf_min_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + u16 bdf = hns3_pmu->bdf_min; + + return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf), + PCI_SLOT(bdf), PCI_FUNC(bdf)); +} +static DEVICE_ATTR_RO(bdf_min); + +static ssize_t bdf_max_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + u16 bdf = hns3_pmu->bdf_max; + + return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf), + PCI_SLOT(bdf), PCI_FUNC(bdf)); +} +static DEVICE_ATTR_RO(bdf_max); + +static ssize_t hw_clk_freq_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%u\n", hns3_pmu->hw_clk_freq); +} +static DEVICE_ATTR_RO(hw_clk_freq); + +static struct 
attribute *hns3_pmu_events_attr[] = { + /* bandwidth events */ + HNS3_PMU_BW_EVT_PAIR(bw_ssu_egu, SSU_EGU), + HNS3_PMU_BW_EVT_PAIR(bw_ssu_rpu, SSU_RPU), + HNS3_PMU_BW_EVT_PAIR(bw_ssu_roce, SSU_ROCE), + HNS3_PMU_BW_EVT_PAIR(bw_roce_ssu, ROCE_SSU), + HNS3_PMU_BW_EVT_PAIR(bw_tpu_ssu, TPU_SSU), + HNS3_PMU_BW_EVT_PAIR(bw_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_BW_EVT_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_BW_EVT_PAIR(bw_wr_fbd, WR_FBD), + HNS3_PMU_BW_EVT_PAIR(bw_wr_ebd, WR_EBD), + HNS3_PMU_BW_EVT_PAIR(bw_rd_fbd, RD_FBD), + HNS3_PMU_BW_EVT_PAIR(bw_rd_ebd, RD_EBD), + HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m1, WR_PAY_M1), + + /* packet rate events */ + HNS3_PMU_PPS_EVT_PAIR(pps_igu_ssu, IGU_SSU), + HNS3_PMU_PPS_EVT_PAIR(pps_ssu_egu, SSU_EGU), + HNS3_PMU_PPS_EVT_PAIR(pps_ssu_rpu, SSU_RPU), + HNS3_PMU_PPS_EVT_PAIR(pps_ssu_roce, SSU_ROCE), + HNS3_PMU_PPS_EVT_PAIR(pps_roce_ssu, ROCE_SSU), + HNS3_PMU_PPS_EVT_PAIR(pps_tpu_ssu, TPU_SSU), + HNS3_PMU_PPS_EVT_PAIR(pps_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_tpu, RCBTX_TPU), + HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_fbd, WR_FBD), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_ebd, WR_EBD), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_fbd, RD_FBD), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_ebd, RD_EBD), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE), + HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE), + + /* latency events */ + HNS3_PMU_DLY_EVT_PAIR(dly_tx_push_to_mac, TX_PUSH), + HNS3_PMU_DLY_EVT_PAIR(dly_tx_normal_to_mac, TX), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE), + HNS3_PMU_DLY_EVT_PAIR(dly_rpu, RPU), + HNS3_PMU_DLY_EVT_PAIR(dly_tpu, TPU), + HNS3_PMU_DLY_EVT_PAIR(dly_rpe, RPE), + HNS3_PMU_DLY_EVT_PAIR(dly_tpe_normal, TPE), + HNS3_PMU_DLY_EVT_PAIR(dly_tpe_push, TPE_PUSH), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_fbd, WR_FBD), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_ebd, WR_EBD), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_fbd, RD_FBD), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_ebd, RD_EBD), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_DLY_EVT_PAIR(dly_msix_write, MSIX_WRITE), + + /* interrupt rate events */ + HNS3_PMU_INTR_EVT_PAIR(pps_intr_msix_nic, MSIX_NIC), + + NULL +}; + +static struct attribute *hns3_pmu_filter_mode_attr[] = { + /* bandwidth events */ + HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_egu, SSU_EGU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_rpu, SSU_RPU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_roce, SSU_ROCE), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_roce_ssu, ROCE_SSU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_tpu_ssu, TPU_SSU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_fbd, WR_FBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_ebd, WR_EBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_fbd, RD_FBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_ebd, RD_EBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m0, RD_PAY_M0), + 
HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m1, WR_PAY_M1), + + /* packet rate events */ + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_igu_ssu, IGU_SSU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_egu, SSU_EGU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_rpu, SSU_RPU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_roce, SSU_ROCE), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_roce_ssu, ROCE_SSU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_tpu_ssu, TPU_SSU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_tpu, RCBTX_TPU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_fbd, WR_FBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_ebd, WR_EBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_fbd, RD_FBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_ebd, RD_EBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE), + + /* latency events */ + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_push_to_mac, TX_PUSH), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_normal_to_mac, TX), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpu, RPU), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpu, TPU), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpe, RPE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_normal, TPE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_push, TPE_PUSH), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_fbd, WR_FBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_ebd, WR_EBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_fbd, RD_FBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_ebd, RD_EBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_msix_write, MSIX_WRITE), + + /* interrupt rate events */ + HNS3_PMU_INTR_FLT_MODE_PAIR(pps_intr_msix_nic, MSIX_NIC), + + NULL +}; + +static struct attribute_group hns3_pmu_events_group = { + .name = "events", + .attrs = hns3_pmu_events_attr, +}; + +static struct attribute_group hns3_pmu_filter_mode_group = { + .name = "filtermode", + .attrs = hns3_pmu_filter_mode_attr, +}; + +static struct attribute *hns3_pmu_format_attr[] = { + HNS3_PMU_FORMAT_ATTR(subevent, "config:0-7"), + HNS3_PMU_FORMAT_ATTR(event_type, "config:8-15"), + HNS3_PMU_FORMAT_ATTR(ext_counter_used, "config:16"), + HNS3_PMU_FORMAT_ATTR(port, "config1:0-3"), + HNS3_PMU_FORMAT_ATTR(tc, "config1:4-7"), + HNS3_PMU_FORMAT_ATTR(bdf, "config1:8-23"), + HNS3_PMU_FORMAT_ATTR(queue, "config1:24-39"), + HNS3_PMU_FORMAT_ATTR(intr, "config1:40-51"), + HNS3_PMU_FORMAT_ATTR(global, "config1:52"), + NULL +}; + +static struct attribute_group hns3_pmu_format_group = { + .name = "format", + .attrs = hns3_pmu_format_attr, +}; + +static struct attribute *hns3_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static struct attribute_group hns3_pmu_cpumask_attr_group = { + .attrs = hns3_pmu_cpumask_attrs, +}; + +static struct attribute 
*hns3_pmu_identifier_attrs[] = { + &dev_attr_identifier.attr, + NULL +}; + +static struct attribute_group hns3_pmu_identifier_attr_group = { + .attrs = hns3_pmu_identifier_attrs, +}; + +static struct attribute *hns3_pmu_bdf_range_attrs[] = { + &dev_attr_bdf_min.attr, + &dev_attr_bdf_max.attr, + NULL +}; + +static struct attribute_group hns3_pmu_bdf_range_attr_group = { + .attrs = hns3_pmu_bdf_range_attrs, +}; + +static struct attribute *hns3_pmu_hw_clk_freq_attrs[] = { + &dev_attr_hw_clk_freq.attr, + NULL +}; + +static struct attribute_group hns3_pmu_hw_clk_freq_attr_group = { + .attrs = hns3_pmu_hw_clk_freq_attrs, +}; + +static const struct attribute_group *hns3_pmu_attr_groups[] = { + &hns3_pmu_events_group, + &hns3_pmu_filter_mode_group, + &hns3_pmu_format_group, + &hns3_pmu_cpumask_attr_group, + &hns3_pmu_identifier_attr_group, + &hns3_pmu_bdf_range_attr_group, + &hns3_pmu_hw_clk_freq_attr_group, + NULL +}; + +static u32 hns3_pmu_get_event(struct perf_event *event) +{ + return hns3_pmu_get_ext_counter_used(event) << 16 | + hns3_pmu_get_event_type(event) << 8 | + hns3_pmu_get_subevent(event); +} + +static u32 hns3_pmu_get_real_event(struct perf_event *event) +{ + return hns3_pmu_get_event_type(event) << 8 | + hns3_pmu_get_subevent(event); +} + +static u32 hns3_pmu_get_offset(u32 offset, u32 idx) +{ + return offset + HNS3_PMU_REG_EVENT_OFFSET + + HNS3_PMU_REG_EVENT_SIZE * idx; +} + +static u32 hns3_pmu_readl(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + return readl(hns3_pmu->base + offset); +} + +static void hns3_pmu_writel(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx, + u32 val) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + writel(val, hns3_pmu->base + offset); +} + +static u64 hns3_pmu_readq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + return readq(hns3_pmu->base + offset); +} + +static void hns3_pmu_writeq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx, + u64 val) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + writeq(val, hns3_pmu->base + offset); +} + +static bool hns3_pmu_cmp_event(struct perf_event *target, + struct perf_event *event) +{ + return hns3_pmu_get_real_event(target) == hns3_pmu_get_real_event(event); +} + +static int hns3_pmu_find_related_event_idx(struct hns3_pmu *hns3_pmu, + struct perf_event *event) +{ + struct perf_event *sibling; + int hw_event_used = 0; + int idx; + + for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { + sibling = hns3_pmu->hw_events[idx]; + if (!sibling) + continue; + + hw_event_used++; + + if (!hns3_pmu_cmp_event(sibling, event)) + continue; + + /* Related event is used in the same group */ + if (sibling->group_leader == event->group_leader) + return idx; + } + + /* No related event and all hardware events are used up */ + if (hw_event_used >= HNS3_PMU_MAX_HW_EVENTS) + return -EBUSY; + + /* No related event, but there are spare hardware events to use */ + return -ENOENT; +} + +static int hns3_pmu_get_event_idx(struct hns3_pmu *hns3_pmu) +{ + int idx; + + for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { + if (!hns3_pmu->hw_events[idx]) + return idx; + } + + return -EBUSY; +} + +static bool hns3_pmu_valid_bdf(struct hns3_pmu *hns3_pmu, u16 bdf) +{ + struct pci_dev *pdev; + + if (bdf < hns3_pmu->bdf_min || bdf > hns3_pmu->bdf_max) { + pci_err(hns3_pmu->pdev, "Invalid EP device: %#x!\n", bdf); + return false; + } + + pdev = 
pci_get_domain_bus_and_slot(pci_domain_nr(hns3_pmu->pdev->bus), + PCI_BUS_NUM(bdf), + GET_PCI_DEVFN(bdf)); + if (!pdev) { + pci_err(hns3_pmu->pdev, "Nonexistent EP device: %#x!\n", bdf); + return false; + } + + pci_dev_put(pdev); + return true; +} + +static void hns3_pmu_set_qid_para(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf, + u16 queue) +{ + u32 val; + + val = GET_PCI_DEVFN(bdf); + val |= (u32)queue << HNS3_PMU_QID_PARA_QUEUE_S; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_PARA, idx, val); +} + +static bool hns3_pmu_qid_req_start(struct hns3_pmu *hns3_pmu, u32 idx) +{ + bool queue_id_valid = false; + u32 reg_qid_ctrl, val; + int err; + + /* enable queue id request */ + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, + HNS3_PMU_QID_CTRL_REQ_ENABLE); + + reg_qid_ctrl = hns3_pmu_get_offset(HNS3_PMU_REG_EVENT_QID_CTRL, idx); + err = readl_poll_timeout(hns3_pmu->base + reg_qid_ctrl, val, + val & HNS3_PMU_QID_CTRL_DONE, 1, 1000); + if (err == -ETIMEDOUT) { + pci_err(hns3_pmu->pdev, "QID request timeout!\n"); + goto out; + } + + queue_id_valid = !(val & HNS3_PMU_QID_CTRL_MISS); + +out: + /* disable qid request and clear status */ + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, 0); + + return queue_id_valid; +} + +static bool hns3_pmu_valid_queue(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf, + u16 queue) +{ + hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue); + + return hns3_pmu_qid_req_start(hns3_pmu, idx); +} + +static struct hns3_pmu_event_attr *hns3_pmu_get_pmu_event(u32 event) +{ + struct hns3_pmu_event_attr *pmu_event; + struct dev_ext_attribute *eattr; + struct device_attribute *dattr; + struct attribute *attr; + u32 i; + + for (i = 0; i < ARRAY_SIZE(hns3_pmu_events_attr) - 1; i++) { + attr = hns3_pmu_events_attr[i]; + dattr = container_of(attr, struct device_attribute, attr); + eattr = container_of(dattr, struct dev_ext_attribute, attr); + pmu_event = eattr->var; + + if (event == pmu_event->event) + return pmu_event; + } + + return NULL; +} + +static int hns3_pmu_set_func_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu) +{ + struct hw_perf_event *hwc = &event->hw; + u16 bdf = hns3_pmu_get_bdf(event); + + if (!hns3_pmu_valid_bdf(hns3_pmu, bdf)) + return -ENOENT; + + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC); + + return 0; +} + +static int hns3_pmu_set_func_queue_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu) +{ + u16 queue_id = hns3_pmu_get_queue(event); + struct hw_perf_event *hwc = &event->hw; + u16 bdf = hns3_pmu_get_bdf(event); + + if (!hns3_pmu_valid_bdf(hns3_pmu, bdf)) + return -ENOENT; + + if (!hns3_pmu_valid_queue(hns3_pmu, hwc->idx, bdf, queue_id)) { + pci_err(hns3_pmu->pdev, "Invalid queue: %u\n", queue_id); + return -ENOENT; + } + + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_QUEUE); + + return 0; +} + +static bool +hns3_pmu_is_enabled_global_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u8 global = hns3_pmu_get_global(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL)) + return false; + + return global; +} + +static bool hns3_pmu_is_enabled_func_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u16 queue_id = hns3_pmu_get_queue(event); + u16 bdf = hns3_pmu_get_bdf(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC)) + return false; + else if (queue_id != HNS3_PMU_FILTER_ALL_QUEUE) + return false; + + return bdf; +} + +static bool +hns3_pmu_is_enabled_func_queue_mode(struct perf_event *event, + 
struct hns3_pmu_event_attr *pmu_event) +{ + u16 queue_id = hns3_pmu_get_queue(event); + u16 bdf = hns3_pmu_get_bdf(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE)) + return false; + else if (queue_id == HNS3_PMU_FILTER_ALL_QUEUE) + return false; + + return bdf; +} + +static bool hns3_pmu_is_enabled_port_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u8 tc_id = hns3_pmu_get_tc(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT)) + return false; + + return tc_id == HNS3_PMU_FILTER_ALL_TC; +} + +static bool +hns3_pmu_is_enabled_port_tc_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u8 tc_id = hns3_pmu_get_tc(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC)) + return false; + + return tc_id != HNS3_PMU_FILTER_ALL_TC; +} + +static bool +hns3_pmu_is_enabled_func_intr_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu, + struct hns3_pmu_event_attr *pmu_event) +{ + u16 bdf = hns3_pmu_get_bdf(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR)) + return false; + + return hns3_pmu_valid_bdf(hns3_pmu, bdf); +} + +static int hns3_pmu_select_filter_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu) +{ + u32 event_id = hns3_pmu_get_event(event); + struct hw_perf_event *hwc = &event->hw; + struct hns3_pmu_event_attr *pmu_event; + + pmu_event = hns3_pmu_get_pmu_event(event_id); + if (!pmu_event) { + pci_err(hns3_pmu->pdev, "Invalid pmu event\n"); + return -ENOENT; + } + + if (hns3_pmu_is_enabled_global_mode(event, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_GLOBAL); + return 0; + } + + if (hns3_pmu_is_enabled_func_mode(event, pmu_event)) + return hns3_pmu_set_func_mode(event, hns3_pmu); + + if (hns3_pmu_is_enabled_func_queue_mode(event, pmu_event)) + return hns3_pmu_set_func_queue_mode(event, hns3_pmu); + + if (hns3_pmu_is_enabled_port_mode(event, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT); + return 0; + } + + if (hns3_pmu_is_enabled_port_tc_mode(event, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT_TC); + return 0; + } + + if (hns3_pmu_is_enabled_func_intr_mode(event, hns3_pmu, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_INTR); + return 0; + } + + return -ENOENT; +} + +static bool hns3_pmu_validate_event_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct perf_event *event_group[HNS3_PMU_MAX_HW_EVENTS]; + int counters = 1; + int num; + + event_group[0] = leader; + if (!is_software_event(leader)) { + if (leader->pmu != event->pmu) + return false; + + if (leader != event && !hns3_pmu_cmp_event(leader, event)) + event_group[counters++] = event; + } + + for_each_sibling_event(sibling, event->group_leader) { + if (is_software_event(sibling)) + continue; + + if (sibling->pmu != event->pmu) + return false; + + for (num = 0; num < counters; num++) { + if (hns3_pmu_cmp_event(event_group[num], sibling)) + break; + } + + if (num == counters) + event_group[counters++] = sibling; + } + + return counters <= HNS3_PMU_MAX_HW_EVENTS; +} + +static u32 hns3_pmu_get_filter_condition(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u16 intr_id = hns3_pmu_get_intr(event); + u8 port_id = hns3_pmu_get_port(event); + u16 bdf = hns3_pmu_get_bdf(event); + u8 tc_id = hns3_pmu_get_tc(event); + u8 filter_mode; + + filter_mode = *(u8 *)hwc->addr_filters; + switch (filter_mode) { + 
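/* + * The value packed here follows the FILTER_CONDITION_* macros above: + * a one-hot port bit for port mode, (port << 3) | tc for port-tc mode, + * the PCI devfn for the function modes, and (intr << 8) | devfn for + * function-interrupt mode. + */ + 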
case HNS3_PMU_HW_FILTER_PORT: + return FILTER_CONDITION_PORT(port_id); + case HNS3_PMU_HW_FILTER_PORT_TC: + return FILTER_CONDITION_PORT_TC(port_id, tc_id); + case HNS3_PMU_HW_FILTER_FUNC: + case HNS3_PMU_HW_FILTER_FUNC_QUEUE: + return GET_PCI_DEVFN(bdf); + case HNS3_PMU_HW_FILTER_FUNC_INTR: + return FILTER_CONDITION_FUNC_INTR(GET_PCI_DEVFN(bdf), intr_id); + default: + break; + } + + return 0; +} + +static void hns3_pmu_config_filter(struct perf_event *event) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + u8 event_type = hns3_pmu_get_event_type(event); + u8 subevent_id = hns3_pmu_get_subevent(event); + u16 queue_id = hns3_pmu_get_queue(event); + struct hw_perf_event *hwc = &event->hw; + u8 filter_mode = *(u8 *)hwc->addr_filters; + u16 bdf = hns3_pmu_get_bdf(event); + u32 idx = hwc->idx; + u32 val; + + val = event_type; + val |= subevent_id << HNS3_PMU_CTRL_SUBEVENT_S; + val |= filter_mode << HNS3_PMU_CTRL_FILTER_MODE_S; + val |= HNS3_PMU_EVENT_OVERFLOW_RESTART; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); + + val = hns3_pmu_get_filter_condition(event); + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_HIGH, idx, val); + + if (filter_mode == HNS3_PMU_HW_FILTER_FUNC_QUEUE) + hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue_id); +} + +static void hns3_pmu_enable_counter(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val |= HNS3_PMU_EVENT_EN; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); +} + +static void hns3_pmu_disable_counter(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val &= ~HNS3_PMU_EVENT_EN; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); +} + +static void hns3_pmu_enable_intr(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx); + val &= ~HNS3_PMU_INTR_MASK_OVERFLOW; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val); +} + +static void hns3_pmu_disable_intr(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx); + val |= HNS3_PMU_INTR_MASK_OVERFLOW; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val); +} + +static void hns3_pmu_clear_intr_status(struct hns3_pmu *hns3_pmu, u32 idx) +{ + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val |= HNS3_PMU_EVENT_STATUS_RESET; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val &= ~HNS3_PMU_EVENT_STATUS_RESET; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); +} + +static u64 hns3_pmu_read_counter(struct perf_event *event) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + + return hns3_pmu_readq(hns3_pmu, event->hw.event_base, event->hw.idx); +} + +static void hns3_pmu_write_counter(struct perf_event *event, u64 value) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + u32 idx = event->hw.idx; + + hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_COUNTER, idx, value); + hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_EXT_COUNTER, idx, value); +} + +static void hns3_pmu_init_counter(struct perf_event *event) +{ + struct hw_perf_event *hwc 
= &event->hw; + + local64_set(&hwc->prev_count, 0); + hns3_pmu_write_counter(event, 0); +} + +static int hns3_pmu_event_init(struct perf_event *event) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + int ret; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* Sampling is not supported */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + event->cpu = hns3_pmu->on_cpu; + + idx = hns3_pmu_get_event_idx(hns3_pmu); + if (idx < 0) { + pci_err(hns3_pmu->pdev, "Up to %u events are supported!\n", + HNS3_PMU_MAX_HW_EVENTS); + return -EBUSY; + } + + hwc->idx = idx; + + ret = hns3_pmu_select_filter_mode(event, hns3_pmu); + if (ret) { + pci_err(hns3_pmu->pdev, "Invalid filter, ret = %d.\n", ret); + return ret; + } + + if (!hns3_pmu_validate_event_group(event)) { + pci_err(hns3_pmu->pdev, "Invalid event group.\n"); + return -EINVAL; + } + + if (hns3_pmu_get_ext_counter_used(event)) + hwc->event_base = HNS3_PMU_REG_EVENT_EXT_COUNTER; + else + hwc->event_base = HNS3_PMU_REG_EVENT_COUNTER; + + return 0; +} + +static void hns3_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 new_cnt, prev_cnt, delta; + + do { + prev_cnt = local64_read(&hwc->prev_count); + new_cnt = hns3_pmu_read_counter(event); + } while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) != + prev_cnt); + + delta = new_cnt - prev_cnt; + local64_add(delta, &event->count); +} + +static void hns3_pmu_start(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + hwc->state = 0; + + hns3_pmu_config_filter(event); + hns3_pmu_init_counter(event); + hns3_pmu_enable_intr(hns3_pmu, hwc); + hns3_pmu_enable_counter(hns3_pmu, hwc); + + perf_event_update_userpage(event); +} + +static void hns3_pmu_stop(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hns3_pmu_disable_counter(hns3_pmu, hwc); + hns3_pmu_disable_intr(hns3_pmu, hwc); + + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + + if (hwc->state & PERF_HES_UPTODATE) + return; + + /* Read hardware counter and update the perf counter statistics */ + hns3_pmu_read(event); + hwc->state |= PERF_HES_UPTODATE; +} + +static int hns3_pmu_add(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + /* Check all working events to find a related event. 
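Two events that program the same hardware event (equal event_type and subevent; bit 16 is ignored) can share one hardware counter when they belong to the same event group. 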
*/ + idx = hns3_pmu_find_related_event_idx(hns3_pmu, event); + if (idx < 0 && idx != -ENOENT) + return idx; + + /* Current event shares an enabled hardware event with the related event */ + if (idx >= 0 && idx < HNS3_PMU_MAX_HW_EVENTS) { + hwc->idx = idx; + goto start_count; + } + + idx = hns3_pmu_get_event_idx(hns3_pmu); + if (idx < 0) + return idx; + + hwc->idx = idx; + hns3_pmu->hw_events[idx] = event; + +start_count: + if (flags & PERF_EF_START) + hns3_pmu_start(event, PERF_EF_RELOAD); + + return 0; +} + +static void hns3_pmu_del(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hns3_pmu_stop(event, PERF_EF_UPDATE); + hns3_pmu->hw_events[hwc->idx] = NULL; + perf_event_update_userpage(event); +} + +static void hns3_pmu_enable(struct pmu *pmu) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu); + u32 val; + + val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); + val |= HNS3_PMU_GLOBAL_START; + writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); +} + +static void hns3_pmu_disable(struct pmu *pmu) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu); + u32 val; + + val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); + val &= ~HNS3_PMU_GLOBAL_START; + writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); +} + +static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu) +{ + u16 device_id; + char *name; + u32 val; + + hns3_pmu->base = pcim_iomap_table(pdev)[BAR_2]; + if (!hns3_pmu->base) { + pci_err(pdev, "ioremap failed\n"); + return -ENOMEM; + } + + hns3_pmu->hw_clk_freq = readl(hns3_pmu->base + HNS3_PMU_REG_CLOCK_FREQ); + + val = readl(hns3_pmu->base + HNS3_PMU_REG_BDF); + hns3_pmu->bdf_min = val & 0xffff; + hns3_pmu->bdf_max = val >> 16; + + val = readl(hns3_pmu->base + HNS3_PMU_REG_DEVICE_ID); + device_id = val & 0xffff; + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hns3_pmu_sicl_%u", device_id); + if (!name) + return -ENOMEM; + + hns3_pmu->pdev = pdev; + hns3_pmu->on_cpu = -1; + hns3_pmu->identifier = readl(hns3_pmu->base + HNS3_PMU_REG_VERSION); + hns3_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .event_init = hns3_pmu_event_init, + .pmu_enable = hns3_pmu_enable, + .pmu_disable = hns3_pmu_disable, + .add = hns3_pmu_add, + .del = hns3_pmu_del, + .start = hns3_pmu_start, + .stop = hns3_pmu_stop, + .read = hns3_pmu_read, + .task_ctx_nr = perf_invalid_context, + .attr_groups = hns3_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + return 0; +} + +static irqreturn_t hns3_pmu_irq(int irq, void *data) +{ + struct hns3_pmu *hns3_pmu = data; + u32 intr_status, idx; + + for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { + intr_status = hns3_pmu_readl(hns3_pmu, + HNS3_PMU_REG_EVENT_INTR_STATUS, + idx); + + /* + * As each counter restarts from 0 when it overflows, + * no extra processing is needed; just clear the interrupt status. 
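+ * Both the counter and ext_counter registers are 64 bits wide, so + * an overflow should be rare in practice.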
+ */ + if (intr_status) + hns3_pmu_clear_intr_status(hns3_pmu, idx); + } + + return IRQ_HANDLED; +} + +static int hns3_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hns3_pmu *hns3_pmu; + + hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node); + if (!hns3_pmu) + return -ENODEV; + + if (hns3_pmu->on_cpu == -1) { + hns3_pmu->on_cpu = cpu; + irq_set_affinity(hns3_pmu->irq, cpumask_of(cpu)); + } + + return 0; +} + +static int hns3_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hns3_pmu *hns3_pmu; + unsigned int target; + + hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node); + if (!hns3_pmu) + return -ENODEV; + + /* Nothing to do if this CPU doesn't own the PMU */ + if (hns3_pmu->on_cpu != cpu) + return 0; + + /* Choose a new CPU from all online cpus */ + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&hns3_pmu->pmu, cpu, target); + hns3_pmu->on_cpu = target; + irq_set_affinity(hns3_pmu->irq, cpumask_of(target)); + + return 0; +} + +static void hns3_pmu_free_irq(void *data) +{ + struct pci_dev *pdev = data; + + pci_free_irq_vectors(pdev); +} + +static int hns3_pmu_irq_register(struct pci_dev *pdev, + struct hns3_pmu *hns3_pmu) +{ + int irq, ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret < 0) { + pci_err(pdev, "failed to enable MSI vectors, ret = %d.\n", ret); + return ret; + } + + ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev); + if (ret) { + pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret); + return ret; + } + + irq = pci_irq_vector(pdev, 0); + ret = devm_request_irq(&pdev->dev, irq, hns3_pmu_irq, 0, + hns3_pmu->pmu.name, hns3_pmu); + if (ret) { + pci_err(pdev, "failed to register irq, ret = %d.\n", ret); + return ret; + } + + hns3_pmu->irq = irq; + + return 0; +} + +static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu) +{ + int ret; + + ret = hns3_pmu_alloc_pmu(pdev, hns3_pmu); + if (ret) + return ret; + + ret = hns3_pmu_irq_register(pdev, hns3_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + &hns3_pmu->node); + if (ret) { + pci_err(pdev, "failed to register hotplug, ret = %d.\n", ret); + return ret; + } + + ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1); + if (ret) { + pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret); + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + &hns3_pmu->node); + } + + return ret; +} + +static void hns3_pmu_uninit_pmu(struct pci_dev *pdev) +{ + struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev); + + perf_pmu_unregister(&hns3_pmu->pmu); + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + &hns3_pmu->node); +} + +static int hns3_pmu_init_dev(struct pci_dev *pdev) +{ + int ret; + + ret = pcim_enable_device(pdev); + if (ret) { + pci_err(pdev, "failed to enable pci device, ret = %d.\n", ret); + return ret; + } + + ret = pcim_iomap_regions(pdev, BIT(BAR_2), "hns3_pmu"); + if (ret < 0) { + pci_err(pdev, "failed to request pci region, ret = %d.\n", ret); + return ret; + } + + pci_set_master(pdev); + + return 0; +} + +static int hns3_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hns3_pmu *hns3_pmu; + int ret; + + hns3_pmu = devm_kzalloc(&pdev->dev, sizeof(*hns3_pmu), GFP_KERNEL); + if (!hns3_pmu) + return -ENOMEM; + + ret = hns3_pmu_init_dev(pdev); + if (ret) + return ret; + + ret = hns3_pmu_init_pmu(pdev, hns3_pmu); + if (ret) { + 
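/* roll back the pci_set_master() done in hns3_pmu_init_dev() */ + 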
pci_clear_master(pdev); + return ret; + } + + pci_set_drvdata(pdev, hns3_pmu); + + return ret; +} + +static void hns3_pmu_remove(struct pci_dev *pdev) +{ + hns3_pmu_uninit_pmu(pdev); + pci_clear_master(pdev); + pci_set_drvdata(pdev, NULL); +} + +static const struct pci_device_id hns3_pmu_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa22b) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, hns3_pmu_ids); + +static struct pci_driver hns3_pmu_driver = { + .name = "hns3_pmu", + .id_table = hns3_pmu_ids, + .probe = hns3_pmu_probe, + .remove = hns3_pmu_remove, +}; + +static int __init hns3_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + "AP_PERF_ARM_HNS3_PMU_ONLINE", + hns3_pmu_online_cpu, + hns3_pmu_offline_cpu); + if (ret) { + pr_err("failed to setup HNS3 PMU hotplug, ret = %d.\n", ret); + return ret; + } + + ret = pci_register_driver(&hns3_pmu_driver); + if (ret) { + pr_err("failed to register pci driver, ret = %d.\n", ret); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE); + } + + return ret; +} +module_init(hns3_pmu_module_init); + +static void __exit hns3_pmu_module_exit(void) +{ + pci_unregister_driver(&hns3_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE); +} +module_exit(hns3_pmu_module_exit); + +MODULE_DESCRIPTION("HNS3 PMU driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c index 282d3a071a67..69c3050a4348 100644 --- a/drivers/perf/marvell_cn10k_tad_pmu.c +++ b/drivers/perf/marvell_cn10k_tad_pmu.c @@ -2,10 +2,6 @@ /* Marvell CN10K LLC-TAD perf driver * * Copyright (C) 2021 Marvell - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #define pr_fmt(fmt) "tad_pmu: " fmt @@ -18,9 +14,9 @@ #include <linux/perf_event.h> #include <linux/platform_device.h> -#define TAD_PFC_OFFSET 0x0 +#define TAD_PFC_OFFSET 0x800 #define TAD_PFC(counter) (TAD_PFC_OFFSET | (counter << 3)) -#define TAD_PRF_OFFSET 0x100 +#define TAD_PRF_OFFSET 0x900 #define TAD_PRF(counter) (TAD_PRF_OFFSET | (counter << 3)) #define TAD_PRF_CNTSEL_MASK 0xFF #define TAD_MAX_COUNTERS 8 @@ -100,9 +96,7 @@ static void tad_pmu_event_counter_start(struct perf_event *event, int flags) * which sets TAD()_PRF()[CNTSEL] != 0 */ for (i = 0; i < tad_pmu->region_cnt; i++) { - reg_val = readq_relaxed(tad_pmu->regions[i].base + - TAD_PRF(counter_idx)); - reg_val |= (event_idx & 0xFF); + reg_val = event_idx & 0xFF; writeq_relaxed(reg_val, tad_pmu->regions[i].base + TAD_PRF(counter_idx)); } diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c index b2b8d2074ed0..2c961839903d 100644 --- a/drivers/perf/riscv_pmu.c +++ b/drivers/perf/riscv_pmu.c @@ -121,7 +121,7 @@ u64 riscv_pmu_event_update(struct perf_event *event) return delta; } -static void riscv_pmu_stop(struct perf_event *event, int flags) +void riscv_pmu_stop(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); @@ -175,7 +175,7 @@ int riscv_pmu_event_set_period(struct perf_event *event) return overflow; } -static void riscv_pmu_start(struct perf_event *event, int flags) +void riscv_pmu_start(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index dca3537a8dcc..79a3de515ece 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -17,10 +17,30 @@ #include <linux/irqdomain.h> #include <linux/of_irq.h> #include <linux/of.h> +#include <linux/cpu_pm.h> #include <asm/sbi.h> #include <asm/hwcap.h> +PMU_FORMAT_ATTR(event, "config:0-47"); +PMU_FORMAT_ATTR(firmware, "config:63"); + +static struct attribute *riscv_arch_formats_attr[] = { + &format_attr_event.attr, + &format_attr_firmware.attr, + NULL, +}; + +static struct attribute_group riscv_pmu_format_group = { + .name = "format", + .attrs = riscv_arch_formats_attr, +}; + +static const struct attribute_group *riscv_pmu_attr_groups[] = { + &riscv_pmu_format_group, + NULL, +}; + union sbi_pmu_ctr_info { unsigned long value; struct { @@ -666,12 +686,15 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde child = of_get_compatible_child(cpu, "riscv,cpu-intc"); if (!child) { pr_err("Failed to find INTC node\n"); + of_node_put(cpu); return -ENODEV; } domain = irq_find_host(child); of_node_put(child); - if (domain) + if (domain) { + of_node_put(cpu); break; + } } if (!domain) { pr_err("Failed to find INTC IRQ root domain\n"); @@ -693,6 +716,73 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde return 0; } +#ifdef CONFIG_CPU_PM +static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd, + void *v) +{ + struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb); + struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); + int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS); + struct perf_event *event; + int idx; + + if (!enabled) + return NOTIFY_OK; + + for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) { + event = cpuc->events[idx]; + if (!event) + continue; + + switch (cmd) { + case CPU_PM_ENTER: + /* + * Stop and 
update the counter + */ + riscv_pmu_stop(event, PERF_EF_UPDATE); + break; + case CPU_PM_EXIT: + case CPU_PM_ENTER_FAILED: + /* + * Restore and enable the counter. + * + * Requires RCU read locking to be functional, + * wrap the call within RCU_NONIDLE to make the + * RCU subsystem aware this cpu is not idle from + * an RCU perspective for the riscv_pmu_start() call + * duration. + */ + RCU_NONIDLE(riscv_pmu_start(event, PERF_EF_RELOAD)); + break; + default: + break; + } + } + + return NOTIFY_OK; +} + +static int riscv_pm_pmu_register(struct riscv_pmu *pmu) +{ + pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify; + return cpu_pm_register_notifier(&pmu->riscv_pm_nb); +} + +static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) +{ + cpu_pm_unregister_notifier(&pmu->riscv_pm_nb); +} +#else +static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; } +static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { } +#endif + +static void riscv_pmu_destroy(struct riscv_pmu *pmu) +{ + riscv_pm_pmu_unregister(pmu); + cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); +} + static int pmu_sbi_device_probe(struct platform_device *pdev) { struct riscv_pmu *pmu = NULL; @@ -720,6 +810,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE; } + pmu->pmu.attr_groups = riscv_pmu_attr_groups; pmu->num_counters = num_counters; pmu->ctr_start = pmu_sbi_ctr_start; pmu->ctr_stop = pmu_sbi_ctr_stop; @@ -733,14 +824,19 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) if (ret) return ret; + ret = riscv_pm_pmu_register(pmu); + if (ret) + goto out_unregister; + ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW); - if (ret) { - cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); - return ret; - } + if (ret) + goto out_unregister; return 0; +out_unregister: + riscv_pmu_destroy(pmu); + out_free: kfree(pmu); return ret; diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig index 849c4204f550..93a6a8ee4716 100644 --- a/drivers/phy/broadcom/Kconfig +++ b/drivers/phy/broadcom/Kconfig @@ -83,7 +83,7 @@ config PHY_NS2_USB_DRD config PHY_BRCM_SATA tristate "Broadcom SATA PHY driver" depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || \ - ARCH_BCM_63XX || COMPILE_TEST + ARCH_BCMBCA || COMPILE_TEST depends on OF select GENERIC_PHY default ARCH_BCM_IPROC diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index f52960d2dfbe..bff144c97e66 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -32,7 +32,7 @@ config DEBUG_PINCTRL Say Y here to add some extra checks and diagnostics to PINCTRL calls. 
config PINCTRL_AMD - tristate "AMD GPIO pin control" + bool "AMD GPIO pin control" depends on HAS_IOMEM depends on ACPI || COMPILE_TEST select GPIOLIB diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c index c94e24aadf92..83d47ff1cea8 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c @@ -236,11 +236,11 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, const struct aspeed_sig_expr **funcs; const struct aspeed_sig_expr ***prios; - pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name); - if (!pdesc) return -EINVAL; + pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name); + prios = pdesc->prios; if (!prios) diff --git a/drivers/pinctrl/freescale/pinctrl-imx93.c b/drivers/pinctrl/freescale/pinctrl-imx93.c index c0630f69e995..417e41b37a6f 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx93.c +++ b/drivers/pinctrl/freescale/pinctrl-imx93.c @@ -239,6 +239,7 @@ static const struct pinctrl_pin_desc imx93_pinctrl_pads[] = { static const struct imx_pinctrl_soc_info imx93_pinctrl_info = { .pins = imx93_pinctrl_pads, .npins = ARRAY_SIZE(imx93_pinctrl_pads), + .flags = ZERO_OFFSET_VALID, .gpr_compatible = "fsl,imx93-iomuxc-gpr", }; diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index a140b6bfbfaa..bcde042d29dc 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -102,7 +102,7 @@ struct armada_37xx_pinctrl { struct device *dev; struct gpio_chip gpio_chip; struct irq_chip irq_chip; - spinlock_t irq_lock; + raw_spinlock_t irq_lock; struct pinctrl_desc pctl; struct pinctrl_dev *pctl_dev; struct armada_37xx_pin_group *groups; @@ -523,9 +523,9 @@ static void armada_37xx_irq_ack(struct irq_data *d) unsigned long flags; armada_37xx_irq_update_reg(&reg, d); - spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); writel(d->mask, info->base + reg); - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); } static void armada_37xx_irq_mask(struct irq_data *d) @@ -536,10 +536,10 @@ static void armada_37xx_irq_mask(struct irq_data *d) unsigned long flags; armada_37xx_irq_update_reg(&reg, d); - spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); val = readl(info->base + reg); writel(val & ~d->mask, info->base + reg); - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); } static void armada_37xx_irq_unmask(struct irq_data *d) @@ -550,10 +550,10 @@ static void armada_37xx_irq_unmask(struct irq_data *d) unsigned long flags; armada_37xx_irq_update_reg(&reg, d); - spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); val = readl(info->base + reg); writel(val | d->mask, info->base + reg); - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); } static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on) @@ -564,14 +564,14 @@ static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on) unsigned long flags; armada_37xx_irq_update_reg(&reg, d); - spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); val = readl(info->base + reg); if (on) val |= (BIT(d->hwirq % GPIO_PER_REG)); else val &= ~(BIT(d->hwirq % GPIO_PER_REG)); writel(val, info->base + reg); -
spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); return 0; } @@ -583,7 +583,7 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type) u32 val, reg = IRQ_POL; unsigned long flags; - spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); armada_37xx_irq_update_reg(&reg, d); val = readl(info->base + reg); switch (type) { @@ -607,11 +607,11 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type) break; } default: - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); return -EINVAL; } writel(val, info->base + reg); - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); return 0; } @@ -626,7 +626,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info, regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l); - spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); p = readl(info->base + IRQ_POL + 4 * reg_idx); if ((p ^ l) & (1 << bit_num)) { /* @@ -647,7 +647,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info, ret = -1; } - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); return ret; } @@ -664,11 +664,11 @@ static void armada_37xx_irq_handler(struct irq_desc *desc) u32 status; unsigned long flags; - spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); status = readl_relaxed(info->base + IRQ_STATUS + 4 * i); /* Manage only the interrupt that was enabled */ status &= readl_relaxed(info->base + IRQ_EN + 4 * i); - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); while (status) { u32 hwirq = ffs(status) - 1; u32 virq = irq_find_mapping(d, hwirq + @@ -695,12 +695,12 @@ static void armada_37xx_irq_handler(struct irq_desc *desc) update_status: /* Update status in case a new IRQ appears */ - spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); status = readl_relaxed(info->base + IRQ_STATUS + 4 * i); /* Manage only the interrupt that was enabled */ status &= readl_relaxed(info->base + IRQ_EN + 4 * i); - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); } } chained_irq_exit(chip, desc); @@ -731,7 +731,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev, struct device *dev = &pdev->dev; unsigned int i, nr_irq_parent; - spin_lock_init(&info->irq_lock); + raw_spin_lock_init(&info->irq_lock); nr_irq_parent = of_irq_count(np); if (!nr_irq_parent) { @@ -1107,25 +1107,40 @@ static const struct of_device_id armada_37xx_pinctrl_of_match[] = { { }, }; +static const struct regmap_config armada_37xx_pinctrl_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .use_raw_spinlock = true, +}; + static int __init armada_37xx_pinctrl_probe(struct platform_device *pdev) { struct armada_37xx_pinctrl *info; struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; struct regmap *regmap; + void __iomem *base; int ret; + base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + if (IS_ERR(base)) { + dev_err(dev, "failed to ioremap base address: %pe\n", base); + return PTR_ERR(base); + } + + regmap = devm_regmap_init_mmio(dev, base, + &armada_37xx_pinctrl_regmap_config); + if (IS_ERR(regmap)) { + dev_err(dev,
"failed to create regmap: %pe\n", regmap); + return PTR_ERR(regmap); + } + info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->dev = dev; - - regmap = syscon_node_to_regmap(np); - if (IS_ERR(regmap)) - return dev_err_probe(dev, PTR_ERR(regmap), "cannot get regmap\n"); info->regmap = regmap; - info->data = of_device_get_match_data(dev); ret = armada_37xx_pinctrl_register(pdev, info); diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c index 5f4a8c5c6650..771dd1f4fbe0 100644 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@ -29,19 +29,12 @@ #define ocelot_clrsetbits(addr, clear, set) \ writel((readl(addr) & ~(clear)) | (set), (addr)) -/* PINCONFIG bits (sparx5 only) */ enum { PINCONF_BIAS, PINCONF_SCHMITT, PINCONF_DRIVE_STRENGTH, }; -#define BIAS_PD_BIT BIT(4) -#define BIAS_PU_BIT BIT(3) -#define BIAS_BITS (BIAS_PD_BIT|BIAS_PU_BIT) -#define SCHMITT_BIT BIT(2) -#define DRIVE_BITS GENMASK(1, 0) - /* GPIO standard registers */ #define OCELOT_GPIO_OUT_SET 0x0 #define OCELOT_GPIO_OUT_CLR 0x4 @@ -321,6 +314,13 @@ struct ocelot_pin_caps { unsigned char a_functions[OCELOT_FUNC_PER_PIN]; /* Additional functions */ }; +struct ocelot_pincfg_data { + u8 pd_bit; + u8 pu_bit; + u8 drive_bits; + u8 schmitt_bit; +}; + struct ocelot_pinctrl { struct device *dev; struct pinctrl_dev *pctl; @@ -328,10 +328,16 @@ struct ocelot_pinctrl { struct regmap *map; struct regmap *pincfg; struct pinctrl_desc *desc; + const struct ocelot_pincfg_data *pincfg_data; struct ocelot_pmx_func func[FUNC_MAX]; u8 stride; }; +struct ocelot_match_data { + struct pinctrl_desc desc; + struct ocelot_pincfg_data pincfg_data; +}; + #define LUTON_P(p, f0, f1) \ static struct ocelot_pin_caps luton_pin_##p = { \ .pin = p, \ @@ -1325,24 +1331,27 @@ static int ocelot_hw_get_value(struct ocelot_pinctrl *info, int ret = -EOPNOTSUPP; if (info->pincfg) { + const struct ocelot_pincfg_data *opd = info->pincfg_data; u32 regcfg; - ret = regmap_read(info->pincfg, pin, &regcfg); + ret = regmap_read(info->pincfg, + pin * regmap_get_reg_stride(info->pincfg), + &regcfg); if (ret) return ret; ret = 0; switch (reg) { case PINCONF_BIAS: - *val = regcfg & BIAS_BITS; + *val = regcfg & (opd->pd_bit | opd->pu_bit); break; case PINCONF_SCHMITT: - *val = regcfg & SCHMITT_BIT; + *val = regcfg & opd->schmitt_bit; break; case PINCONF_DRIVE_STRENGTH: - *val = regcfg & DRIVE_BITS; + *val = regcfg & opd->drive_bits; break; default: @@ -1359,14 +1368,18 @@ static int ocelot_pincfg_clrsetbits(struct ocelot_pinctrl *info, u32 regaddr, u32 val; int ret; - ret = regmap_read(info->pincfg, regaddr, &val); + ret = regmap_read(info->pincfg, + regaddr * regmap_get_reg_stride(info->pincfg), + &val); if (ret) return ret; val &= ~clrbits; val |= setbits; - ret = regmap_write(info->pincfg, regaddr, val); + ret = regmap_write(info->pincfg, + regaddr * regmap_get_reg_stride(info->pincfg), + val); return ret; } @@ -1379,23 +1392,27 @@ static int ocelot_hw_set_value(struct ocelot_pinctrl *info, int ret = -EOPNOTSUPP; if (info->pincfg) { + const struct ocelot_pincfg_data *opd = info->pincfg_data; ret = 0; switch (reg) { case PINCONF_BIAS: - ret = ocelot_pincfg_clrsetbits(info, pin, BIAS_BITS, + ret = ocelot_pincfg_clrsetbits(info, pin, + opd->pd_bit | opd->pu_bit, val); break; case PINCONF_SCHMITT: - ret = ocelot_pincfg_clrsetbits(info, pin, SCHMITT_BIT, + ret = ocelot_pincfg_clrsetbits(info, pin, + opd->schmitt_bit, val); break; case PINCONF_DRIVE_STRENGTH: if (val <= 3) ret =
ocelot_pincfg_clrsetbits(info, pin, - DRIVE_BITS, val); + opd->drive_bits, + val); else ret = -EINVAL; break; @@ -1425,17 +1442,20 @@ static int ocelot_pinconf_get(struct pinctrl_dev *pctldev, if (param == PIN_CONFIG_BIAS_DISABLE) val = (val == 0); else if (param == PIN_CONFIG_BIAS_PULL_DOWN) - val = (val & BIAS_PD_BIT ? true : false); + val = !!(val & info->pincfg_data->pd_bit); else /* PIN_CONFIG_BIAS_PULL_UP */ - val = (val & BIAS_PU_BIT ? true : false); + val = !!(val & info->pincfg_data->pu_bit); break; case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + if (!info->pincfg_data->schmitt_bit) + return -EOPNOTSUPP; + err = ocelot_hw_get_value(info, pin, PINCONF_SCHMITT, &val); if (err) return err; - val = (val & SCHMITT_BIT ? true : false); + val = !!(val & info->pincfg_data->schmitt_bit); break; case PIN_CONFIG_DRIVE_STRENGTH: @@ -1479,6 +1499,7 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *configs, unsigned int num_configs) { struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + const struct ocelot_pincfg_data *opd = info->pincfg_data; u32 param, arg, p; int cfg, err = 0; @@ -1491,8 +1512,8 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, case PIN_CONFIG_BIAS_PULL_UP: case PIN_CONFIG_BIAS_PULL_DOWN: arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 : - (param == PIN_CONFIG_BIAS_PULL_UP) ? BIAS_PU_BIT : - BIAS_PD_BIT; + (param == PIN_CONFIG_BIAS_PULL_UP) ? + opd->pu_bit : opd->pd_bit; err = ocelot_hw_set_value(info, pin, PINCONF_BIAS, arg); if (err) @@ -1501,7 +1522,10 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, break; case PIN_CONFIG_INPUT_SCHMITT_ENABLE: - arg = arg ? SCHMITT_BIT : 0; + if (!opd->schmitt_bit) + return -EOPNOTSUPP; + + arg = arg ? 
opd->schmitt_bit : 0; err = ocelot_hw_set_value(info, pin, PINCONF_SCHMITT, arg); if (err) @@ -1562,69 +1586,94 @@ static const struct pinctrl_ops ocelot_pctl_ops = { .dt_free_map = pinconf_generic_dt_free_map, }; -static struct pinctrl_desc luton_desc = { - .name = "luton-pinctrl", - .pins = luton_pins, - .npins = ARRAY_SIZE(luton_pins), - .pctlops = &ocelot_pctl_ops, - .pmxops = &ocelot_pmx_ops, - .owner = THIS_MODULE, +static struct ocelot_match_data luton_desc = { + .desc = { + .name = "luton-pinctrl", + .pins = luton_pins, + .npins = ARRAY_SIZE(luton_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &ocelot_pmx_ops, + .owner = THIS_MODULE, + }, }; -static struct pinctrl_desc serval_desc = { - .name = "serval-pinctrl", - .pins = serval_pins, - .npins = ARRAY_SIZE(serval_pins), - .pctlops = &ocelot_pctl_ops, - .pmxops = &ocelot_pmx_ops, - .owner = THIS_MODULE, +static struct ocelot_match_data serval_desc = { + .desc = { + .name = "serval-pinctrl", + .pins = serval_pins, + .npins = ARRAY_SIZE(serval_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &ocelot_pmx_ops, + .owner = THIS_MODULE, + }, }; -static struct pinctrl_desc ocelot_desc = { - .name = "ocelot-pinctrl", - .pins = ocelot_pins, - .npins = ARRAY_SIZE(ocelot_pins), - .pctlops = &ocelot_pctl_ops, - .pmxops = &ocelot_pmx_ops, - .owner = THIS_MODULE, +static struct ocelot_match_data ocelot_desc = { + .desc = { + .name = "ocelot-pinctrl", + .pins = ocelot_pins, + .npins = ARRAY_SIZE(ocelot_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &ocelot_pmx_ops, + .owner = THIS_MODULE, + }, }; -static struct pinctrl_desc jaguar2_desc = { - .name = "jaguar2-pinctrl", - .pins = jaguar2_pins, - .npins = ARRAY_SIZE(jaguar2_pins), - .pctlops = &ocelot_pctl_ops, - .pmxops = &ocelot_pmx_ops, - .owner = THIS_MODULE, +static struct ocelot_match_data jaguar2_desc = { + .desc = { + .name = "jaguar2-pinctrl", + .pins = jaguar2_pins, + .npins = ARRAY_SIZE(jaguar2_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &ocelot_pmx_ops, + .owner = THIS_MODULE, + }, }; -static struct pinctrl_desc servalt_desc = { - .name = "servalt-pinctrl", - .pins = servalt_pins, - .npins = ARRAY_SIZE(servalt_pins), - .pctlops = &ocelot_pctl_ops, - .pmxops = &ocelot_pmx_ops, - .owner = THIS_MODULE, +static struct ocelot_match_data servalt_desc = { + .desc = { + .name = "servalt-pinctrl", + .pins = servalt_pins, + .npins = ARRAY_SIZE(servalt_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &ocelot_pmx_ops, + .owner = THIS_MODULE, + }, }; -static struct pinctrl_desc sparx5_desc = { - .name = "sparx5-pinctrl", - .pins = sparx5_pins, - .npins = ARRAY_SIZE(sparx5_pins), - .pctlops = &ocelot_pctl_ops, - .pmxops = &ocelot_pmx_ops, - .confops = &ocelot_confops, - .owner = THIS_MODULE, +static struct ocelot_match_data sparx5_desc = { + .desc = { + .name = "sparx5-pinctrl", + .pins = sparx5_pins, + .npins = ARRAY_SIZE(sparx5_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &ocelot_pmx_ops, + .confops = &ocelot_confops, + .owner = THIS_MODULE, + }, + .pincfg_data = { + .pd_bit = BIT(4), + .pu_bit = BIT(3), + .drive_bits = GENMASK(1, 0), + .schmitt_bit = BIT(2), + }, }; -static struct pinctrl_desc lan966x_desc = { - .name = "lan966x-pinctrl", - .pins = lan966x_pins, - .npins = ARRAY_SIZE(lan966x_pins), - .pctlops = &ocelot_pctl_ops, - .pmxops = &lan966x_pmx_ops, - .confops = &ocelot_confops, - .owner = THIS_MODULE, +static struct ocelot_match_data lan966x_desc = { + .desc = { + .name = "lan966x-pinctrl", + .pins = lan966x_pins, + .npins = ARRAY_SIZE(lan966x_pins), + .pctlops = 
&ocelot_pctl_ops, + .pmxops = &lan966x_pmx_ops, + .confops = &ocelot_confops, + .owner = THIS_MODULE, + }, + .pincfg_data = { + .pd_bit = BIT(3), + .pu_bit = BIT(2), + .drive_bits = GENMASK(1, 0), + }, }; static int ocelot_create_group_func_map(struct device *dev, @@ -1761,6 +1810,7 @@ static void ocelot_irq_mask(struct irq_data *data) regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio), BIT(gpio % 32), 0); + gpiochip_disable_irq(chip, gpio); } static void ocelot_irq_unmask(struct irq_data *data) @@ -1769,6 +1819,7 @@ static void ocelot_irq_unmask(struct irq_data *data) struct ocelot_pinctrl *info = gpiochip_get_data(chip); unsigned int gpio = irqd_to_hwirq(data); + gpiochip_enable_irq(chip, gpio); regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio), BIT(gpio % 32), BIT(gpio % 32)); } @@ -1790,8 +1841,10 @@ static struct irq_chip ocelot_eoi_irqchip = { .irq_mask = ocelot_irq_mask, .irq_eoi = ocelot_irq_ack, .irq_unmask = ocelot_irq_unmask, - .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED, + .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED | + IRQCHIP_IMMUTABLE, .irq_set_type = ocelot_irq_set_type, + GPIOCHIP_IRQ_RESOURCE_HELPERS }; static struct irq_chip ocelot_irqchip = { @@ -1800,6 +1853,8 @@ static struct irq_chip ocelot_irqchip = { .irq_ack = ocelot_irq_ack, .irq_unmask = ocelot_irq_unmask, .irq_set_type = ocelot_irq_set_type, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS }; static int ocelot_irq_set_type(struct irq_data *data, unsigned int type) @@ -1863,7 +1918,7 @@ static int ocelot_gpiochip_register(struct platform_device *pdev, irq = platform_get_irq_optional(pdev, 0); if (irq > 0) { girq = &gc->irq; - girq->chip = &ocelot_irqchip; + gpio_irq_chip_set_chip(girq, &ocelot_irqchip); girq->parent_handler = ocelot_irq_handler; girq->num_parents = 1; girq->parents = devm_kcalloc(&pdev->dev, 1, @@ -1890,7 +1945,8 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = { {}, }; -static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev) +static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev, + const struct ocelot_pinctrl *info) { void __iomem *base; @@ -1898,7 +1954,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev) .reg_bits = 32, .val_bits = 32, .reg_stride = 4, - .max_register = 32, + .max_register = info->desc->npins * 4, .name = "pincfg", }; @@ -1913,6 +1969,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev) static int ocelot_pinctrl_probe(struct platform_device *pdev) { + const struct ocelot_match_data *data; struct device *dev = &pdev->dev; struct ocelot_pinctrl *info; struct reset_control *reset; @@ -1929,7 +1986,16 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev) if (!info) return -ENOMEM; - info->desc = (struct pinctrl_desc *)device_get_match_data(dev); + data = device_get_match_data(dev); + if (!data) + return -EINVAL; + + info->desc = devm_kmemdup(dev, &data->desc, sizeof(*info->desc), + GFP_KERNEL); + if (!info->desc) + return -ENOMEM; + + info->pincfg_data = &data->pincfg_data; reset = devm_reset_control_get_optional_shared(dev, "switch"); if (IS_ERR(reset)) @@ -1956,7 +2022,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev) /* Pinconf registers */ if (info->desc->confops) { - pincfg = ocelot_pinctrl_create_pincfg(pdev); + pincfg = ocelot_pinctrl_create_pincfg(pdev, info); if (IS_ERR(pincfg)) dev_dbg(dev, "Failed to create pincfg regmap\n"); else diff --git 
a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index fd5fff9adff0..3be2a08ae3a6 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -966,16 +966,13 @@ static int pmic_gpio_child_to_parent_hwirq(struct gpio_chip *chip, return 0; } -static void *pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip, - unsigned int parent_hwirq, - unsigned int parent_type) +static int pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip, + union gpio_irq_fwspec *gfwspec, + unsigned int parent_hwirq, + unsigned int parent_type) { struct pmic_gpio_state *state = gpiochip_get_data(chip); - struct irq_fwspec *fwspec; - - fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL); - if (!fwspec) - return NULL; + struct irq_fwspec *fwspec = &gfwspec->fwspec; fwspec->fwnode = chip->irq.parent_domain->fwnode; @@ -985,7 +982,7 @@ static void *pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip, /* param[2] must be left as 0 */ fwspec->param[3] = parent_type; - return fwspec; + return 0; } static int pmic_gpio_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/ralink/pinctrl-ralink.c b/drivers/pinctrl/ralink/pinctrl-ralink.c index 63429a287434..770862f45b3f 100644 --- a/drivers/pinctrl/ralink/pinctrl-ralink.c +++ b/drivers/pinctrl/ralink/pinctrl-ralink.c @@ -266,6 +266,8 @@ static int ralink_pinctrl_pins(struct ralink_priv *p) p->func[i]->pin_count, sizeof(int), GFP_KERNEL); + if (!p->func[i]->pins) + return -ENOMEM; for (j = 0; j < p->func[i]->pin_count; j++) p->func[i]->pins[j] = p->func[i]->pin_first + j; diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index a48cac55152c..c47eed9d948f 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -9,8 +9,10 @@ #include <linux/clk.h> #include <linux/gpio/driver.h> #include <linux/io.h> +#include <linux/interrupt.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinctrl.h> @@ -89,6 +91,7 @@ #define PIN(n) (0x0800 + 0x10 + (n)) #define IOLH(n) (0x1000 + (n) * 8) #define IEN(n) (0x1800 + (n) * 8) +#define ISEL(n) (0x2c80 + (n) * 8) #define PWPR (0x3014) #define SD_CH(n) (0x3000 + (n) * 4) #define QSPI (0x3008) @@ -112,6 +115,10 @@ #define RZG2L_PIN_ID_TO_PORT_OFFSET(id) (RZG2L_PIN_ID_TO_PORT(id) + 0x10) #define RZG2L_PIN_ID_TO_PIN(id) ((id) % RZG2L_PINS_PER_PORT) +#define RZG2L_TINT_MAX_INTERRUPT 32 +#define RZG2L_TINT_IRQ_START_INDEX 9 +#define RZG2L_PACK_HWIRQ(t, i) (((t) << 16) | (i)) + struct rzg2l_dedicated_configs { const char *name; u32 config; @@ -137,6 +144,9 @@ struct rzg2l_pinctrl { struct gpio_chip gpio_chip; struct pinctrl_gpio_range gpio_range; + DECLARE_BITMAP(tint_slot, RZG2L_TINT_MAX_INTERRUPT); + spinlock_t bitmap_lock; + unsigned int hwirq[RZG2L_TINT_MAX_INTERRUPT]; spinlock_t lock; }; @@ -883,8 +893,14 @@ static int rzg2l_gpio_get(struct gpio_chip *chip, unsigned int offset) static void rzg2l_gpio_free(struct gpio_chip *chip, unsigned int offset) { + unsigned int virq; + pinctrl_gpio_free(chip->base + offset); + virq = irq_find_mapping(chip->irq.domain, offset); + if (virq) + irq_dispose_mapping(virq); + /* * Set the GPIO as an input to ensure that the next GPIO request won't * drive the GPIO pin as an output. 
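The RZG2L_PACK_HWIRQ() macro introduced above packs a GPIOINT number into the upper 16 bits of the parent hwirq and a parent interrupt index into the lower 16 bits (the hunks that follow add RZG2L_TINT_IRQ_START_INDEX to a free tint_slot entry before packing). A small self-contained sketch of that arithmetic, using made-up example values rather than anything taken from the patch:

#include <stdio.h>

#define RZG2L_TINT_IRQ_START_INDEX	9
#define RZG2L_PACK_HWIRQ(t, i)		(((t) << 16) | (i))

int main(void)
{
	unsigned int gpioint = 5;	/* GPIOINT chosen for the pin */
	unsigned int slot = 3;		/* free slot found in tint_slot */
	unsigned int spi = slot + RZG2L_TINT_IRQ_START_INDEX;	/* 12 */
	unsigned int hwirq = RZG2L_PACK_HWIRQ(gpioint, spi);	/* 0x5000c */

	/* The parent irqchip can split the two fields back apart. */
	printf("tint=%u index=%u\n", hwirq >> 16, hwirq & 0xffff);
	return 0;
}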
@@ -1104,14 +1120,221 @@ static struct { } }; +static int rzg2l_gpio_get_gpioint(unsigned int virq) +{ + unsigned int gpioint; + unsigned int i; + u32 port, bit; + + port = virq / 8; + bit = virq % 8; + + if (port >= ARRAY_SIZE(rzg2l_gpio_configs) || + bit >= RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[port])) + return -EINVAL; + + gpioint = bit; + for (i = 0; i < port; i++) + gpioint += RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[i]); + + return gpioint; +} + +static void rzg2l_gpio_irq_disable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip); + unsigned int hwirq = irqd_to_hwirq(d); + unsigned long flags; + void __iomem *addr; + u32 port; + u8 bit; + + port = RZG2L_PIN_ID_TO_PORT(hwirq); + bit = RZG2L_PIN_ID_TO_PIN(hwirq); + + addr = pctrl->base + ISEL(port); + if (bit >= 4) { + bit -= 4; + addr += 4; + } + + spin_lock_irqsave(&pctrl->lock, flags); + writel(readl(addr) & ~BIT(bit * 8), addr); + spin_unlock_irqrestore(&pctrl->lock, flags); + + gpiochip_disable_irq(gc, hwirq); + irq_chip_disable_parent(d); +} + +static void rzg2l_gpio_irq_enable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip); + unsigned int hwirq = irqd_to_hwirq(d); + unsigned long flags; + void __iomem *addr; + u32 port; + u8 bit; + + gpiochip_enable_irq(gc, hwirq); + + port = RZG2L_PIN_ID_TO_PORT(hwirq); + bit = RZG2L_PIN_ID_TO_PIN(hwirq); + + addr = pctrl->base + ISEL(port); + if (bit >= 4) { + bit -= 4; + addr += 4; + } + + spin_lock_irqsave(&pctrl->lock, flags); + writel(readl(addr) | BIT(bit * 8), addr); + spin_unlock_irqrestore(&pctrl->lock, flags); + + irq_chip_enable_parent(d); +} + +static int rzg2l_gpio_irq_set_type(struct irq_data *d, unsigned int type) +{ + return irq_chip_set_type_parent(d, type); +} + +static void rzg2l_gpio_irqc_eoi(struct irq_data *d) +{ + irq_chip_eoi_parent(d); +} + +static void rzg2l_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + + seq_printf(p, dev_name(gc->parent)); +} + +static const struct irq_chip rzg2l_gpio_irqchip = { + .name = "rzg2l-gpio", + .irq_disable = rzg2l_gpio_irq_disable, + .irq_enable = rzg2l_gpio_irq_enable, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_set_type = rzg2l_gpio_irq_set_type, + .irq_eoi = rzg2l_gpio_irqc_eoi, + .irq_print_chip = rzg2l_gpio_irq_print_chip, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + +static int rzg2l_gpio_child_to_parent_hwirq(struct gpio_chip *gc, + unsigned int child, + unsigned int child_type, + unsigned int *parent, + unsigned int *parent_type) +{ + struct rzg2l_pinctrl *pctrl = gpiochip_get_data(gc); + unsigned long flags; + int gpioint, irq; + + gpioint = rzg2l_gpio_get_gpioint(child); + if (gpioint < 0) + return gpioint; + + spin_lock_irqsave(&pctrl->bitmap_lock, flags); + irq = bitmap_find_free_region(pctrl->tint_slot, RZG2L_TINT_MAX_INTERRUPT, get_order(1)); + spin_unlock_irqrestore(&pctrl->bitmap_lock, flags); + if (irq < 0) + return -ENOSPC; + pctrl->hwirq[irq] = child; + irq += RZG2L_TINT_IRQ_START_INDEX; + + /* All these interrupts are level high in the CPU */ + *parent_type = IRQ_TYPE_LEVEL_HIGH; + *parent = RZG2L_PACK_HWIRQ(gpioint, irq); + return 0; +} + +static int rzg2l_gpio_populate_parent_fwspec(struct gpio_chip *chip, + union gpio_irq_fwspec *gfwspec, + 
unsigned int parent_hwirq, + unsigned int parent_type) +{ + struct irq_fwspec *fwspec = &gfwspec->fwspec; + + fwspec->fwnode = chip->irq.parent_domain->fwnode; + fwspec->param_count = 2; + fwspec->param[0] = parent_hwirq; + fwspec->param[1] = parent_type; + + return 0; +} + +static void rzg2l_gpio_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d; + + d = irq_domain_get_irq_data(domain, virq); + if (d) { + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + unsigned long flags; + unsigned int i; + + for (i = 0; i < RZG2L_TINT_MAX_INTERRUPT; i++) { + if (pctrl->hwirq[i] == hwirq) { + spin_lock_irqsave(&pctrl->bitmap_lock, flags); + bitmap_release_region(pctrl->tint_slot, i, get_order(1)); + spin_unlock_irqrestore(&pctrl->bitmap_lock, flags); + pctrl->hwirq[i] = 0; + break; + } + } + } + irq_domain_free_irqs_common(domain, virq, nr_irqs); +} + +static void rzg2l_init_irq_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, + unsigned int ngpios) +{ + struct rzg2l_pinctrl *pctrl = gpiochip_get_data(gc); + struct gpio_chip *chip = &pctrl->gpio_chip; + unsigned int offset; + + /* Forbid unused lines to be mapped as IRQs */ + for (offset = 0; offset < chip->ngpio; offset++) { + u32 port, bit; + + port = offset / 8; + bit = offset % 8; + + if (port >= ARRAY_SIZE(rzg2l_gpio_configs) || + bit >= RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[port])) + clear_bit(offset, valid_mask); + } +} + static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl) { struct device_node *np = pctrl->dev->of_node; struct gpio_chip *chip = &pctrl->gpio_chip; const char *name = dev_name(pctrl->dev); + struct irq_domain *parent_domain; struct of_phandle_args of_args; + struct device_node *parent_np; + struct gpio_irq_chip *girq; int ret; + parent_np = of_irq_find_parent(np); + if (!parent_np) + return -ENXIO; + + parent_domain = irq_find_host(parent_np); + of_node_put(parent_np); + if (!parent_domain) + return -EPROBE_DEFER; + ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &of_args); if (ret) { dev_err(pctrl->dev, "Unable to parse gpio-ranges\n"); @@ -1138,6 +1361,15 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl) chip->base = -1; chip->ngpio = of_args.args[2]; + girq = &chip->irq; + gpio_irq_chip_set_chip(girq, &rzg2l_gpio_irqchip); + girq->fwnode = of_node_to_fwnode(np); + girq->parent_domain = parent_domain; + girq->child_to_parent_hwirq = rzg2l_gpio_child_to_parent_hwirq; + girq->populate_parent_alloc_arg = rzg2l_gpio_populate_parent_fwspec; + girq->child_irq_domain_ops.free = rzg2l_gpio_irq_domain_free; + girq->init_valid_mask = rzg2l_init_irq_valid_mask; + pctrl->gpio_range.id = 0; pctrl->gpio_range.pin_base = 0; pctrl->gpio_range.base = 0; @@ -1253,6 +1485,7 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev) } spin_lock_init(&pctrl->lock); + spin_lock_init(&pctrl->bitmap_lock); platform_set_drvdata(pdev, pctrl); diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index 57a33fb0f2d7..14bcca73238a 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -1338,16 +1338,18 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode bank->secure_control = pctl->match_data->secure_control; spin_lock_init(&bank->lock); - /* create irq hierarchical domain */ - bank->fwnode = 
fwnode; + if (pctl->domain) { + /* create irq hierarchical domain */ + bank->fwnode = fwnode; - bank->domain = irq_domain_create_hierarchy(pctl->domain, 0, - STM32_GPIO_IRQ_LINE, bank->fwnode, - &stm32_gpio_domain_ops, bank); + bank->domain = irq_domain_create_hierarchy(pctl->domain, 0, STM32_GPIO_IRQ_LINE, + bank->fwnode, &stm32_gpio_domain_ops, + bank); - if (!bank->domain) { - err = -ENODEV; - goto err_clk; + if (!bank->domain) { + err = -ENODEV; + goto err_clk; + } } err = gpiochip_add_data(&bank->gpio_chip, bank); @@ -1510,6 +1512,8 @@ int stm32_pctl_probe(struct platform_device *pdev) pctl->domain = stm32_pctrl_get_irq_domain(pdev); if (IS_ERR(pctl->domain)) return PTR_ERR(pctl->domain); + if (!pctl->domain) + dev_warn(dev, "pinctrl without interrupt support\n"); /* hwspinlock is optional */ hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0); diff --git a/drivers/pinctrl/sunplus/sppctl.c b/drivers/pinctrl/sunplus/sppctl.c index 3ba47040ac42..2b3335ab56c6 100644 --- a/drivers/pinctrl/sunplus/sppctl.c +++ b/drivers/pinctrl/sunplus/sppctl.c @@ -871,6 +871,9 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node } *map = kcalloc(*num_maps + nmG, sizeof(**map), GFP_KERNEL); + if (*map == NULL) + return -ENOMEM; + for (i = 0; i < (*num_maps); i++) { dt_pin = be32_to_cpu(list[i]); pin_num = FIELD_GET(GENMASK(31, 24), dt_pin); diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c index 4ada80317a3b..b5c1a8f363f3 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c @@ -158,26 +158,26 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = { SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "nand"), /* DQ6 */ + SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */ SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "nand"), /* DQ7 */ + SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */ SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "nand"), /* DQS */ + SUNXI_FUNCTION(0x2, "nand0"), /* DQS */ SUNXI_FUNCTION(0x3, "mmc2")), /* RST */ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "nand")), /* CE2 */ + SUNXI_FUNCTION(0x2, "nand0")), /* CE2 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "nand")), /* CE3 */ + SUNXI_FUNCTION(0x2, "nand0")), /* CE3 */ /* Hole */ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2), SUNXI_FUNCTION(0x0, "gpio_in"), diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index d9327d7d56ee..dd928402af99 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -544,6 +544,8 @@ static int sunxi_pconf_set(struct pinctrl_dev *pctldev, unsigned pin, struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); int i; + pin -= pctl->desc->pin_base; + for (i = 0; i < num_configs; i++) { enum pin_config_param param; unsigned long flags; diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c index 2923daf63b75..7b9c107c17ce 100644 --- a/drivers/platform/mellanox/nvsw-sn2201.c +++ b/drivers/platform/mellanox/nvsw-sn2201.c @@ -890,6 
+890,7 @@ nvsw_sn2201_create_static_devices(struct nvsw_sn2201 *nvsw_sn2201, int size) { struct mlxreg_hotplug_device *dev = devs; + int ret; int i; /* Create I2C static devices. */ @@ -901,6 +902,7 @@ nvsw_sn2201_create_static_devices(struct nvsw_sn2201 *nvsw_sn2201, dev->nr, dev->brdinfo->addr); dev->adapter = NULL; + ret = PTR_ERR(dev->client); goto fail_create_static_devices; } } @@ -914,7 +916,7 @@ fail_create_static_devices: dev->client = NULL; dev->adapter = NULL; } - return IS_ERR(dev->client); + return ret; } static void nvsw_sn2201_destroy_static_devices(struct nvsw_sn2201 *nvsw_sn2201, diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index f08ad85683cb..bc4013e950ed 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -945,6 +945,8 @@ config PANASONIC_LAPTOP tristate "Panasonic Laptop Extras" depends on INPUT && ACPI depends on BACKLIGHT_CLASS_DEVICE + depends on ACPI_VIDEO=n || ACPI_VIDEO + depends on SERIO_I8042 || SERIO_I8042 = n select INPUT_SPARSEKMAP help This driver adds support for access to backlight control and hotkeys diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c index f11d18beac18..700eb19e8450 100644 --- a/drivers/platform/x86/amd-pmc.c +++ b/drivers/platform/x86/amd-pmc.c @@ -91,6 +91,8 @@ #define AMD_CPU_ID_PCO AMD_CPU_ID_RV #define AMD_CPU_ID_CZN AMD_CPU_ID_RN #define AMD_CPU_ID_YC 0x14B5 +#define AMD_CPU_ID_CB 0x14D8 +#define AMD_CPU_ID_PS 0x14E8 #define PMC_MSG_DELAY_MIN_US 50 #define RESPONSE_REGISTER_LOOP_MAX 20000 @@ -318,6 +320,8 @@ static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev, val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN); break; case AMD_CPU_ID_YC: + case AMD_CPU_ID_CB: + case AMD_CPU_ID_PS: val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC); break; default: @@ -491,7 +495,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev) &amd_pmc_idlemask_fops); /* Enable STB only when the module_param is set */ if (enable_stb) { - if (dev->cpu_id == AMD_CPU_ID_YC) + if (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB || + dev->cpu_id == AMD_CPU_ID_PS) debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev, &amd_pmc_stb_debugfs_fops_v2); else @@ -615,6 +620,8 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev) return MSG_OS_HINT_PCO; case AMD_CPU_ID_RN: case AMD_CPU_ID_YC: + case AMD_CPU_ID_CB: + case AMD_CPU_ID_PS: return MSG_OS_HINT_RN; } return -EINVAL; @@ -735,6 +742,8 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = { #endif static const struct pci_device_id pmc_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CB) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) }, @@ -877,7 +886,7 @@ static int amd_pmc_probe(struct platform_device *pdev) mutex_init(&dev->lock); - if (enable_stb && dev->cpu_id == AMD_CPU_ID_YC) { + if (enable_stb && (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB)) { err = amd_pmc_s2d_init(dev); if (err) return err; @@ -915,6 +924,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = { {"AMDI0005", 0}, {"AMDI0006", 0}, {"AMDI0007", 0}, + {"AMDI0008", 0}, {"AMD0004", 0}, {"AMD0005", 0}, { } diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 57a07db659cb..478dd300b9c9 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ 
-522,6 +522,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, { KE_KEY, 0x32, { KEY_MUTE } }, { KE_KEY, 0x35, { KEY_SCREENLOCK } }, + { KE_KEY, 0x38, { KEY_PROG3 } }, /* Armoury Crate */ { KE_KEY, 0x40, { KEY_PREVIOUSSONG } }, { KE_KEY, 0x41, { KEY_NEXTSONG } }, { KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */ @@ -574,6 +575,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */ { KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */ { KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */ + { KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */ { KE_KEY, 0xB5, { KEY_CALC } }, { KE_KEY, 0xC4, { KEY_KBDILLUMUP } }, { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } }, diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c index 497ad2f64a51..5e7e6659a849 100644 --- a/drivers/platform/x86/gigabyte-wmi.c +++ b/drivers/platform/x86/gigabyte-wmi.c @@ -150,6 +150,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = { DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660 GAMING X DDR4"), + DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660I AORUS PRO DDR4"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z490 AORUS ELITE AC"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"), diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 0d8cb22e30df..bc7020e9df9e 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -89,6 +89,7 @@ enum hp_wmi_event_ids { HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D, HPWMI_PEAKSHIFT_PERIOD = 0x0F, HPWMI_BATTERY_CHARGE_PERIOD = 0x10, + HPWMI_SANITIZATION_MODE = 0x17, }; /* @@ -853,6 +854,8 @@ static void hp_wmi_notify(u32 value, void *context) break; case HPWMI_BATTERY_CHARGE_PERIOD: break; + case HPWMI_SANITIZATION_MODE: + break; default: pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data); break; diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 3ccb7b71dfb1..abd0c81d62c4 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -152,6 +152,10 @@ static bool no_bt_rfkill; module_param(no_bt_rfkill, bool, 0444); MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth."); +static bool allow_v4_dytc; +module_param(allow_v4_dytc, bool, 0444); +MODULE_PARM_DESC(allow_v4_dytc, "Enable DYTC version 4 platform-profile support."); + /* * ACPI Helpers */ @@ -871,12 +875,18 @@ static void dytc_profile_refresh(struct ideapad_private *priv) static const struct dmi_system_id ideapad_dytc_v4_allow_table[] = { { /* Ideapad 5 Pro 16ACH6 */ - .ident = "LENOVO 82L5", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "82L5") } }, + { + /* Ideapad 5 15ITL05 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "IdeaPad 5 15ITL05") + } + }, {} }; @@ -901,13 +911,16 @@ static int ideapad_dytc_profile_init(struct ideapad_private *priv) dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF; - if (dytc_version < 5) { - if (dytc_version < 4 || !dmi_check_system(ideapad_dytc_v4_allow_table)) { - dev_info(&priv->platform_device->dev, - "DYTC_VERSION is less than 4 or is not allowed: %d\n", - dytc_version); - return -ENODEV; - } + if (dytc_version < 4) { + 
dev_info(&priv->platform_device->dev, "DYTC_VERSION < 4 is not supported\n"); + return -ENODEV; + } + + if (dytc_version < 5 && + !(allow_v4_dytc || dmi_check_system(ideapad_dytc_v4_allow_table))) { + dev_info(&priv->platform_device->dev, + "DYTC_VERSION 4 support may not work. Pass ideapad_laptop.allow_v4_dytc=Y on the kernel commandline to enable\n"); + return -ENODEV; } priv->dytc = kzalloc(sizeof(*priv->dytc), GFP_KERNEL); diff --git a/drivers/platform/x86/intel/atomisp2/led.c b/drivers/platform/x86/intel/atomisp2/led.c index 5935dfca166f..10077a61d8c5 100644 --- a/drivers/platform/x86/intel/atomisp2/led.c +++ b/drivers/platform/x86/intel/atomisp2/led.c @@ -50,7 +50,8 @@ static const struct dmi_system_id atomisp2_led_systems[] __initconst = { { .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"), + /* Non exact match to also match T100TAF */ + DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"), }, .driver_data = &asus_t100ta_lookup, }, diff --git a/drivers/platform/x86/intel/ifs/Kconfig b/drivers/platform/x86/intel/ifs/Kconfig index 7ce896434b8f..c341a27cc1a3 100644 --- a/drivers/platform/x86/intel/ifs/Kconfig +++ b/drivers/platform/x86/intel/ifs/Kconfig @@ -1,6 +1,9 @@ config INTEL_IFS tristate "Intel In Field Scan" depends on X86 && CPU_SUP_INTEL && 64BIT && SMP + # Discussion on the list has shown that the sysfs API needs a bit + # more work, mark this as broken for now + depends on BROKEN select INTEL_IFS_DEVICE help Enable support for the In Field Scan capability in select diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index 40183bda7894..a1fe1e0dcf4a 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -1911,6 +1911,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &icl_reg_map), X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &tgl_reg_map), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &tgl_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &tgl_reg_map), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_reg_map), X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &tgl_reg_map), {} diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index 37850d07987d..615e39cbbbf1 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c @@ -119,20 +119,22 @@ * - v0.1 start from toshiba_acpi driver written by John Belmonte */ -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/types.h> +#include <linux/acpi.h> #include <linux/backlight.h> #include <linux/ctype.h> -#include <linux/seq_file.h> -#include <linux/uaccess.h> -#include <linux/slab.h> -#include <linux/acpi.h> +#include <linux/i8042.h> +#include <linux/init.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/platform_device.h> - +#include <linux/seq_file.h> +#include <linux/serio.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include <acpi/video.h> MODULE_AUTHOR("Hiroshi Miura <miura@da-cha.org>"); MODULE_AUTHOR("David Bronaugh <dbronaugh@linuxboxen.org>"); @@ -241,6 +243,42 @@ struct pcc_acpi { struct platform_device *platform; }; +/* + * On some Panasonic models the volume up / down / mute keys send duplicate + * keypress events over the PS/2 kbd interface, filter these out. 
+ */ +static bool panasonic_i8042_filter(unsigned char data, unsigned char str, + struct serio *port) +{ + static bool extended; + + if (str & I8042_STR_AUXDATA) + return false; + + if (data == 0xe0) { + extended = true; + return true; + } else if (extended) { + extended = false; + + switch (data & 0x7f) { + case 0x20: /* e0 20 / e0 a0, Volume Mute press / release */ + case 0x2e: /* e0 2e / e0 ae, Volume Down press / release */ + case 0x30: /* e0 30 / e0 b0, Volume Up press / release */ + return true; + default: + /* + * Report the previously filtered e0 before continuing + * with the next non-filtered byte. + */ + serio_interrupt(port, 0xe0, 0); + return false; + } + } + + return false; +} + /* method access functions */ static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val) { @@ -762,6 +800,8 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc) struct input_dev *hotk_input_dev = pcc->input_dev; int rc; unsigned long long result; + unsigned int key; + unsigned int updown; rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY, NULL, &result); @@ -770,20 +810,27 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc) return; } + key = result & 0xf; + updown = result & 0x80; /* 0x80 == key down; 0x00 = key up */ + /* hack: some firmware sends no key down for sleep / hibernate */ - if ((result & 0xf) == 0x7 || (result & 0xf) == 0xa) { - if (result & 0x80) + if (key == 7 || key == 10) { + if (updown) sleep_keydown_seen = 1; if (!sleep_keydown_seen) sparse_keymap_report_event(hotk_input_dev, - result & 0xf, 0x80, false); + key, 0x80, false); } - if ((result & 0xf) == 0x7 || (result & 0xf) == 0x9 || (result & 0xf) == 0xa) { - if (!sparse_keymap_report_event(hotk_input_dev, - result & 0xf, result & 0x80, false)) - pr_err("Unknown hotkey event: 0x%04llx\n", result); - } + /* + * Don't report brightness key-presses if they are also reported + * by the ACPI video bus. 
+ */ + if ((key == 1 || key == 2) && acpi_video_handles_brightness_key_presses()) + return; + + if (!sparse_keymap_report_event(hotk_input_dev, key, updown, false)) + pr_err("Unknown hotkey event: 0x%04llx\n", result); } static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event) @@ -997,6 +1044,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) pcc->platform = NULL; } + i8042_install_filter(panasonic_i8042_filter); return 0; out_platform: @@ -1020,6 +1068,8 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device) if (!device || !pcc) return -EINVAL; + i8042_remove_filter(panasonic_i8042_filter); + if (pcc->platform) { device_remove_file(&pcc->platform->dev, &dev_attr_cdpower); platform_device_unregister(pcc->platform); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index e6cb4a14cdd4..a8b383051528 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -4529,6 +4529,7 @@ static void thinkpad_acpi_amd_s2idle_restore(void) iounmap(addr); cleanup_resource: release_resource(res); + kfree(res); } static struct acpi_s2idle_dev_ops thinkpad_acpi_s2idle_dev_ops = { @@ -10299,21 +10300,15 @@ static struct ibm_struct proxsensor_driver_data = { #define DYTC_DISABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_MMC_BALANCE, 0) #define DYTC_ENABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_MMC_BALANCE, 1) -enum dytc_profile_funcmode { - DYTC_FUNCMODE_NONE = 0, - DYTC_FUNCMODE_MMC, - DYTC_FUNCMODE_PSC, -}; - -static enum dytc_profile_funcmode dytc_profile_available; static enum platform_profile_option dytc_current_profile; static atomic_t dytc_ignore_event = ATOMIC_INIT(0); static DEFINE_MUTEX(dytc_mutex); +static int dytc_capabilities; static bool dytc_mmc_get_available; static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *profile) { - if (dytc_profile_available == DYTC_FUNCMODE_MMC) { + if (dytc_capabilities & BIT(DYTC_FC_MMC)) { switch (dytcmode) { case DYTC_MODE_MMC_LOWPOWER: *profile = PLATFORM_PROFILE_LOW_POWER; @@ -10330,7 +10325,7 @@ static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *p } return 0; } - if (dytc_profile_available == DYTC_FUNCMODE_PSC) { + if (dytc_capabilities & BIT(DYTC_FC_PSC)) { switch (dytcmode) { case DYTC_MODE_PSC_LOWPOWER: *profile = PLATFORM_PROFILE_LOW_POWER; @@ -10352,21 +10347,21 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe { switch (profile) { case PLATFORM_PROFILE_LOW_POWER: - if (dytc_profile_available == DYTC_FUNCMODE_MMC) + if (dytc_capabilities & BIT(DYTC_FC_MMC)) *perfmode = DYTC_MODE_MMC_LOWPOWER; - else if (dytc_profile_available == DYTC_FUNCMODE_PSC) + else if (dytc_capabilities & BIT(DYTC_FC_PSC)) *perfmode = DYTC_MODE_PSC_LOWPOWER; break; case PLATFORM_PROFILE_BALANCED: - if (dytc_profile_available == DYTC_FUNCMODE_MMC) + if (dytc_capabilities & BIT(DYTC_FC_MMC)) *perfmode = DYTC_MODE_MMC_BALANCE; - else if (dytc_profile_available == DYTC_FUNCMODE_PSC) + else if (dytc_capabilities & BIT(DYTC_FC_PSC)) *perfmode = DYTC_MODE_PSC_BALANCE; break; case PLATFORM_PROFILE_PERFORMANCE: - if (dytc_profile_available == DYTC_FUNCMODE_MMC) + if (dytc_capabilities & BIT(DYTC_FC_MMC)) *perfmode = DYTC_MODE_MMC_PERFORM; - else if (dytc_profile_available == DYTC_FUNCMODE_PSC) + else if (dytc_capabilities & BIT(DYTC_FC_PSC)) *perfmode = DYTC_MODE_PSC_PERFORM; break; default: /* Unknown profile */ @@ -10445,7 +10440,7 @@ static int dytc_profile_set(struct 
platform_profile_handler *pprof, if (err) goto unlock; - if (dytc_profile_available == DYTC_FUNCMODE_MMC) { + if (dytc_capabilities & BIT(DYTC_FC_MMC)) { if (profile == PLATFORM_PROFILE_BALANCED) { /* * To get back to balanced mode we need to issue a reset command. @@ -10464,7 +10459,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof, goto unlock; } } - if (dytc_profile_available == DYTC_FUNCMODE_PSC) { + if (dytc_capabilities & BIT(DYTC_FC_PSC)) { err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output); if (err) goto unlock; @@ -10483,12 +10478,12 @@ static void dytc_profile_refresh(void) int perfmode; mutex_lock(&dytc_mutex); - if (dytc_profile_available == DYTC_FUNCMODE_MMC) { + if (dytc_capabilities & BIT(DYTC_FC_MMC)) { if (dytc_mmc_get_available) err = dytc_command(DYTC_CMD_MMC_GET, &output); else err = dytc_cql_command(DYTC_CMD_GET, &output); - } else if (dytc_profile_available == DYTC_FUNCMODE_PSC) + } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) err = dytc_command(DYTC_CMD_GET, &output); mutex_unlock(&dytc_mutex); @@ -10517,7 +10512,6 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm) set_bit(PLATFORM_PROFILE_BALANCED, dytc_profile.choices); set_bit(PLATFORM_PROFILE_PERFORMANCE, dytc_profile.choices); - dytc_profile_available = DYTC_FUNCMODE_NONE; err = dytc_command(DYTC_CMD_QUERY, &output); if (err) return err; @@ -10530,13 +10524,12 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm) return -ENODEV; /* Check what capabilities are supported */ - err = dytc_command(DYTC_CMD_FUNC_CAP, &output); + err = dytc_command(DYTC_CMD_FUNC_CAP, &dytc_capabilities); if (err) return err; - if (output & BIT(DYTC_FC_MMC)) { /* MMC MODE */ - dytc_profile_available = DYTC_FUNCMODE_MMC; - + if (dytc_capabilities & BIT(DYTC_FC_MMC)) { /* MMC MODE */ + pr_debug("MMC is supported\n"); /* * Check if MMC_GET functionality available * Version > 6 and return success from MMC_GET command @@ -10547,8 +10540,13 @@ if (!err && ((output & DYTC_ERR_MASK) == DYTC_ERR_SUCCESS)) dytc_mmc_get_available = true; } - } else if (output & BIT(DYTC_FC_PSC)) { /* PSC MODE */ - dytc_profile_available = DYTC_FUNCMODE_PSC; + } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */ + /* Support for this only works on AMD platforms */ + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { + dbg_printk(TPACPI_DBG_INIT, "PSC not supported on Intel platforms\n"); + return -ENODEV; + } + pr_debug("PSC is supported\n"); } else { dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n"); return -ENODEV; @@ -10574,7 +10572,6 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm) static void dytc_profile_exit(void) { - dytc_profile_available = DYTC_FUNCMODE_NONE; platform_profile_remove(); } diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c index f446be72e539..480375977435 100644 --- a/drivers/platform/x86/x86-android-tablets.c +++ b/drivers/platform/x86/x86-android-tablets.c @@ -27,8 +27,8 @@ #include <linux/pinctrl/machine.h> #include <linux/platform_data/lp855x.h> #include <linux/platform_device.h> -#include <linux/pm.h> #include <linux/power/bq24190_charger.h> +#include <linux/reboot.h> #include <linux/rmi.h> #include <linux/serdev.h> #include <linux/spi/spi.h> @@ -889,6 +889,7 @@ static const struct pinctrl_map lenovo_yoga_tab2_830_1050_codec_pinctrl_map = "INT33FC:02", "pmu_clk2_grp", "pmu_clk"); static struct pinctrl
*lenovo_yoga_tab2_830_1050_codec_pinctrl; +static struct sys_off_handler *lenovo_yoga_tab2_830_1050_sys_off_handler; static int __init lenovo_yoga_tab2_830_1050_init_codec(void) { @@ -933,9 +934,11 @@ err_put_device: * followed by a normal 3 second press to recover. Avoid this by doing an EFI * poweroff instead. */ -static void lenovo_yoga_tab2_830_1050_power_off(void) +static int lenovo_yoga_tab2_830_1050_power_off(struct sys_off_data *data) { efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); + + return NOTIFY_DONE; } static int __init lenovo_yoga_tab2_830_1050_init(void) @@ -950,13 +953,19 @@ static int __init lenovo_yoga_tab2_830_1050_init(void) if (ret) return ret; - pm_power_off = lenovo_yoga_tab2_830_1050_power_off; + /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */ + lenovo_yoga_tab2_830_1050_sys_off_handler = + register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_FIRMWARE + 1, + lenovo_yoga_tab2_830_1050_power_off, NULL); + if (IS_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler)) + return PTR_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler); + return 0; } static void lenovo_yoga_tab2_830_1050_exit(void) { - pm_power_off = NULL; /* Just turn poweroff into halt on module unload */ + unregister_sys_off_handler(lenovo_yoga_tab2_830_1050_sys_off_handler); if (lenovo_yoga_tab2_830_1050_codec_pinctrl) { pinctrl_put(lenovo_yoga_tab2_830_1050_codec_pinctrl); diff --git a/drivers/power/reset/arm-versatile-reboot.c b/drivers/power/reset/arm-versatile-reboot.c index 08d0a07b58ef..c7624d7611a7 100644 --- a/drivers/power/reset/arm-versatile-reboot.c +++ b/drivers/power/reset/arm-versatile-reboot.c @@ -146,6 +146,7 @@ static int __init versatile_reboot_probe(void) versatile_reboot_type = (enum versatile_reboot)reboot_id->data; syscon_regmap = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(syscon_regmap)) return PTR_ERR(syscon_regmap); diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index ec8a404d71b4..4339fa9ff009 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -3148,6 +3148,7 @@ static int ab8500_fg_probe(struct platform_device *pdev) ret = ab8500_fg_init_hw_registers(di); if (ret) { dev_err(dev, "failed to initialize registers\n"); + destroy_workqueue(di->fg_wq); return ret; } @@ -3159,6 +3160,7 @@ static int ab8500_fg_probe(struct platform_device *pdev) di->fg_psy = devm_power_supply_register(dev, &ab8500_fg_desc, &psy_cfg); if (IS_ERR(di->fg_psy)) { dev_err(dev, "failed to register FG psy\n"); + destroy_workqueue(di->fg_wq); return PTR_ERR(di->fg_psy); } @@ -3174,8 +3176,10 @@ static int ab8500_fg_probe(struct platform_device *pdev) /* Register primary interrupt handlers */ for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq); i++) { irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name); - if (irq < 0) + if (irq < 0) { + destroy_workqueue(di->fg_wq); return irq; + } ret = devm_request_threaded_irq(dev, irq, NULL, ab8500_fg_irq[i].isr, @@ -3185,6 +3189,7 @@ static int ab8500_fg_probe(struct platform_device *pdev) if (ret != 0) { dev_err(dev, "failed to request %s IRQ %d: %d\n", ab8500_fg_irq[i].name, irq, ret); + destroy_workqueue(di->fg_wq); return ret; } dev_dbg(dev, "Requested %s IRQ %d: %d\n", @@ -3200,6 +3205,7 @@ static int ab8500_fg_probe(struct platform_device *pdev) ret = ab8500_fg_sysfs_init(di); if (ret) { dev_err(dev, "failed to create sysfs entry\n"); + destroy_workqueue(di->fg_wq); return ret; } @@ -3207,6 +3213,7 @@ static int ab8500_fg_probe(struct platform_device 
*pdev) if (ret) { dev_err(dev, "failed to create FG psy\n"); ab8500_fg_sysfs_exit(di); + destroy_workqueue(di->fg_wq); return ret; } diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index fad5890c899e..470253c337c7 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -846,17 +846,17 @@ int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *t { int i, high, low; - /* Break loop at table_len - 1 because that is the highest index */ - for (i = 0; i < table_len - 1; i++) + for (i = 0; i < table_len; i++) if (temp > table[i].temp) break; /* The library function will deal with high == low */ - if ((i == 0) || (i == (table_len - 1))) - high = i; + if (i == 0) + high = low = i; + else if (i == table_len) + high = low = i - 1; else - high = i - 1; - low = i; + high = (low = i) - 1; return fixp_linear_interpolate(table[low].temp, table[low].resistance, @@ -958,17 +958,17 @@ int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table, { int i, high, low; - /* Break loop at table_len - 1 because that is the highest index */ - for (i = 0; i < table_len - 1; i++) + for (i = 0; i < table_len; i++) if (ocv > table[i].ocv) break; /* The library function will deal with high == low */ - if ((i == 0) || (i == (table_len - 1))) - high = i - 1; + if (i == 0) + high = low = i; + else if (i == table_len) + high = low = i - 1; else - high = i; /* i.e. i == 0 */ - low = i; + high = (low = i) - 1; return fixp_linear_interpolate(table[low].ocv, table[low].capacity, diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c index f5eced0842b3..6a88eb7e9f75 100644 --- a/drivers/powercap/dtpm_cpu.c +++ b/drivers/powercap/dtpm_cpu.c @@ -71,34 +71,19 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit) static u64 scale_pd_power_uw(struct cpumask *pd_mask, u64 power) { - unsigned long max = 0, sum_util = 0; + unsigned long max, sum_util = 0; int cpu; - for_each_cpu_and(cpu, pd_mask, cpu_online_mask) { - - /* - * The capacity is the same for all CPUs belonging to - * the same perf domain, so a single call to - * arch_scale_cpu_capacity() is enough. However, we - * need the CPU parameter to be initialized by the - * loop, so the call ends up in this block. - * - * We can initialize 'max' with a cpumask_first() call - * before the loop but the bits computation is not - * worth given the arch_scale_cpu_capacity() just - * returns a value where the resulting assembly code - * will be optimized by the compiler. - */ - max = arch_scale_cpu_capacity(cpu); - sum_util += sched_cpu_util(cpu, max); - } - /* - * In the improbable case where all the CPUs of the perf - * domain are offline, 'max' will be zero and will lead to an - * illegal operation with a zero division. + * The capacity is the same for all CPUs belonging to + * the same perf domain. */ - return max ? 
(power * ((sum_util << 10) / max)) >> 10 : 0; + max = arch_scale_cpu_capacity(cpumask_first(pd_mask)); + + for_each_cpu_and(cpu, pd_mask, cpu_online_mask) + sum_util += sched_cpu_util(cpu); + + return (power * ((sum_util << 10) / max)) >> 10; } static u64 get_pd_power_uw(struct dtpm *dtpm) diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index 458218f88c5e..fe4971b65c64 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -176,6 +176,7 @@ config PTP_1588_CLOCK_OCP depends on !S390 depends on COMMON_CLK select NET_DEVLINK + select CRC16 help This driver adds support for an OpenCompute time card. diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index cb2491761958..ae1d6ee382a5 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -60,7 +60,7 @@ static LIST_HEAD(sclp_reg_list); /* List of queued requests. */ static LIST_HEAD(sclp_req_queue); -/* Data for read and and init requests. */ +/* Data for read and init requests. */ static struct sclp_req sclp_read_req; static struct sclp_req sclp_init_req; static void *sclp_read_sccb; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 5c13d2079d96..0a9045b49c50 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -1435,7 +1435,7 @@ static int __verify_queue_reservations(struct device_driver *drv, void *data) if (ap_drv->in_use) { rc = ap_drv->in_use(ap_perms.apm, newaqm); if (rc) - return -EBUSY; + rc = -EBUSY; } /* release the driver's module */ diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 9e54fe76a9b2..35d4b398c197 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3565,7 +3565,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, if (!atomic_read(&queue->set_pci_flags_count)) { /* * there's no outstanding PCI any more, so we - * have to request a PCI to be sure the the PCI + * have to request a PCI to be sure the PCI * will wake at some time in the future then we * can flush packed buffers that might still be * hanging around, which can happen if no diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 97e51c34e6cf..161d3b141f0d 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -1136,8 +1136,13 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev, vcdev->err = -EIO; } virtio_ccw_check_activity(vcdev, activity); - /* Interrupts are disabled here */ +#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION + /* + * Paired with virtio_ccw_synchronize_cbs() and interrupts are + * disabled here. + */ read_lock(&vcdev->irq_lock); +#endif for_each_set_bit(i, indicators(vcdev), sizeof(*indicators(vcdev)) * BITS_PER_BYTE) { /* The bit clear must happen before the vring kick. 
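Aside: in the dtpm_cpu.c rework just above, the per-CPU arch_scale_cpu_capacity() call moves out of the loop — every CPU in a perf domain shares one capacity, so it is read once via cpumask_first(pd_mask), which also removes the old division-by-zero guard (the mask itself is never empty, so the capacity read stays valid even with all CPUs offline). The fixed-point scaling it feeds reduces to the following, shown here as a standalone sketch:

#include <linux/types.h>

/* scale power by sum_util/max without floating point: the ratio is
 * computed in 10-bit fixed point and shifted back out after the multiply */
static u64 example_scale_power(u64 power, unsigned long sum_util,
			       unsigned long max)
{
	return (power * ((sum_util << 10) / max)) >> 10;
}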
*/ @@ -1146,7 +1151,9 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev, vq = virtio_ccw_vq_by_ind(vcdev, i); vring_interrupt(0, vq); } +#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION read_unlock(&vcdev->irq_lock); +#endif if (test_bit(0, indicators2(vcdev))) { virtio_config_changed(&vcdev->vdev); clear_bit(0, indicators2(vcdev)); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 7d819fc0395e..eb86afb21aab 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -2782,6 +2782,7 @@ static int slave_configure_v3_hw(struct scsi_device *sdev) struct hisi_hba *hisi_hba = shost_priv(shost); struct device *dev = hisi_hba->dev; int ret = sas_slave_configure(sdev); + unsigned int max_sectors; if (ret) return ret; @@ -2799,6 +2800,12 @@ static int slave_configure_v3_hw(struct scsi_device *sdev) } } + /* Set according to IOMMU IOVA caching limit */ + max_sectors = min_t(size_t, queue_max_hw_sectors(sdev->request_queue), + (PAGE_SIZE * 32) >> SECTOR_SHIFT); + + blk_queue_max_hw_sectors(sdev->request_queue, max_sectors); + return 0; } diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index c95360a3c186..0917b05059b4 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3195,6 +3195,9 @@ static int megasas_map_queues(struct Scsi_Host *shost) qoff += map->nr_queues; offset += map->nr_queues; + /* we never use READ queue, so can't cheat blk-mq */ + shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0; + /* Setup Poll hctx */ map = &shost->tag_set.map[HCTX_TYPE_POLL]; map->nr_queues = instance->iopoll_q_count; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index b519f4b59d30..5e8887fa02c8 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -11386,6 +11386,7 @@ scsih_shutdown(struct pci_dev *pdev) _scsih_ir_shutdown(ioc); _scsih_nvme_shutdown(ioc); mpt3sas_base_mask_interrupts(ioc); + mpt3sas_base_stop_watchdog(ioc); ioc->shost_recovery = 1; mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); ioc->shost_recovery = 0; diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index f7466a895d3b..991eb01bb1e0 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -3145,15 +3145,6 @@ void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) if (!phy->phy_attached) return; - if (sas_phy->phy) { - struct sas_phy *sphy = sas_phy->phy; - sphy->negotiated_linkrate = sas_phy->linkrate; - sphy->minimum_linkrate = phy->minimum_linkrate; - sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; - sphy->maximum_linkrate = phy->maximum_linkrate; - sphy->maximum_linkrate_hw = phy->maximum_linkrate; - } - if (phy->phy_type & PORT_TYPE_SAS) { struct sas_identify_frame *id; id = (struct sas_identify_frame *)phy->frame_rcvd; @@ -3177,26 +3168,22 @@ void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) switch (link_rate) { case PHY_SPEED_120: phy->sas_phy.linkrate = SAS_LINK_RATE_12_0_GBPS; - phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_12_0_GBPS; break; case PHY_SPEED_60: phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS; - phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS; break; case PHY_SPEED_30: phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS; - phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS; break; case PHY_SPEED_15: phy->sas_phy.linkrate = 
SAS_LINK_RATE_1_5_GBPS; - phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS; break; } sas_phy->negotiated_linkrate = phy->sas_phy.linkrate; - sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS; + sas_phy->maximum_linkrate_hw = phy->maximum_linkrate; sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; - sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS; - sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; + sas_phy->maximum_linkrate = phy->maximum_linkrate; + sas_phy->minimum_linkrate = phy->minimum_linkrate; } /** diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 9b04f1a6a67d..01f2f41928eb 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -143,6 +143,8 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id) struct asd_sas_phy *sas_phy = &phy->sas_phy; phy->phy_state = PHY_LINK_DISABLE; phy->pm8001_ha = pm8001_ha; + phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; + phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS; sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0; sas_phy->class = SAS; sas_phy->iproto = SAS_PROTOCOL_ALL; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 01c5e8ff4cc5..303cd05fec50 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -3723,8 +3723,12 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n", phyid, status); if (status == PHY_STOP_SUCCESS || - status == PHY_STOP_ERR_DEVICE_ATTACHED) + status == PHY_STOP_ERR_DEVICE_ATTACHED) { phy->phy_state = PHY_LINK_DISABLE; + phy->sas_phy.phy->negotiated_linkrate = SAS_PHY_DISABLED; + phy->sas_phy.linkrate = SAS_PHY_DISABLED; + } + return 0; } diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index a480c4d589f5..729e309e6034 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -450,7 +450,7 @@ static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode) goto out_put_request; ret = 0; - if (hdr->iovec_count) { + if (hdr->iovec_count && hdr->dxfer_len) { struct iov_iter i; struct iovec *iov = NULL; diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c index 358df7510186..828d81e02b37 100644 --- a/drivers/sh/intc/chip.c +++ b/drivers/sh/intc/chip.c @@ -72,7 +72,7 @@ static int intc_set_affinity(struct irq_data *data, if (!cpumask_intersects(cpumask, cpu_online_mask)) return -1; - cpumask_copy(irq_data_get_affinity_mask(data), cpumask); + irq_data_update_affinity(data, cpumask); return IRQ_SET_MASK_OK_NOCOPY; } diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index 86ccf5970bc1..e461c071189b 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -9,6 +9,7 @@ source "drivers/soc/atmel/Kconfig" source "drivers/soc/bcm/Kconfig" source "drivers/soc/canaan/Kconfig" source "drivers/soc/fsl/Kconfig" +source "drivers/soc/fujitsu/Kconfig" source "drivers/soc/imx/Kconfig" source "drivers/soc/ixp4xx/Kconfig" source "drivers/soc/litex/Kconfig" diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 919716e0e700..69ba6508cf2c 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_SOC_CANAAN) += canaan/ obj-$(CONFIG_ARCH_DOVE) += dove/ obj-$(CONFIG_MACH_DOVE) += dove/ obj-y += fsl/ +obj-y += fujitsu/ obj-$(CONFIG_ARCH_GEMINI) += gemini/ obj-y += imx/ obj-y += ixp4xx/ diff --git a/drivers/soc/amlogic/meson-mx-socinfo.c 
b/drivers/soc/amlogic/meson-mx-socinfo.c index 78f0f1aeca57..92125dd65f33 100644 --- a/drivers/soc/amlogic/meson-mx-socinfo.c +++ b/drivers/soc/amlogic/meson-mx-socinfo.c @@ -126,6 +126,7 @@ static int __init meson_mx_socinfo_init(void) np = of_find_matching_node(NULL, meson_mx_socinfo_analog_top_ids); if (np) { analog_top_regmap = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(analog_top_regmap)) return PTR_ERR(analog_top_regmap); diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c index a10a417a87db..e93518763526 100644 --- a/drivers/soc/amlogic/meson-secure-pwrc.c +++ b/drivers/soc/amlogic/meson-secure-pwrc.c @@ -152,8 +152,10 @@ static int meson_secure_pwrc_probe(struct platform_device *pdev) } pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL); - if (!pwrc) + if (!pwrc) { + of_node_put(sm_np); return -ENOMEM; + } pwrc->fw = meson_sm_get(sm_np); of_node_put(sm_np); diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c index b2d365ae0282..dae8a2e0f745 100644 --- a/drivers/soc/atmel/soc.c +++ b/drivers/soc/atmel/soc.c @@ -91,14 +91,14 @@ static const struct at91_soc socs[] __initconst = { AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK, AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH, "sam9x60", "sam9x60"), - AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D5M_EXID_MATCH, - AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH, + AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK, + AT91_CIDR_VERSION_MASK, SAM9X60_D5M_EXID_MATCH, "sam9x60 64MiB DDR2 SiP", "sam9x60"), - AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D1G_EXID_MATCH, - AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH, + AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK, + AT91_CIDR_VERSION_MASK, SAM9X60_D1G_EXID_MATCH, "sam9x60 128MiB DDR2 SiP", "sam9x60"), - AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D6K_EXID_MATCH, - AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH, + AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK, + AT91_CIDR_VERSION_MASK, SAM9X60_D6K_EXID_MATCH, "sam9x60 8MiB SDRAM SiP", "sam9x60"), #endif #ifdef CONFIG_SOC_SAMA5 diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c index 1e0041ec8132..5bcd047768b6 100644 --- a/drivers/soc/bcm/bcm2835-power.c +++ b/drivers/soc/bcm/bcm2835-power.c @@ -126,8 +126,7 @@ #define ASB_AXI_BRDG_ID 0x20 -#define ASB_READ(reg) readl(power->asb + (reg)) -#define ASB_WRITE(reg, val) writel(PM_PASSWORD | (val), power->asb + (reg)) +#define BCM2835_BRDG_ID 0x62726467 struct bcm2835_power_domain { struct generic_pm_domain base; @@ -142,24 +141,41 @@ struct bcm2835_power { void __iomem *base; /* AXI Async bridge registers. */ void __iomem *asb; + /* RPiVid bridge registers. */ + void __iomem *rpivid_asb; struct genpd_onecell_data pd_xlate; struct bcm2835_power_domain domains[BCM2835_POWER_DOMAIN_COUNT]; struct reset_controller_dev reset; }; -static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) +static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable) { + void __iomem *base = power->asb; u64 start; + u32 val; - if (!reg) + switch (reg) { + case 0: return 0; + case ASB_V3D_S_CTRL: + case ASB_V3D_M_CTRL: + if (power->rpivid_asb) + base = power->rpivid_asb; + break; + } start = ktime_get_ns(); /* Enable the module's async AXI bridges. 
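Aside: the arm-versatile-reboot, meson-mx-socinfo and meson-secure-pwrc hunks (and the brcmstb biuctrl one further down) all fix the same leak class — a device node returned by an of_find_*() lookup was never released on one or more paths. The rule they enforce, as a minimal sketch assuming a syscon-backed node (names are illustrative):

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static struct regmap *example_get_syscon(const char *compat)
{
	struct device_node *np;
	struct regmap *map;

	np = of_find_compatible_node(NULL, NULL, compat);
	if (!np)
		return ERR_PTR(-ENODEV);

	map = syscon_node_to_regmap(np);
	of_node_put(np);	/* balance the lookup's reference on every path */

	return map;		/* may itself be an ERR_PTR; caller checks */
}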
*/ - ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP); - while (ASB_READ(reg) & ASB_ACK) { + if (enable) { + val = readl(base + reg) & ~ASB_REQ_STOP; + } else { + val = readl(base + reg) | ASB_REQ_STOP; + } + writel(PM_PASSWORD | val, base + reg); + + while (readl(base + reg) & ASB_ACK) { cpu_relax(); if (ktime_get_ns() - start >= 1000) return -ETIMEDOUT; @@ -168,30 +184,24 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) return 0; } -static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg) +static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) { - u64 start; - - if (!reg) - return 0; - - start = ktime_get_ns(); - - /* Enable the module's async AXI bridges. */ - ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP); - while (!(ASB_READ(reg) & ASB_ACK)) { - cpu_relax(); - if (ktime_get_ns() - start >= 1000) - return -ETIMEDOUT; - } + return bcm2835_asb_control(power, reg, true); +} - return 0; +static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg) +{ + return bcm2835_asb_control(power, reg, false); } static int bcm2835_power_power_off(struct bcm2835_power_domain *pd, u32 pm_reg) { struct bcm2835_power *power = pd->power; + /* We don't run this on BCM2711 */ + if (power->rpivid_asb) + return 0; + /* Enable functional isolation */ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISFUNC); @@ -213,6 +223,10 @@ static int bcm2835_power_power_on(struct bcm2835_power_domain *pd, u32 pm_reg) int inrush; bool powok; + /* We don't run this on BCM2711 */ + if (power->rpivid_asb) + return 0; + /* If it was already powered on by the fw, leave it that way. */ if (PM_READ(pm_reg) & PM_POWUP) return 0; @@ -626,13 +640,23 @@ static int bcm2835_power_probe(struct platform_device *pdev) power->dev = dev; power->base = pm->base; power->asb = pm->asb; + power->rpivid_asb = pm->rpivid_asb; - id = ASB_READ(ASB_AXI_BRDG_ID); - if (id != 0x62726467 /* "BRDG" */) { + id = readl(power->asb + ASB_AXI_BRDG_ID); + if (id != BCM2835_BRDG_ID /* "BRDG" */) { dev_err(dev, "ASB register ID returned 0x%08x\n", id); return -ENODEV; } + if (power->rpivid_asb) { + id = readl(power->rpivid_asb + ASB_AXI_BRDG_ID); + if (id != BCM2835_BRDG_ID /* "BRDG" */) { + dev_err(dev, "RPiVid ASB register ID returned 0x%08x\n", + id); + return -ENODEV; + } + } + power->pd_xlate.domains = devm_kcalloc(dev, ARRAY_SIZE(power_domain_names), sizeof(*power->pd_xlate.domains), diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c index 2c975d79fe8e..1467bbd59690 100644 --- a/drivers/soc/bcm/brcmstb/biuctrl.c +++ b/drivers/soc/bcm/brcmstb/biuctrl.c @@ -340,12 +340,12 @@ static int __init brcmstb_biuctrl_init(void) ret = setup_hifcpubiuctrl_regs(np); if (ret) - return ret; + goto out_put; ret = mcp_write_pairing_set(); if (ret) { pr_err("MCP: Unable to disable write pairing!\n"); - return ret; + goto out_put; } a72_b53_rac_enable_all(np); @@ -353,6 +353,9 @@ static int __init brcmstb_biuctrl_init(void) #ifdef CONFIG_PM_SLEEP register_syscore_ops(&brcmstb_cpu_credit_syscore_ops); #endif - return 0; + ret = 0; +out_put: + of_node_put(np); + return ret; } early_initcall(brcmstb_biuctrl_init); diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c index 70ad0f3dce28..d6b30d521307 100644 --- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c +++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c @@ -721,7 +721,7 @@ static int brcmstb_pm_probe(struct platform_device *pdev) ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs; ctrl.phy_b_standby_ctrl_offs = 
ddr_phy_data->phy_b_standby_ctrl_offs; /* - * Slightly grosss to use the phy ver to get a memc, + * Slightly gross to use the phy ver to get a memc, * offset but that is the only versioned things so far * we can test for. */ diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c index 5ed2fc1c53a0..6bf3e6a980ff 100644 --- a/drivers/soc/fsl/guts.c +++ b/drivers/soc/fsl/guts.c @@ -14,21 +14,16 @@ #include <linux/platform_device.h> #include <linux/fsl/guts.h> -struct guts { - struct ccsr_guts __iomem *regs; - bool little_endian; -}; - struct fsl_soc_die_attr { char *die; u32 svr; u32 mask; }; -static struct guts *guts; -static struct soc_device_attribute soc_dev_attr; -static struct soc_device *soc_dev; - +struct fsl_soc_data { + const char *sfp_compat; + u32 uid_offset; +}; /* SoC die attribute definition for QorIQ platform */ static const struct fsl_soc_die_attr fsl_soc_die[] = { @@ -120,88 +115,36 @@ static const struct fsl_soc_die_attr *fsl_soc_die_match( return NULL; } -static u32 fsl_guts_get_svr(void) -{ - u32 svr = 0; - - if (!guts || !guts->regs) - return svr; - - if (guts->little_endian) - svr = ioread32(&guts->regs->svr); - else - svr = ioread32be(&guts->regs->svr); - - return svr; -} - -static int fsl_guts_probe(struct platform_device *pdev) +static u64 fsl_guts_get_soc_uid(const char *compat, unsigned int offset) { - struct device_node *root, *np = pdev->dev.of_node; - struct device *dev = &pdev->dev; - const struct fsl_soc_die_attr *soc_die; - const char *machine; - u32 svr; - - /* Initialize guts */ - guts = devm_kzalloc(dev, sizeof(*guts), GFP_KERNEL); - if (!guts) - return -ENOMEM; - - guts->little_endian = of_property_read_bool(np, "little-endian"); + struct device_node *np; + void __iomem *sfp_base; + u64 uid; - guts->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(guts->regs)) - return PTR_ERR(guts->regs); + np = of_find_compatible_node(NULL, NULL, compat); + if (!np) + return 0; - /* Register soc device */ - root = of_find_node_by_path("/"); - if (of_property_read_string(root, "model", &machine)) - of_property_read_string_index(root, "compatible", 0, &machine); - if (machine) { - soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL); - if (!soc_dev_attr.machine) { - of_node_put(root); - return -ENOMEM; - } + sfp_base = of_iomap(np, 0); + if (!sfp_base) { + of_node_put(np); + return 0; } - of_node_put(root); - svr = fsl_guts_get_svr(); - soc_die = fsl_soc_die_match(svr, fsl_soc_die); - if (soc_die) { - soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, - "QorIQ %s", soc_die->die); - } else { - soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, "QorIQ"); - } - if (!soc_dev_attr.family) - return -ENOMEM; - soc_dev_attr.soc_id = devm_kasprintf(dev, GFP_KERNEL, - "svr:0x%08x", svr); - if (!soc_dev_attr.soc_id) - return -ENOMEM; - soc_dev_attr.revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d", - (svr >> 4) & 0xf, svr & 0xf); - if (!soc_dev_attr.revision) - return -ENOMEM; + uid = ioread32(sfp_base + offset); + uid <<= 32; + uid |= ioread32(sfp_base + offset + 4); - soc_dev = soc_device_register(&soc_dev_attr); - if (IS_ERR(soc_dev)) - return PTR_ERR(soc_dev); + iounmap(sfp_base); + of_node_put(np); - pr_info("Machine: %s\n", soc_dev_attr.machine); - pr_info("SoC family: %s\n", soc_dev_attr.family); - pr_info("SoC ID: %s, Revision: %s\n", - soc_dev_attr.soc_id, soc_dev_attr.revision); - return 0; + return uid; } -static int fsl_guts_remove(struct platform_device *dev) -{ - soc_device_unregister(soc_dev); - return 0; -} +static const struct 
fsl_soc_data ls1028a_data = { + .sfp_compat = "fsl,ls1028a-sfp", + .uid_offset = 0x21c, +}; /* * Table for matching compatible strings, for device tree @@ -231,28 +174,106 @@ static const struct of_device_id fsl_guts_of_match[] = { { .compatible = "fsl,ls1012a-dcfg", }, { .compatible = "fsl,ls1046a-dcfg", }, { .compatible = "fsl,lx2160a-dcfg", }, - { .compatible = "fsl,ls1028a-dcfg", }, + { .compatible = "fsl,ls1028a-dcfg", .data = &ls1028a_data}, {} }; -MODULE_DEVICE_TABLE(of, fsl_guts_of_match); - -static struct platform_driver fsl_guts_driver = { - .driver = { - .name = "fsl-guts", - .of_match_table = fsl_guts_of_match, - }, - .probe = fsl_guts_probe, - .remove = fsl_guts_remove, -}; static int __init fsl_guts_init(void) { - return platform_driver_register(&fsl_guts_driver); -} -core_initcall(fsl_guts_init); + struct soc_device_attribute *soc_dev_attr; + static struct soc_device *soc_dev; + const struct fsl_soc_die_attr *soc_die; + const struct fsl_soc_data *soc_data; + const struct of_device_id *match; + struct ccsr_guts __iomem *regs; + const char *machine = NULL; + struct device_node *np; + bool little_endian; + u64 soc_uid = 0; + u32 svr; + int ret; -static void __exit fsl_guts_exit(void) -{ - platform_driver_unregister(&fsl_guts_driver); + np = of_find_matching_node_and_match(NULL, fsl_guts_of_match, &match); + if (!np) + return 0; + soc_data = match->data; + + regs = of_iomap(np, 0); + if (!regs) { + of_node_put(np); + return -ENOMEM; + } + + little_endian = of_property_read_bool(np, "little-endian"); + if (little_endian) + svr = ioread32(®s->svr); + else + svr = ioread32be(®s->svr); + iounmap(regs); + of_node_put(np); + + /* Register soc device */ + soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); + if (!soc_dev_attr) + return -ENOMEM; + + if (of_property_read_string(of_root, "model", &machine)) + of_property_read_string_index(of_root, "compatible", 0, &machine); + if (machine) { + soc_dev_attr->machine = kstrdup(machine, GFP_KERNEL); + if (!soc_dev_attr->machine) + goto err_nomem; + } + + soc_die = fsl_soc_die_match(svr, fsl_soc_die); + if (soc_die) { + soc_dev_attr->family = kasprintf(GFP_KERNEL, "QorIQ %s", + soc_die->die); + } else { + soc_dev_attr->family = kasprintf(GFP_KERNEL, "QorIQ"); + } + if (!soc_dev_attr->family) + goto err_nomem; + + soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "svr:0x%08x", svr); + if (!soc_dev_attr->soc_id) + goto err_nomem; + + soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d.%d", + (svr >> 4) & 0xf, svr & 0xf); + if (!soc_dev_attr->revision) + goto err_nomem; + + if (soc_data) + soc_uid = fsl_guts_get_soc_uid(soc_data->sfp_compat, + soc_data->uid_offset); + if (soc_uid) + soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", + soc_uid); + + soc_dev = soc_device_register(soc_dev_attr); + if (IS_ERR(soc_dev)) { + ret = PTR_ERR(soc_dev); + goto err; + } + + pr_info("Machine: %s\n", soc_dev_attr->machine); + pr_info("SoC family: %s\n", soc_dev_attr->family); + pr_info("SoC ID: %s, Revision: %s\n", + soc_dev_attr->soc_id, soc_dev_attr->revision); + + return 0; + +err_nomem: + ret = -ENOMEM; +err: + kfree(soc_dev_attr->machine); + kfree(soc_dev_attr->family); + kfree(soc_dev_attr->soc_id); + kfree(soc_dev_attr->revision); + kfree(soc_dev_attr->serial_number); + kfree(soc_dev_attr); + + return ret; } -module_exit(fsl_guts_exit); +core_initcall(fsl_guts_init); diff --git a/drivers/soc/fujitsu/Kconfig b/drivers/soc/fujitsu/Kconfig new file mode 100644 index 000000000000..987731e80612 --- /dev/null +++ b/drivers/soc/fujitsu/Kconfig @@ 
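Aside: the fsl/guts.c rewrite above turns the platform driver into a plain core_initcall, so the devm_* helpers are no longer available and every attribute string has to be freed by hand on failure. That registration-with-manual-cleanup shape, condensed into a hypothetical sketch (names and the attribute subset are illustrative):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

static int example_register_soc(u32 svr)
{
	struct soc_device_attribute *attr;
	struct soc_device *soc_dev;
	int ret = -ENOMEM;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->family = kasprintf(GFP_KERNEL, "QorIQ");
	attr->soc_id = kasprintf(GFP_KERNEL, "svr:0x%08x", svr);
	if (!attr->family || !attr->soc_id)
		goto err;

	soc_dev = soc_device_register(attr);
	if (IS_ERR(soc_dev)) {
		ret = PTR_ERR(soc_dev);
		goto err;
	}

	return 0;

err:
	/* no devm here: undo every allocation made above */
	kfree(attr->family);
	kfree(attr->soc_id);
	kfree(attr);
	return ret;
}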
-0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +menu "fujitsu SoC drivers" + +config A64FX_DIAG + bool "A64FX diag driver" + depends on ARM64 + depends on ACPI + help + Say Y here if you want to enable diag interrupt on Fujitsu A64FX. + This driver enables BMC's diagnostic requests and enables + A64FX-specific interrupts. This allows administrators to obtain + kernel dumps via diagnostic requests using ipmitool, etc. + + If unsure, say N. + +endmenu diff --git a/drivers/soc/fujitsu/Makefile b/drivers/soc/fujitsu/Makefile new file mode 100644 index 000000000000..945bc1c14ad0 --- /dev/null +++ b/drivers/soc/fujitsu/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_A64FX_DIAG) += a64fx-diag.o diff --git a/drivers/soc/fujitsu/a64fx-diag.c b/drivers/soc/fujitsu/a64fx-diag.c new file mode 100644 index 000000000000..d87f348427bf --- /dev/null +++ b/drivers/soc/fujitsu/a64fx-diag.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * A64FX diag driver. + * Copyright (c) 2022 Fujitsu Ltd. + */ + +#include <linux/acpi.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#define A64FX_DIAG_IRQ 1 +#define BMC_DIAG_INTERRUPT_ENABLE 0x40 +#define BMC_DIAG_INTERRUPT_STATUS 0x44 +#define BMC_DIAG_INTERRUPT_MASK BIT(31) + +struct a64fx_diag_priv { + void __iomem *mmsc_reg_base; + int irq; + bool has_nmi; +}; + +static irqreturn_t a64fx_diag_handler_nmi(int irq, void *dev_id) +{ + nmi_panic(NULL, "a64fx_diag: interrupt received\n"); + + return IRQ_HANDLED; +} + +static irqreturn_t a64fx_diag_handler_irq(int irq, void *dev_id) +{ + panic("a64fx_diag: interrupt received\n"); + + return IRQ_HANDLED; +} + +static void a64fx_diag_interrupt_clear(struct a64fx_diag_priv *priv) +{ + void __iomem *diag_status_reg_addr; + u32 mmsc; + + diag_status_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_STATUS; + mmsc = readl(diag_status_reg_addr); + if (mmsc & BMC_DIAG_INTERRUPT_MASK) + writel(BMC_DIAG_INTERRUPT_MASK, diag_status_reg_addr); +} + +static void a64fx_diag_interrupt_enable(struct a64fx_diag_priv *priv) +{ + void __iomem *diag_enable_reg_addr; + u32 mmsc; + + diag_enable_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_ENABLE; + mmsc = readl(diag_enable_reg_addr); + if (!(mmsc & BMC_DIAG_INTERRUPT_MASK)) { + mmsc |= BMC_DIAG_INTERRUPT_MASK; + writel(mmsc, diag_enable_reg_addr); + } +} + +static void a64fx_diag_interrupt_disable(struct a64fx_diag_priv *priv) +{ + void __iomem *diag_enable_reg_addr; + u32 mmsc; + + diag_enable_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_ENABLE; + mmsc = readl(diag_enable_reg_addr); + if (mmsc & BMC_DIAG_INTERRUPT_MASK) { + mmsc &= ~BMC_DIAG_INTERRUPT_MASK; + writel(mmsc, diag_enable_reg_addr); + } +} + +static int a64fx_diag_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct a64fx_diag_priv *priv; + unsigned long irq_flags; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (priv == NULL) + return -ENOMEM; + + priv->mmsc_reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->mmsc_reg_base)) + return PTR_ERR(priv->mmsc_reg_base); + + priv->irq = platform_get_irq(pdev, A64FX_DIAG_IRQ); + if (priv->irq < 0) + return priv->irq; + + platform_set_drvdata(pdev, priv); + + irq_flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_AUTOEN | + IRQF_NO_THREAD; + ret = request_nmi(priv->irq, &a64fx_diag_handler_nmi, irq_flags, + "a64fx_diag_nmi", NULL); + if (ret) { + ret = request_irq(priv->irq, 
&a64fx_diag_handler_irq, + irq_flags, "a64fx_diag_irq", NULL); + if (ret) { + dev_err(dev, "cannot register IRQ %d\n", ret); + return ret; + } + enable_irq(priv->irq); + } else { + enable_nmi(priv->irq); + priv->has_nmi = true; + } + + a64fx_diag_interrupt_clear(priv); + a64fx_diag_interrupt_enable(priv); + + return 0; +} + +static int a64fx_diag_remove(struct platform_device *pdev) +{ + struct a64fx_diag_priv *priv = platform_get_drvdata(pdev); + + a64fx_diag_interrupt_disable(priv); + a64fx_diag_interrupt_clear(priv); + + if (priv->has_nmi) + free_nmi(priv->irq, NULL); + else + free_irq(priv->irq, NULL); + + return 0; +} + +static const struct acpi_device_id a64fx_diag_acpi_match[] = { + { "FUJI2007", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, a64fx_diag_acpi_match); + + +static struct platform_driver a64fx_diag_driver = { + .driver = { + .name = "a64fx_diag_driver", + .acpi_match_table = ACPI_PTR(a64fx_diag_acpi_match), + }, + .probe = a64fx_diag_probe, + .remove = a64fx_diag_remove, +}; + +module_platform_driver(a64fx_diag_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Hitomi Hasegawa <hasegawa-hitomi@fujitsu.com>"); +MODULE_DESCRIPTION("A64FX diag driver"); diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index 85aa86e1338a..6383a4edc360 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c @@ -328,7 +328,9 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd) if (!IS_ERR(domain->regulator)) { ret = regulator_enable(domain->regulator); if (ret) { - dev_err(domain->dev, "failed to enable regulator\n"); + dev_err(domain->dev, + "failed to enable regulator: %pe\n", + ERR_PTR(ret)); goto out_put_pm; } } @@ -467,7 +469,9 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd) if (!IS_ERR(domain->regulator)) { ret = regulator_disable(domain->regulator); if (ret) { - dev_err(domain->dev, "failed to disable regulator\n"); + dev_err(domain->dev, + "failed to disable regulator: %pe\n", + ERR_PTR(ret)); return ret; } } diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c index 7ebc28709e94..dff7529268e4 100644 --- a/drivers/soc/imx/imx8m-blk-ctrl.c +++ b/drivers/soc/imx/imx8m-blk-ctrl.c @@ -216,7 +216,7 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev) bc->bus_power_dev = genpd_dev_pm_attach_by_name(dev, "bus"); if (IS_ERR(bc->bus_power_dev)) return dev_err_probe(dev, PTR_ERR(bc->bus_power_dev), - "failed to attach power domain\n"); + "failed to attach power domain \"bus\"\n"); for (i = 0; i < bc_data->num_domains; i++) { const struct imx8m_blk_ctrl_domain_data *data = &bc_data->domains[i]; @@ -238,7 +238,8 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev) dev_pm_domain_attach_by_name(dev, data->gpc_name); if (IS_ERR(domain->power_dev)) { dev_err_probe(dev, PTR_ERR(domain->power_dev), - "failed to attach power domain\n"); + "failed to attach power domain \"%s\"\n", + data->gpc_name); ret = PTR_ERR(domain->power_dev); goto cleanup_pds; } @@ -251,7 +252,9 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev) ret = pm_genpd_init(&domain->genpd, NULL, true); if (ret) { - dev_err_probe(dev, ret, "failed to init power domain\n"); + dev_err_probe(dev, ret, + "failed to init power domain \"%s\"\n", + data->gpc_name); dev_pm_domain_detach(domain->power_dev, true); goto cleanup_pds; } diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c index 613935cb6a48..58240e320c13 100644 --- a/drivers/soc/ixp4xx/ixp4xx-npe.c +++ b/drivers/soc/ixp4xx/ixp4xx-npe.c @@ 
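Aside: the a64fx-diag probe above registers its handler as an NMI where the irqchip supports it and falls back to a normal interrupt otherwise; because request_nmi() requires IRQF_NO_AUTOEN, the line must be enabled explicitly after whichever registration succeeds. That fallback, distilled into a sketch (handler bodies and names are illustrative):

#include <linux/interrupt.h>
#include <linux/kernel.h>

static irqreturn_t example_nmi_handler(int irq, void *dev_id)
{
	nmi_panic(NULL, "example: NMI received\n");
	return IRQ_HANDLED;
}

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	panic("example: interrupt received\n");
	return IRQ_HANDLED;
}

static int example_setup_irq(int irq, bool *has_nmi)
{
	unsigned long flags = IRQF_PERCPU | IRQF_NOBALANCING |
			      IRQF_NO_AUTOEN | IRQF_NO_THREAD;
	int ret;

	ret = request_nmi(irq, example_nmi_handler, flags,
			  "example_nmi", NULL);
	if (ret) {
		ret = request_irq(irq, example_irq_handler, flags,
				  "example_irq", NULL);
		if (ret)
			return ret;
		enable_irq(irq);	/* IRQF_NO_AUTOEN left it disabled */
		*has_nmi = false;
	} else {
		enable_nmi(irq);
		*has_nmi = true;
	}

	return 0;
}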
-758,7 +758,7 @@ static const struct of_device_id ixp4xx_npe_of_match[] = { static struct platform_driver ixp4xx_npe_driver = { .driver = { .name = "ixp4xx-npe", - .of_match_table = of_match_ptr(ixp4xx_npe_of_match), + .of_match_table = ixp4xx_npe_of_match, }, .probe = ixp4xx_npe_probe, .remove = ixp4xx_npe_remove, diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index fdd8bc08569e..3c3eedea35f7 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig @@ -73,4 +73,14 @@ config MTK_MMSYS Say yes here to add support for the MediaTek Multimedia Subsystem (MMSYS). +config MTK_SVS + tristate "MediaTek Smart Voltage Scaling(SVS)" + depends on MTK_EFUSE && NVMEM + help + The Smart Voltage Scaling(SVS) engine is a piece of hardware + which has several controllers(banks) for calculating suitable + voltage to different power domains(CPU/GPU/CCI) according to + chip process corner, temperatures and other factors. Then DVFS + driver could apply SVS bank voltage to PMIC/Buck. + endmenu diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile index 90270f8114ed..0e9e703c931a 100644 --- a/drivers/soc/mediatek/Makefile +++ b/drivers/soc/mediatek/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o obj-$(CONFIG_MTK_MMSYS) += mtk-mutex.o +obj-$(CONFIG_MTK_SVS) += mtk-svs.o diff --git a/drivers/soc/mediatek/mt6795-pm-domains.h b/drivers/soc/mediatek/mt6795-pm-domains.h new file mode 100644 index 000000000000..ef07c9dfdd9b --- /dev/null +++ b/drivers/soc/mediatek/mt6795-pm-domains.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __SOC_MEDIATEK_MT6795_PM_DOMAINS_H +#define __SOC_MEDIATEK_MT6795_PM_DOMAINS_H + +#include "mtk-pm-domains.h" +#include <dt-bindings/power/mt6795-power.h> + +/* + * MT6795 power domain support + */ + +static const struct scpsys_domain_data scpsys_domain_data_mt6795[] = { + [MT6795_POWER_DOMAIN_VDEC] = { + .name = "vdec", + .sta_mask = PWR_STATUS_VDEC, + .ctl_offs = SPM_VDE_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + }, + [MT6795_POWER_DOMAIN_VENC] = { + .name = "venc", + .sta_mask = PWR_STATUS_VENC, + .ctl_offs = SPM_VEN_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + }, + [MT6795_POWER_DOMAIN_ISP] = { + .name = "isp", + .sta_mask = PWR_STATUS_ISP, + .ctl_offs = SPM_ISP_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(13, 12), + }, + [MT6795_POWER_DOMAIN_MM] = { + .name = "mm", + .sta_mask = PWR_STATUS_DISP, + .ctl_offs = SPM_DIS_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MM_M0 | + MT8173_TOP_AXI_PROT_EN_MM_M1), + }, + }, + [MT6795_POWER_DOMAIN_MJC] = { + .name = "mjc", + .sta_mask = BIT(20), + .ctl_offs = 0x298, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + }, + [MT6795_POWER_DOMAIN_AUDIO] = { + .name = "audio", + .sta_mask = PWR_STATUS_AUDIO, + .ctl_offs = SPM_AUDIO_PWR_CON, + 
.pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + }, + [MT6795_POWER_DOMAIN_MFG_ASYNC] = { + .name = "mfg_async", + .sta_mask = PWR_STATUS_MFG_ASYNC, + .ctl_offs = SPM_MFG_ASYNC_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = 0, + }, + [MT6795_POWER_DOMAIN_MFG_2D] = { + .name = "mfg_2d", + .sta_mask = PWR_STATUS_MFG_2D, + .ctl_offs = SPM_MFG_2D_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(13, 12), + }, + [MT6795_POWER_DOMAIN_MFG] = { + .name = "mfg", + .sta_mask = PWR_STATUS_MFG, + .ctl_offs = SPM_MFG_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(13, 8), + .sram_pdn_ack_bits = GENMASK(21, 16), + .bp_infracfg = { + BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MFG_S | + MT8173_TOP_AXI_PROT_EN_MFG_M0 | + MT8173_TOP_AXI_PROT_EN_MFG_M1 | + MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT), + }, + }, +}; + +static const struct scpsys_soc_data mt6795_scpsys_data = { + .domains_data = scpsys_domain_data_mt6795, + .num_domains = ARRAY_SIZE(scpsys_domain_data_mt6795), +}; + +#endif /* __SOC_MEDIATEK_MT6795_PM_DOMAINS_H */ diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h index 71b8757e552d..99de67fe5de8 100644 --- a/drivers/soc/mediatek/mt8183-pm-domains.h +++ b/drivers/soc/mediatek/mt8183-pm-domains.h @@ -41,6 +41,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = 0, .sram_pdn_ack_bits = 0, + .caps = MTK_SCPD_DOMAIN_SUPPLY, }, [MT8183_POWER_DOMAIN_MFG] = { .name = "mfg", diff --git a/drivers/soc/mediatek/mt8186-pm-domains.h b/drivers/soc/mediatek/mt8186-pm-domains.h index bf2dd0cdc3a8..108af61854a3 100644 --- a/drivers/soc/mediatek/mt8186-pm-domains.h +++ b/drivers/soc/mediatek/mt8186-pm-domains.h @@ -51,7 +51,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = { MT8186_TOP_AXI_PROT_EN_1_CLR, MT8186_TOP_AXI_PROT_EN_1_STA), }, - .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY, }, [MT8186_POWER_DOMAIN_MFG2] = { .name = "mfg2", diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h index 558c4ee4784a..b97b2051920f 100644 --- a/drivers/soc/mediatek/mt8192-pm-domains.h +++ b/drivers/soc/mediatek/mt8192-pm-domains.h @@ -58,6 +58,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_DOMAIN_SUPPLY, }, [MT8192_POWER_DOMAIN_MFG1] = { .name = "mfg1", @@ -85,6 +86,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { MT8192_TOP_AXI_PROT_EN_2_CLR, MT8192_TOP_AXI_PROT_EN_2_STA1), }, + .caps = MTK_SCPD_DOMAIN_SUPPLY, }, [MT8192_POWER_DOMAIN_MFG2] = { .name = "mfg2", diff --git a/drivers/soc/mediatek/mt8195-pm-domains.h b/drivers/soc/mediatek/mt8195-pm-domains.h index 938f4d51f5ae..d7387ea1b9c9 100644 --- a/drivers/soc/mediatek/mt8195-pm-domains.h +++ b/drivers/soc/mediatek/mt8195-pm-domains.h @@ -67,7 +67,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = { .ctl_offs = 0x334, .pwr_sta_offs = 0x174, .pwr_sta2nd_offs = 0x178, - .caps = MTK_SCPD_ACTIVE_WAKEUP, + 
.caps = MTK_SCPD_ACTIVE_WAKEUP | MTK_SCPD_ALWAYS_ON, }, [MT8195_POWER_DOMAIN_CSI_RX_TOP] = { .name = "csi_rx_top", @@ -162,7 +162,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = { MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR, MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1), }, - .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY, }, [MT8195_POWER_DOMAIN_MFG2] = { .name = "mfg2", diff --git a/drivers/soc/mediatek/mt8365-mmsys.h b/drivers/soc/mediatek/mt8365-mmsys.h index 24129a6c25f8..7abaf048d91e 100644 --- a/drivers/soc/mediatek/mt8365-mmsys.h +++ b/drivers/soc/mediatek/mt8365-mmsys.h @@ -10,6 +10,9 @@ #define MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN 0xf60 #define MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0xf64 #define MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN 0xf68 +#define MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL 0xfd0 +#define MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN 0xfd8 +#define MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00 0xfdc #define MT8365_RDMA0_SOUT_COLOR0 0x1 #define MT8365_DITHER_MOUT_EN_DSI0 0x1 @@ -18,6 +21,10 @@ #define MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 0x0 #define MT8365_DISP_COLOR_SEL_IN_COLOR0 0x0 #define MT8365_OVL0_MOUT_PATH0_SEL BIT(0) +#define MT8365_RDMA1_SOUT_DPI0 0x1 +#define MT8365_DPI0_SEL_IN_RDMA1 0x0 +#define MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK 0x1 +#define MT8365_DPI0_SEL_IN_RDMA1 0x0 static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = { { @@ -55,6 +62,21 @@ static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = { MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 }, + { + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, + MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00, + MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK, MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK + }, + { + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, + MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN, + MT8365_DPI0_SEL_IN_RDMA1, MT8365_DPI0_SEL_IN_RDMA1 + }, + { + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, + MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL, + MT8365_RDMA1_SOUT_DPI0, MT8365_RDMA1_SOUT_DPI0 + }, }; #endif /* __SOC_MEDIATEK_MT8365_MMSYS_H */ diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c index 7c65ad3d1f8a..fc13334db1b1 100644 --- a/drivers/soc/mediatek/mtk-devapc.c +++ b/drivers/soc/mediatek/mtk-devapc.c @@ -31,10 +31,7 @@ struct mtk_devapc_vio_dbgs { u32 vio_dbg1; }; -struct mtk_devapc_data { - /* numbers of violation index */ - u32 vio_idx_num; - +struct mtk_devapc_regs_ofs { /* reg offset */ u32 vio_mask_offset; u32 vio_sta_offset; @@ -46,6 +43,12 @@ struct mtk_devapc_data { u32 vio_shift_con_offset; }; +struct mtk_devapc_data { + /* numbers of violation index */ + u32 vio_idx_num; + const struct mtk_devapc_regs_ofs *regs_ofs; +}; + struct mtk_devapc_context { struct device *dev; void __iomem *infra_base; @@ -58,7 +61,7 @@ static void clear_vio_status(struct mtk_devapc_context *ctx) void __iomem *reg; int i; - reg = ctx->infra_base + ctx->data->vio_sta_offset; + reg = ctx->infra_base + ctx->data->regs_ofs->vio_sta_offset; for (i = 0; i < VIO_MOD_TO_REG_IND(ctx->data->vio_idx_num) - 1; i++) writel(GENMASK(31, 0), reg + 4 * i); @@ -73,7 +76,7 @@ static void mask_module_irq(struct mtk_devapc_context *ctx, bool mask) u32 val; int i; - reg = ctx->infra_base + ctx->data->vio_mask_offset; + reg = ctx->infra_base + ctx->data->regs_ofs->vio_mask_offset; if (mask) val = GENMASK(31, 0); @@ -116,11 +119,11 @@ static int 
devapc_sync_vio_dbg(struct mtk_devapc_context *ctx) u32 val; pd_vio_shift_sta_reg = ctx->infra_base + - ctx->data->vio_shift_sta_offset; + ctx->data->regs_ofs->vio_shift_sta_offset; pd_vio_shift_sel_reg = ctx->infra_base + - ctx->data->vio_shift_sel_offset; + ctx->data->regs_ofs->vio_shift_sel_offset; pd_vio_shift_con_reg = ctx->infra_base + - ctx->data->vio_shift_con_offset; + ctx->data->regs_ofs->vio_shift_con_offset; /* Find the minimum shift group which has violation */ val = readl(pd_vio_shift_sta_reg); @@ -161,8 +164,8 @@ static void devapc_extract_vio_dbg(struct mtk_devapc_context *ctx) void __iomem *vio_dbg0_reg; void __iomem *vio_dbg1_reg; - vio_dbg0_reg = ctx->infra_base + ctx->data->vio_dbg0_offset; - vio_dbg1_reg = ctx->infra_base + ctx->data->vio_dbg1_offset; + vio_dbg0_reg = ctx->infra_base + ctx->data->regs_ofs->vio_dbg0_offset; + vio_dbg1_reg = ctx->infra_base + ctx->data->regs_ofs->vio_dbg1_offset; vio_dbgs.vio_dbg0 = readl(vio_dbg0_reg); vio_dbgs.vio_dbg1 = readl(vio_dbg1_reg); @@ -200,7 +203,7 @@ static irqreturn_t devapc_violation_irq(int irq_number, void *data) */ static void start_devapc(struct mtk_devapc_context *ctx) { - writel(BIT(31), ctx->infra_base + ctx->data->apc_con_offset); + writel(BIT(31), ctx->infra_base + ctx->data->regs_ofs->apc_con_offset); mask_module_irq(ctx, false); } @@ -212,11 +215,10 @@ static void stop_devapc(struct mtk_devapc_context *ctx) { mask_module_irq(ctx, true); - writel(BIT(2), ctx->infra_base + ctx->data->apc_con_offset); + writel(BIT(2), ctx->infra_base + ctx->data->regs_ofs->apc_con_offset); } -static const struct mtk_devapc_data devapc_mt6779 = { - .vio_idx_num = 511, +static const struct mtk_devapc_regs_ofs devapc_regs_ofs_mt6779 = { .vio_mask_offset = 0x0, .vio_sta_offset = 0x400, .vio_dbg0_offset = 0x900, @@ -227,11 +229,24 @@ static const struct mtk_devapc_data devapc_mt6779 = { .vio_shift_con_offset = 0xF20, }; +static const struct mtk_devapc_data devapc_mt6779 = { + .vio_idx_num = 511, + .regs_ofs = &devapc_regs_ofs_mt6779, +}; + +static const struct mtk_devapc_data devapc_mt8186 = { + .vio_idx_num = 519, + .regs_ofs = &devapc_regs_ofs_mt6779, +}; + static const struct of_device_id mtk_devapc_dt_match[] = { { .compatible = "mediatek,mt6779-devapc", .data = &devapc_mt6779, }, { + .compatible = "mediatek,mt8186-devapc", + .data = &devapc_mt8186, + }, { }, }; MODULE_DEVICE_TABLE(of, mtk_devapc_dt_match); diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c index 981d56967e7a..5ea43de4e410 100644 --- a/drivers/soc/mediatek/mtk-mutex.c +++ b/drivers/soc/mediatek/mtk-mutex.c @@ -7,10 +7,12 @@ #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/soc/mediatek/mtk-mmsys.h> #include <linux/soc/mediatek/mtk-mutex.h> +#include <linux/soc/mediatek/mtk-cmdq.h> #define MT2701_MUTEX0_MOD0 0x2c #define MT2701_MUTEX0_SOF0 0x30 @@ -80,6 +82,15 @@ #define MT8183_MUTEX_MOD_DISP_GAMMA0 16 #define MT8183_MUTEX_MOD_DISP_DITHER0 17 +#define MT8183_MUTEX_MOD_MDP_RDMA0 2 +#define MT8183_MUTEX_MOD_MDP_RSZ0 4 +#define MT8183_MUTEX_MOD_MDP_RSZ1 5 +#define MT8183_MUTEX_MOD_MDP_TDSHP0 6 +#define MT8183_MUTEX_MOD_MDP_WROT0 7 +#define MT8183_MUTEX_MOD_MDP_WDMA 8 +#define MT8183_MUTEX_MOD_MDP_AAL0 23 +#define MT8183_MUTEX_MOD_MDP_CCORR0 24 + #define MT8173_MUTEX_MOD_DISP_OVL0 11 #define MT8173_MUTEX_MOD_DISP_OVL1 12 #define MT8173_MUTEX_MOD_DISP_RDMA0 13 @@ -110,6 +121,20 @@ #define 
MT8195_MUTEX_MOD_DISP_DP_INTF0 21 #define MT8195_MUTEX_MOD_DISP_PWM0 27 +#define MT8365_MUTEX_MOD_DISP_OVL0 7 +#define MT8365_MUTEX_MOD_DISP_OVL0_2L 8 +#define MT8365_MUTEX_MOD_DISP_RDMA0 9 +#define MT8365_MUTEX_MOD_DISP_RDMA1 10 +#define MT8365_MUTEX_MOD_DISP_WDMA0 11 +#define MT8365_MUTEX_MOD_DISP_COLOR0 12 +#define MT8365_MUTEX_MOD_DISP_CCORR 13 +#define MT8365_MUTEX_MOD_DISP_AAL 14 +#define MT8365_MUTEX_MOD_DISP_GAMMA 15 +#define MT8365_MUTEX_MOD_DISP_DITHER 16 +#define MT8365_MUTEX_MOD_DISP_DSI0 17 +#define MT8365_MUTEX_MOD_DISP_PWM0 20 +#define MT8365_MUTEX_MOD_DISP_DPI0 22 + #define MT2712_MUTEX_MOD_DISP_PWM2 10 #define MT2712_MUTEX_MOD_DISP_OVL0 11 #define MT2712_MUTEX_MOD_DISP_OVL1 12 @@ -185,6 +210,7 @@ struct mtk_mutex_data { const unsigned int *mutex_sof; const unsigned int mutex_mod_reg; const unsigned int mutex_sof_reg; + const unsigned int *mutex_table_mod; const bool no_clk; }; @@ -194,6 +220,8 @@ struct mtk_mutex_ctx { void __iomem *regs; struct mtk_mutex mutex[10]; const struct mtk_mutex_data *data; + phys_addr_t addr; + struct cmdq_client_reg cmdq_reg; }; static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = { @@ -272,6 +300,17 @@ static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0, }; +static const unsigned int mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { + [MUTEX_MOD_IDX_MDP_RDMA0] = MT8183_MUTEX_MOD_MDP_RDMA0, + [MUTEX_MOD_IDX_MDP_RSZ0] = MT8183_MUTEX_MOD_MDP_RSZ0, + [MUTEX_MOD_IDX_MDP_RSZ1] = MT8183_MUTEX_MOD_MDP_RSZ1, + [MUTEX_MOD_IDX_MDP_TDSHP0] = MT8183_MUTEX_MOD_MDP_TDSHP0, + [MUTEX_MOD_IDX_MDP_WROT0] = MT8183_MUTEX_MOD_MDP_WROT0, + [MUTEX_MOD_IDX_MDP_WDMA] = MT8183_MUTEX_MOD_MDP_WDMA, + [MUTEX_MOD_IDX_MDP_AAL0] = MT8183_MUTEX_MOD_MDP_AAL0, + [MUTEX_MOD_IDX_MDP_CCORR0] = MT8183_MUTEX_MOD_MDP_CCORR0, +}; + static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT8186_MUTEX_MOD_DISP_AAL0, [DDP_COMPONENT_CCORR] = MT8186_MUTEX_MOD_DISP_CCORR0, @@ -315,6 +354,22 @@ static const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_DP_INTF0] = MT8195_MUTEX_MOD_DISP_DP_INTF0, }; +static const unsigned int mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = { + [DDP_COMPONENT_AAL0] = MT8365_MUTEX_MOD_DISP_AAL, + [DDP_COMPONENT_CCORR] = MT8365_MUTEX_MOD_DISP_CCORR, + [DDP_COMPONENT_COLOR0] = MT8365_MUTEX_MOD_DISP_COLOR0, + [DDP_COMPONENT_DITHER0] = MT8365_MUTEX_MOD_DISP_DITHER, + [DDP_COMPONENT_DPI0] = MT8365_MUTEX_MOD_DISP_DPI0, + [DDP_COMPONENT_DSI0] = MT8365_MUTEX_MOD_DISP_DSI0, + [DDP_COMPONENT_GAMMA] = MT8365_MUTEX_MOD_DISP_GAMMA, + [DDP_COMPONENT_OVL0] = MT8365_MUTEX_MOD_DISP_OVL0, + [DDP_COMPONENT_OVL_2L0] = MT8365_MUTEX_MOD_DISP_OVL0_2L, + [DDP_COMPONENT_PWM0] = MT8365_MUTEX_MOD_DISP_PWM0, + [DDP_COMPONENT_RDMA0] = MT8365_MUTEX_MOD_DISP_RDMA0, + [DDP_COMPONENT_RDMA1] = MT8365_MUTEX_MOD_DISP_RDMA1, + [DDP_COMPONENT_WDMA0] = MT8365_MUTEX_MOD_DISP_WDMA0, +}; + static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = { [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0, @@ -399,6 +454,7 @@ static const struct mtk_mutex_data mt8183_mutex_driver_data = { .mutex_sof = mt8183_mutex_sof, .mutex_mod_reg = MT8183_MUTEX0_MOD0, .mutex_sof_reg = MT8183_MUTEX0_SOF0, + .mutex_table_mod = mt8183_mutex_table_mod, .no_clk = true, }; @@ -423,6 +479,14 @@ static const struct mtk_mutex_data mt8195_mutex_driver_data = { .mutex_sof_reg = MT8183_MUTEX0_SOF0, }; +static const struct mtk_mutex_data mt8365_mutex_driver_data = { + 
.mutex_mod = mt8365_mutex_mod, + .mutex_sof = mt8183_mutex_sof, + .mutex_mod_reg = MT8183_MUTEX0_MOD0, + .mutex_sof_reg = MT8183_MUTEX0_SOF0, + .no_clk = true, +}; + struct mtk_mutex *mtk_mutex_get(struct device *dev) { struct mtk_mutex_ctx *mtx = dev_get_drvdata(dev); @@ -572,6 +636,30 @@ void mtk_mutex_enable(struct mtk_mutex *mutex) } EXPORT_SYMBOL_GPL(mtk_mutex_enable); +int mtk_mutex_enable_by_cmdq(struct mtk_mutex *mutex, void *pkt) +{ + struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx, + mutex[mutex->id]); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct cmdq_pkt *cmdq_pkt = (struct cmdq_pkt *)pkt; + + WARN_ON(&mtx->mutex[mutex->id] != mutex); + + if (!mtx->cmdq_reg.size) { + dev_err(mtx->dev, "mediatek,gce-client-reg hasn't been set"); + return -EINVAL; + } + + cmdq_pkt_write(cmdq_pkt, mtx->cmdq_reg.subsys, + mtx->addr + DISP_REG_MUTEX_EN(mutex->id), 1); + return 0; +#else + dev_err(mtx->dev, "Not support for enable MUTEX by CMDQ"); + return -ENODEV; +#endif +} +EXPORT_SYMBOL_GPL(mtk_mutex_enable_by_cmdq); + void mtk_mutex_disable(struct mtk_mutex *mutex) { struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx, @@ -606,12 +694,67 @@ void mtk_mutex_release(struct mtk_mutex *mutex) } EXPORT_SYMBOL_GPL(mtk_mutex_release); +int mtk_mutex_write_mod(struct mtk_mutex *mutex, + enum mtk_mutex_mod_index idx, bool clear) +{ + struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx, + mutex[mutex->id]); + unsigned int reg; + unsigned int offset; + + WARN_ON(&mtx->mutex[mutex->id] != mutex); + + if (idx < MUTEX_MOD_IDX_MDP_RDMA0 || + idx >= MUTEX_MOD_IDX_MAX) { + dev_err(mtx->dev, "Not supported MOD table index : %d", idx); + return -EINVAL; + } + + offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg, + mutex->id); + reg = readl_relaxed(mtx->regs + offset); + + if (clear) + reg &= ~BIT(mtx->data->mutex_table_mod[idx]); + else + reg |= BIT(mtx->data->mutex_table_mod[idx]); + + writel_relaxed(reg, mtx->regs + offset); + + return 0; +} +EXPORT_SYMBOL_GPL(mtk_mutex_write_mod); + +int mtk_mutex_write_sof(struct mtk_mutex *mutex, + enum mtk_mutex_sof_index idx) +{ + struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx, + mutex[mutex->id]); + + WARN_ON(&mtx->mutex[mutex->id] != mutex); + + if (idx < MUTEX_SOF_IDX_SINGLE_MODE || + idx >= MUTEX_SOF_IDX_MAX) { + dev_err(mtx->dev, "Not supported SOF index : %d", idx); + return -EINVAL; + } + + writel_relaxed(idx, mtx->regs + + DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg, mutex->id)); + + return 0; +} +EXPORT_SYMBOL_GPL(mtk_mutex_write_sof); + static int mtk_mutex_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_mutex_ctx *mtx; struct resource *regs; int i; +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + int ret; +#endif mtx = devm_kzalloc(dev, sizeof(*mtx), GFP_KERNEL); if (!mtx) @@ -631,12 +774,18 @@ static int mtk_mutex_probe(struct platform_device *pdev) } } - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mtx->regs = devm_ioremap_resource(dev, regs); + mtx->regs = devm_platform_get_and_ioremap_resource(pdev, 0, ®s); if (IS_ERR(mtx->regs)) { dev_err(dev, "Failed to map mutex registers\n"); return PTR_ERR(mtx->regs); } + mtx->addr = regs->start; + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, &mtx->cmdq_reg, 0); + if (ret) + dev_dbg(dev, "No mediatek,gce-client-reg!\n"); +#endif platform_set_drvdata(pdev, mtx); @@ -665,6 +814,8 @@ static const struct of_device_id mutex_driver_dt_match[] = { .data = &mt8192_mutex_driver_data}, { 
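Aside: mtk_mutex_enable_by_cmdq() above only issues the GCE write when a mediatek,gce-client-reg property was parsed at probe time (cmdq_reg.size != 0). Assuming the mtk-cmdq client API and CONFIG_MTK_CMDQ, the guarded-write shape is roughly this sketch (helper name and offset are hypothetical):

#include <linux/errno.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

/* hypothetical helper: set a register to 1 through a GCE command packet */
static int example_cmdq_set(struct cmdq_client_reg *creg,
			    struct cmdq_pkt *pkt, u16 offset)
{
	if (!creg->size)	/* no mediatek,gce-client-reg was provided */
		return -EINVAL;

	return cmdq_pkt_write(pkt, creg->subsys, offset, 1);
}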
.compatible = "mediatek,mt8195-disp-mutex", .data = &mt8195_mutex_driver_data}, + { .compatible = "mediatek,mt8365-disp-mutex", + .data = &mt8365_mutex_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, mutex_driver_dt_match); diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c index 5ced254b082b..9734f1091c69 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.c +++ b/drivers/soc/mediatek/mtk-pm-domains.c @@ -16,6 +16,7 @@ #include <linux/regulator/consumer.h> #include <linux/soc/mediatek/infracfg.h> +#include "mt6795-pm-domains.h" #include "mt8167-pm-domains.h" #include "mt8173-pm-domains.h" #include "mt8183-pm-domains.h" @@ -428,6 +429,9 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret); goto err_put_subsys_clocks; } + + if (MTK_SCPD_CAPS(pd, MTK_SCPD_ALWAYS_ON)) + pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON; } if (scpsys->domains[id]) { @@ -556,6 +560,10 @@ static void scpsys_domain_cleanup(struct scpsys *scpsys) static const struct of_device_id scpsys_of_match[] = { { + .compatible = "mediatek,mt6795-power-controller", + .data = &mt6795_scpsys_data, + }, + { .compatible = "mediatek,mt8167-power-controller", .data = &mt8167_scpsys_data, }, diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h index daa24e890dd4..7d3c0c36316c 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.h +++ b/drivers/soc/mediatek/mtk-pm-domains.h @@ -8,6 +8,8 @@ #define MTK_SCPD_SRAM_ISO BIT(2) #define MTK_SCPD_KEEP_DEFAULT_OFF BIT(3) #define MTK_SCPD_DOMAIN_SUPPLY BIT(4) +/* can't set MTK_SCPD_KEEP_DEFAULT_OFF at the same time */ +#define MTK_SCPD_ALWAYS_ON BIT(5) #define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x)) #define SPM_VDE_PWR_CON 0x0210 diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index bf39a64f3ecc..d8cb0f833645 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -13,6 +13,9 @@ #include <linux/regmap.h> #include <linux/reset.h> +#define PWRAP_POLL_DELAY_US 10 +#define PWRAP_POLL_TIMEOUT_US 10000 + #define PWRAP_MT8135_BRIDGE_IORD_ARB_EN 0x4 #define PWRAP_MT8135_BRIDGE_WACS3_EN 0x10 #define PWRAP_MT8135_BRIDGE_INIT_DONE3 0x14 @@ -1140,12 +1143,9 @@ enum pwrap_type { }; struct pmic_wrapper; -struct pwrap_slv_type { - const u32 *dew_regs; - enum pmic_type type; + +struct pwrap_slv_regops { const struct regmap_config *regmap; - /* Flags indicating the capability for the target slave */ - u32 caps; /* * pwrap operations are highly associated with the PMIC types, * so the pointers added increases flexibility allowing determination @@ -1155,6 +1155,14 @@ struct pwrap_slv_type { int (*pwrap_write)(struct pmic_wrapper *wrp, u32 adr, u32 wdata); }; +struct pwrap_slv_type { + const u32 *dew_regs; + enum pmic_type type; + const struct pwrap_slv_regops *regops; + /* Flags indicating the capability for the target slave */ + u32 caps; +}; + struct pmic_wrapper { struct device *dev; void __iomem *base; @@ -1241,27 +1249,14 @@ static bool pwrap_is_fsm_idle_and_sync_idle(struct pmic_wrapper *wrp) (val & PWRAP_STATE_SYNC_IDLE0); } -static int pwrap_wait_for_state(struct pmic_wrapper *wrp, - bool (*fp)(struct pmic_wrapper *)) -{ - unsigned long timeout; - - timeout = jiffies + usecs_to_jiffies(10000); - - do { - if (time_after(jiffies, timeout)) - return fp(wrp) ? 
0 : -ETIMEDOUT; - if (fp(wrp)) - return 0; - } while (1); -} - static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) { + bool tmp; int ret; u32 val; - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { pwrap_leave_fsm_vldclr(wrp); return ret; @@ -1273,7 +1268,8 @@ static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) val = (adr >> 1) << 16; pwrap_writel(wrp, val, PWRAP_WACS2_CMD); - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr); + ret = readx_poll_timeout(pwrap_is_fsm_vldclr, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) return ret; @@ -1290,11 +1286,14 @@ static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) { + bool tmp; int ret, msb; *rdata = 0; for (msb = 0; msb < 2; msb++) { - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); + if (ret) { pwrap_leave_fsm_vldclr(wrp); return ret; @@ -1303,7 +1302,8 @@ static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) pwrap_writel(wrp, ((msb << 30) | (adr << 16)), PWRAP_WACS2_CMD); - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr); + ret = readx_poll_timeout(pwrap_is_fsm_vldclr, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) return ret; @@ -1318,14 +1318,16 @@ static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) { - return wrp->slave->pwrap_read(wrp, adr, rdata); + return wrp->slave->regops->pwrap_read(wrp, adr, rdata); } static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata) { + bool tmp; int ret; - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { pwrap_leave_fsm_vldclr(wrp); return ret; @@ -1344,10 +1346,12 @@ static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata) static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata) { + bool tmp; int ret, msb, rdata; for (msb = 0; msb < 2; msb++) { - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { pwrap_leave_fsm_vldclr(wrp); return ret; @@ -1373,7 +1377,7 @@ static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata) static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) { - return wrp->slave->pwrap_write(wrp, adr, wdata); + return wrp->slave->regops->pwrap_write(wrp, adr, wdata); } static int pwrap_regmap_read(void *context, u32 adr, u32 *rdata) @@ -1388,6 +1392,7 @@ static int pwrap_regmap_write(void *context, u32 adr, u32 wdata) static int pwrap_reset_spislave(struct pmic_wrapper *wrp) { + bool tmp; int ret, i; pwrap_writel(wrp, 0, PWRAP_HIPRIO_ARB_EN); @@ -1407,7 +1412,8 @@ static int pwrap_reset_spislave(struct pmic_wrapper *wrp) pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_OUTS, PWRAP_MAN_CMD); - ret = pwrap_wait_for_state(wrp, pwrap_is_sync_idle); + ret = readx_poll_timeout(pwrap_is_sync_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret); return ret; @@ -1458,14 +1464,15 @@ static int 
pwrap_init_sidly(struct pmic_wrapper *wrp) static int pwrap_init_dual_io(struct pmic_wrapper *wrp) { int ret; + bool tmp; u32 rdata; /* Enable dual IO mode */ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1); /* Check IDLE & INIT_DONE in advance */ - ret = pwrap_wait_for_state(wrp, - pwrap_is_fsm_idle_and_sync_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle_and_sync_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret); return ret; @@ -1570,6 +1577,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp) static int pwrap_init_cipher(struct pmic_wrapper *wrp) { int ret; + bool tmp; u32 rdata = 0; pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST); @@ -1624,14 +1632,16 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp) } /* wait for cipher data ready@AP */ - ret = pwrap_wait_for_state(wrp, pwrap_is_cipher_ready); + ret = readx_poll_timeout(pwrap_is_cipher_ready, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "cipher data ready@AP fail, ret=%d\n", ret); return ret; } /* wait for cipher data ready@PMIC */ - ret = pwrap_wait_for_state(wrp, pwrap_is_pmic_cipher_ready); + ret = readx_poll_timeout(pwrap_is_pmic_cipher_ready, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "timeout waiting for cipher data ready@PMIC\n"); @@ -1640,7 +1650,8 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp) /* wait for cipher mode idle */ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_MODE], 0x1); - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle_and_sync_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "cipher mode idle fail, ret=%d\n", ret); return ret; @@ -1885,99 +1896,82 @@ static const struct regmap_config pwrap_regmap_config32 = { .max_register = 0xffff, }; +static const struct pwrap_slv_regops pwrap_regops16 = { + .pwrap_read = pwrap_read16, + .pwrap_write = pwrap_write16, + .regmap = &pwrap_regmap_config16, +}; + +static const struct pwrap_slv_regops pwrap_regops32 = { + .pwrap_read = pwrap_read32, + .pwrap_write = pwrap_write32, + .regmap = &pwrap_regmap_config32, +}; + static const struct pwrap_slv_type pmic_mt6323 = { .dew_regs = mt6323_regs, .type = PMIC_MT6323, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO | PWRAP_SLV_CAP_SECURITY, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6351 = { .dew_regs = mt6351_regs, .type = PMIC_MT6351, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = 0, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6357 = { .dew_regs = mt6357_regs, .type = PMIC_MT6357, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = 0, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6358 = { .dew_regs = mt6358_regs, .type = PMIC_MT6358, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6359 = { .dew_regs = mt6359_regs, .type = PMIC_MT6359, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = PWRAP_SLV_CAP_DUALIO, 
- .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6380 = { .dew_regs = NULL, .type = PMIC_MT6380, - .regmap = &pwrap_regmap_config32, + .regops = &pwrap_regops32, .caps = 0, - .pwrap_read = pwrap_read32, - .pwrap_write = pwrap_write32, }; static const struct pwrap_slv_type pmic_mt6397 = { .dew_regs = mt6397_regs, .type = PMIC_MT6397, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO | PWRAP_SLV_CAP_SECURITY, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct of_device_id of_slave_match_tbl[] = { - { - .compatible = "mediatek,mt6323", - .data = &pmic_mt6323, - }, { - .compatible = "mediatek,mt6351", - .data = &pmic_mt6351, - }, { - .compatible = "mediatek,mt6357", - .data = &pmic_mt6357, - }, { - .compatible = "mediatek,mt6358", - .data = &pmic_mt6358, - }, { - .compatible = "mediatek,mt6359", - .data = &pmic_mt6359, - }, { - /* The MT6380 PMIC only implements a regulator, so we bind it - * directly instead of using a MFD. - */ - .compatible = "mediatek,mt6380-regulator", - .data = &pmic_mt6380, - }, { - .compatible = "mediatek,mt6397", - .data = &pmic_mt6397, - }, { - /* sentinel */ - } + { .compatible = "mediatek,mt6323", .data = &pmic_mt6323 }, + { .compatible = "mediatek,mt6351", .data = &pmic_mt6351 }, + { .compatible = "mediatek,mt6357", .data = &pmic_mt6357 }, + { .compatible = "mediatek,mt6358", .data = &pmic_mt6358 }, + { .compatible = "mediatek,mt6359", .data = &pmic_mt6359 }, + + /* The MT6380 PMIC only implements a regulator, so we bind it + * directly instead of using a MFD. + */ + { .compatible = "mediatek,mt6380-regulator", .data = &pmic_mt6380 }, + { .compatible = "mediatek,mt6397", .data = &pmic_mt6397 }, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, of_slave_match_tbl); @@ -2136,45 +2130,19 @@ static struct pmic_wrapper_type pwrap_mt8186 = { }; static const struct of_device_id of_pwrap_match_tbl[] = { - { - .compatible = "mediatek,mt2701-pwrap", - .data = &pwrap_mt2701, - }, { - .compatible = "mediatek,mt6765-pwrap", - .data = &pwrap_mt6765, - }, { - .compatible = "mediatek,mt6779-pwrap", - .data = &pwrap_mt6779, - }, { - .compatible = "mediatek,mt6797-pwrap", - .data = &pwrap_mt6797, - }, { - .compatible = "mediatek,mt6873-pwrap", - .data = &pwrap_mt6873, - }, { - .compatible = "mediatek,mt7622-pwrap", - .data = &pwrap_mt7622, - }, { - .compatible = "mediatek,mt8135-pwrap", - .data = &pwrap_mt8135, - }, { - .compatible = "mediatek,mt8173-pwrap", - .data = &pwrap_mt8173, - }, { - .compatible = "mediatek,mt8183-pwrap", - .data = &pwrap_mt8183, - }, { - .compatible = "mediatek,mt8186-pwrap", - .data = &pwrap_mt8186, - }, { - .compatible = "mediatek,mt8195-pwrap", - .data = &pwrap_mt8195, - }, { - .compatible = "mediatek,mt8516-pwrap", - .data = &pwrap_mt8516, - }, { - /* sentinel */ - } + { .compatible = "mediatek,mt2701-pwrap", .data = &pwrap_mt2701 }, + { .compatible = "mediatek,mt6765-pwrap", .data = &pwrap_mt6765 }, + { .compatible = "mediatek,mt6779-pwrap", .data = &pwrap_mt6779 }, + { .compatible = "mediatek,mt6797-pwrap", .data = &pwrap_mt6797 }, + { .compatible = "mediatek,mt6873-pwrap", .data = &pwrap_mt6873 }, + { .compatible = "mediatek,mt7622-pwrap", .data = &pwrap_mt7622 }, + { .compatible = "mediatek,mt8135-pwrap", .data = &pwrap_mt8135 }, + { .compatible = "mediatek,mt8173-pwrap", .data = &pwrap_mt8173 }, + { .compatible = "mediatek,mt8183-pwrap", .data = &pwrap_mt8183 }, + { .compatible = 
"mediatek,mt8186-pwrap", .data = &pwrap_mt8186 }, + { .compatible = "mediatek,mt8195-pwrap", .data = &pwrap_mt8195 }, + { .compatible = "mediatek,mt8516-pwrap", .data = &pwrap_mt8516 }, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, of_pwrap_match_tbl); @@ -2185,7 +2153,6 @@ static int pwrap_probe(struct platform_device *pdev) struct pmic_wrapper *wrp; struct device_node *np = pdev->dev.of_node; const struct of_device_id *of_slave_id = NULL; - struct resource *res; if (np->child) of_slave_id = of_match_node(of_slave_match_tbl, np->child); @@ -2205,8 +2172,7 @@ static int pwrap_probe(struct platform_device *pdev) wrp->slave = of_slave_id->data; wrp->dev = &pdev->dev; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwrap"); - wrp->base = devm_ioremap_resource(wrp->dev, res); + wrp->base = devm_platform_ioremap_resource_byname(pdev, "pwrap"); if (IS_ERR(wrp->base)) return PTR_ERR(wrp->base); @@ -2220,9 +2186,7 @@ static int pwrap_probe(struct platform_device *pdev) } if (HAS_CAP(wrp->master->caps, PWRAP_CAP_BRIDGE)) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "pwrap-bridge"); - wrp->bridge_base = devm_ioremap_resource(wrp->dev, res); + wrp->bridge_base = devm_platform_ioremap_resource_byname(pdev, "pwrap-bridge"); if (IS_ERR(wrp->bridge_base)) return PTR_ERR(wrp->bridge_base); @@ -2315,13 +2279,18 @@ static int pwrap_probe(struct platform_device *pdev) pwrap_writel(wrp, wrp->master->int1_en_all, PWRAP_INT1_EN); irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto err_out2; + } + ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt, IRQF_TRIGGER_HIGH, "mt-pmic-pwrap", wrp); if (ret) goto err_out2; - wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regmap); + wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regops->regmap); if (IS_ERR(wrp->regmap)) { ret = PTR_ERR(wrp->regmap); goto err_out2; diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c new file mode 100644 index 000000000000..dee8664a12fd --- /dev/null +++ b/drivers/soc/mediatek/mtk-svs.c @@ -0,0 +1,2403 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 MediaTek Inc. 
+ */ + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/cpuidle.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/nvmem-consumer.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_opp.h> +#include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/thermal.h> + +/* svs bank 1-line software id */ +#define SVSB_CPU_LITTLE BIT(0) +#define SVSB_CPU_BIG BIT(1) +#define SVSB_CCI BIT(2) +#define SVSB_GPU BIT(3) + +/* svs bank 2-line type */ +#define SVSB_LOW BIT(8) +#define SVSB_HIGH BIT(9) + +/* svs bank mode support */ +#define SVSB_MODE_ALL_DISABLE 0 +#define SVSB_MODE_INIT01 BIT(1) +#define SVSB_MODE_INIT02 BIT(2) +#define SVSB_MODE_MON BIT(3) + +/* svs bank volt flags */ +#define SVSB_INIT01_PD_REQ BIT(0) +#define SVSB_INIT01_VOLT_IGNORE BIT(1) +#define SVSB_INIT01_VOLT_INC_ONLY BIT(2) +#define SVSB_MON_VOLT_IGNORE BIT(16) +#define SVSB_REMOVE_DVTFIXED_VOLT BIT(24) + +/* svs bank register common configuration */ +#define SVSB_DET_MAX 0xffff +#define SVSB_DET_WINDOW 0xa28 +#define SVSB_DTHI 0x1 +#define SVSB_DTLO 0xfe +#define SVSB_EN_INIT01 0x1 +#define SVSB_EN_INIT02 0x5 +#define SVSB_EN_MON 0x2 +#define SVSB_EN_OFF 0x0 +#define SVSB_INTEN_INIT0x 0x00005f01 +#define SVSB_INTEN_MONVOPEN 0x00ff0000 +#define SVSB_INTSTS_CLEAN 0x00ffffff +#define SVSB_INTSTS_COMPLETE 0x1 +#define SVSB_INTSTS_MONVOP 0x00ff0000 +#define SVSB_RUNCONFIG_DEFAULT 0x80000000 + +/* svs bank related setting */ +#define BITS8 8 +#define MAX_OPP_ENTRIES 16 +#define REG_BYTES 4 +#define SVSB_DC_SIGNED_BIT BIT(15) +#define SVSB_DET_CLK_EN BIT(31) +#define SVSB_TEMP_LOWER_BOUND 0xb2 +#define SVSB_TEMP_UPPER_BOUND 0x64 + +static DEFINE_SPINLOCK(svs_lock); + +#define debug_fops_ro(name) \ + static int svs_##name##_debug_open(struct inode *inode, \ + struct file *filp) \ + { \ + return single_open(filp, svs_##name##_debug_show, \ + inode->i_private); \ + } \ + static const struct file_operations svs_##name##_debug_fops = { \ + .owner = THIS_MODULE, \ + .open = svs_##name##_debug_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +#define debug_fops_rw(name) \ + static int svs_##name##_debug_open(struct inode *inode, \ + struct file *filp) \ + { \ + return single_open(filp, svs_##name##_debug_show, \ + inode->i_private); \ + } \ + static const struct file_operations svs_##name##_debug_fops = { \ + .owner = THIS_MODULE, \ + .open = svs_##name##_debug_open, \ + .read = seq_read, \ + .write = svs_##name##_debug_write, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +#define svs_dentry_data(name) {__stringify(name), &svs_##name##_debug_fops} + +/** + * enum svsb_phase - svs bank phase enumeration + * @SVSB_PHASE_ERROR: svs bank encounters unexpected condition + * @SVSB_PHASE_INIT01: svs bank basic init for data calibration + * @SVSB_PHASE_INIT02: svs bank can provide voltages to opp table + * @SVSB_PHASE_MON: svs bank can provide voltages with thermal effect + * @SVSB_PHASE_MAX: total number of svs bank phase (debug purpose) + * + * Each svs bank has its own independent phase and we 
enable each svs bank by + * running their phase orderly. However, when svs bank encounters unexpected + * condition, it will fire an irq (PHASE_ERROR) to inform svs software. + * + * svs bank general phase-enabled order: + * SVSB_PHASE_INIT01 -> SVSB_PHASE_INIT02 -> SVSB_PHASE_MON + */ +enum svsb_phase { + SVSB_PHASE_ERROR = 0, + SVSB_PHASE_INIT01, + SVSB_PHASE_INIT02, + SVSB_PHASE_MON, + SVSB_PHASE_MAX, +}; + +enum svs_reg_index { + DESCHAR = 0, + TEMPCHAR, + DETCHAR, + AGECHAR, + DCCONFIG, + AGECONFIG, + FREQPCT30, + FREQPCT74, + LIMITVALS, + VBOOT, + DETWINDOW, + CONFIG, + TSCALCS, + RUNCONFIG, + SVSEN, + INIT2VALS, + DCVALUES, + AGEVALUES, + VOP30, + VOP74, + TEMP, + INTSTS, + INTSTSRAW, + INTEN, + CHKINT, + CHKSHIFT, + STATUS, + VDESIGN30, + VDESIGN74, + DVT30, + DVT74, + AGECOUNT, + SMSTATE0, + SMSTATE1, + CTL0, + DESDETSEC, + TEMPAGESEC, + CTRLSPARE0, + CTRLSPARE1, + CTRLSPARE2, + CTRLSPARE3, + CORESEL, + THERMINTST, + INTST, + THSTAGE0ST, + THSTAGE1ST, + THSTAGE2ST, + THAHBST0, + THAHBST1, + SPARE0, + SPARE1, + SPARE2, + SPARE3, + THSLPEVEB, + SVS_REG_MAX, +}; + +static const u32 svs_regs_v2[] = { + [DESCHAR] = 0xc00, + [TEMPCHAR] = 0xc04, + [DETCHAR] = 0xc08, + [AGECHAR] = 0xc0c, + [DCCONFIG] = 0xc10, + [AGECONFIG] = 0xc14, + [FREQPCT30] = 0xc18, + [FREQPCT74] = 0xc1c, + [LIMITVALS] = 0xc20, + [VBOOT] = 0xc24, + [DETWINDOW] = 0xc28, + [CONFIG] = 0xc2c, + [TSCALCS] = 0xc30, + [RUNCONFIG] = 0xc34, + [SVSEN] = 0xc38, + [INIT2VALS] = 0xc3c, + [DCVALUES] = 0xc40, + [AGEVALUES] = 0xc44, + [VOP30] = 0xc48, + [VOP74] = 0xc4c, + [TEMP] = 0xc50, + [INTSTS] = 0xc54, + [INTSTSRAW] = 0xc58, + [INTEN] = 0xc5c, + [CHKINT] = 0xc60, + [CHKSHIFT] = 0xc64, + [STATUS] = 0xc68, + [VDESIGN30] = 0xc6c, + [VDESIGN74] = 0xc70, + [DVT30] = 0xc74, + [DVT74] = 0xc78, + [AGECOUNT] = 0xc7c, + [SMSTATE0] = 0xc80, + [SMSTATE1] = 0xc84, + [CTL0] = 0xc88, + [DESDETSEC] = 0xce0, + [TEMPAGESEC] = 0xce4, + [CTRLSPARE0] = 0xcf0, + [CTRLSPARE1] = 0xcf4, + [CTRLSPARE2] = 0xcf8, + [CTRLSPARE3] = 0xcfc, + [CORESEL] = 0xf00, + [THERMINTST] = 0xf04, + [INTST] = 0xf08, + [THSTAGE0ST] = 0xf0c, + [THSTAGE1ST] = 0xf10, + [THSTAGE2ST] = 0xf14, + [THAHBST0] = 0xf18, + [THAHBST1] = 0xf1c, + [SPARE0] = 0xf20, + [SPARE1] = 0xf24, + [SPARE2] = 0xf28, + [SPARE3] = 0xf2c, + [THSLPEVEB] = 0xf30, +}; + +/** + * struct svs_platform - svs platform control + * @name: svs platform name + * @base: svs platform register base + * @dev: svs platform device + * @main_clk: main clock for svs bank + * @pbank: svs bank pointer needing to be protected by spin_lock section + * @banks: svs banks that svs platform supports + * @rst: svs platform reset control + * @efuse_parsing: svs platform efuse parsing function pointer + * @probe: svs platform probe function pointer + * @irqflags: svs platform irq settings flags + * @efuse_max: total number of svs efuse + * @tefuse_max: total number of thermal efuse + * @regs: svs platform registers map + * @bank_max: total number of svs banks + * @efuse: svs efuse data received from NVMEM framework + * @tefuse: thermal efuse data received from NVMEM framework + */ +struct svs_platform { + char *name; + void __iomem *base; + struct device *dev; + struct clk *main_clk; + struct svs_bank *pbank; + struct svs_bank *banks; + struct reset_control *rst; + bool (*efuse_parsing)(struct svs_platform *svsp); + int (*probe)(struct svs_platform *svsp); + unsigned long irqflags; + size_t efuse_max; + size_t tefuse_max; + const u32 *regs; + u32 bank_max; + u32 *efuse; + u32 *tefuse; +}; + +struct svs_platform_data { + char *name; 
+ struct svs_bank *banks; + bool (*efuse_parsing)(struct svs_platform *svsp); + int (*probe)(struct svs_platform *svsp); + unsigned long irqflags; + const u32 *regs; + u32 bank_max; +}; + +/** + * struct svs_bank - svs bank representation + * @dev: bank device + * @opp_dev: device for opp table/buck control + * @init_completion: the timeout completion for bank init + * @buck: regulator used by opp_dev + * @tzd: thermal zone device for getting temperature + * @lock: mutex lock to protect voltage update process + * @set_freq_pct: function pointer to set bank frequency percent table + * @get_volts: function pointer to get bank voltages + * @name: bank name + * @buck_name: regulator name + * @tzone_name: thermal zone name + * @phase: bank current phase + * @volt_od: bank voltage overdrive + * @reg_data: bank register data in different phase for debug purpose + * @pm_runtime_enabled_count: bank pm runtime enabled count + * @mode_support: bank mode support + * @freq_base: reference frequency for bank init + * @turn_freq_base: reference frequency for 2-line turn point + * @vboot: voltage request for bank init01 only + * @opp_dfreq: default opp frequency table + * @opp_dvolt: default opp voltage table + * @freq_pct: frequency percent table for bank init + * @volt: bank voltage table + * @volt_step: bank voltage step + * @volt_base: bank voltage base + * @volt_flags: bank voltage flags + * @vmax: bank voltage maximum + * @vmin: bank voltage minimum + * @age_config: bank age configuration + * @age_voffset_in: bank age voltage offset + * @dc_config: bank dc configuration + * @dc_voffset_in: bank dc voltage offset + * @dvt_fixed: bank dvt fixed value + * @vco: bank VCO value + * @chk_shift: bank chicken shift + * @core_sel: bank selection + * @opp_count: bank opp count + * @int_st: bank interrupt identification + * @sw_id: bank software identification + * @cpu_id: cpu core id for SVS CPU bank use only + * @ctl0: TS-x selection + * @temp: bank temperature + * @tzone_htemp: thermal zone high temperature threshold + * @tzone_htemp_voffset: thermal zone high temperature voltage offset + * @tzone_ltemp: thermal zone low temperature threshold + * @tzone_ltemp_voffset: thermal zone low temperature voltage offset + * @bts: svs efuse data + * @mts: svs efuse data + * @bdes: svs efuse data + * @mdes: svs efuse data + * @mtdes: svs efuse data + * @dcbdet: svs efuse data + * @dcmdet: svs efuse data + * @turn_pt: 2-line turn point telling which opp_volt is calculated by the high/low bank + * @type: bank type: a 2-line (high/low) bank or a 1-line bank + * + * Each svs bank generates suitable voltages by the general math equation below + * and provides these voltages to the opp voltage table.
+ * + * opp_volt[i] = (volt[i] * volt_step) + volt_base; + */ +struct svs_bank { + struct device *dev; + struct device *opp_dev; + struct completion init_completion; + struct regulator *buck; + struct thermal_zone_device *tzd; + struct mutex lock; /* lock to protect voltage update process */ + void (*set_freq_pct)(struct svs_platform *svsp); + void (*get_volts)(struct svs_platform *svsp); + char *name; + char *buck_name; + char *tzone_name; + enum svsb_phase phase; + s32 volt_od; + u32 reg_data[SVSB_PHASE_MAX][SVS_REG_MAX]; + u32 pm_runtime_enabled_count; + u32 mode_support; + u32 freq_base; + u32 turn_freq_base; + u32 vboot; + u32 opp_dfreq[MAX_OPP_ENTRIES]; + u32 opp_dvolt[MAX_OPP_ENTRIES]; + u32 freq_pct[MAX_OPP_ENTRIES]; + u32 volt[MAX_OPP_ENTRIES]; + u32 volt_step; + u32 volt_base; + u32 volt_flags; + u32 vmax; + u32 vmin; + u32 age_config; + u32 age_voffset_in; + u32 dc_config; + u32 dc_voffset_in; + u32 dvt_fixed; + u32 vco; + u32 chk_shift; + u32 core_sel; + u32 opp_count; + u32 int_st; + u32 sw_id; + u32 cpu_id; + u32 ctl0; + u32 temp; + u32 tzone_htemp; + u32 tzone_htemp_voffset; + u32 tzone_ltemp; + u32 tzone_ltemp_voffset; + u32 bts; + u32 mts; + u32 bdes; + u32 mdes; + u32 mtdes; + u32 dcbdet; + u32 dcmdet; + u32 turn_pt; + u32 type; +}; + +static u32 percent(u32 numerator, u32 denominator) +{ + /* Divide by 1000 first; otherwise "numerator * 100" may overflow u32. */ + numerator /= 1000; + denominator /= 1000; + + return DIV_ROUND_UP(numerator * 100, denominator); +} + +static u32 svs_readl_relaxed(struct svs_platform *svsp, enum svs_reg_index rg_i) +{ + return readl_relaxed(svsp->base + svsp->regs[rg_i]); +} + +static void svs_writel_relaxed(struct svs_platform *svsp, u32 val, + enum svs_reg_index rg_i) +{ + writel_relaxed(val, svsp->base + svsp->regs[rg_i]); +} + +static void svs_switch_bank(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + svs_writel_relaxed(svsp, svsb->core_sel, CORESEL); +} + +static u32 svs_bank_volt_to_opp_volt(u32 svsb_volt, u32 svsb_volt_step, + u32 svsb_volt_base) +{ + return (svsb_volt * svsb_volt_step) + svsb_volt_base; +} + +static u32 svs_opp_volt_to_bank_volt(u32 opp_u_volt, u32 svsb_volt_step, + u32 svsb_volt_base) +{ + return (opp_u_volt - svsb_volt_base) / svsb_volt_step; +} + +static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb) +{ + struct dev_pm_opp *opp; + u32 i, opp_u_volt; + + for (i = 0; i < svsb->opp_count; i++) { + opp = dev_pm_opp_find_freq_exact(svsb->opp_dev, + svsb->opp_dfreq[i], + true); + if (IS_ERR(opp)) { + dev_err(svsb->dev, "cannot find freq = %u (%ld)\n", + svsb->opp_dfreq[i], PTR_ERR(opp)); + return PTR_ERR(opp); + } + + opp_u_volt = dev_pm_opp_get_voltage(opp); + svsb->volt[i] = svs_opp_volt_to_bank_volt(opp_u_volt, + svsb->volt_step, + svsb->volt_base); + dev_pm_opp_put(opp); + } + + return 0; +} + +static int svs_adjust_pm_opp_volts(struct svs_bank *svsb) +{ + int ret = -EPERM, tzone_temp = 0; + u32 i, svsb_volt, opp_volt, temp_voffset = 0, opp_start, opp_stop; + + mutex_lock(&svsb->lock); + + /* + * 2-line bank updates its corresponding opp volts. + * 1-line bank updates all opp volts.
+ */ + if (svsb->type == SVSB_HIGH) { + opp_start = 0; + opp_stop = svsb->turn_pt; + } else if (svsb->type == SVSB_LOW) { + opp_start = svsb->turn_pt; + opp_stop = svsb->opp_count; + } else { + opp_start = 0; + opp_stop = svsb->opp_count; + } + + /* Get thermal effect */ + if (svsb->phase == SVSB_PHASE_MON) { + ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp); + if (ret || (svsb->temp > SVSB_TEMP_UPPER_BOUND && + svsb->temp < SVSB_TEMP_LOWER_BOUND)) { + dev_err(svsb->dev, "%s: %d (0x%x), run default volts\n", + svsb->tzone_name, ret, svsb->temp); + svsb->phase = SVSB_PHASE_ERROR; + } + + if (tzone_temp >= svsb->tzone_htemp) + temp_voffset += svsb->tzone_htemp_voffset; + else if (tzone_temp <= svsb->tzone_ltemp) + temp_voffset += svsb->tzone_ltemp_voffset; + + /* 2-line bank update all opp volts when running mon mode */ + if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) { + opp_start = 0; + opp_stop = svsb->opp_count; + } + } + + /* vmin <= svsb_volt (opp_volt) <= default opp voltage */ + for (i = opp_start; i < opp_stop; i++) { + switch (svsb->phase) { + case SVSB_PHASE_ERROR: + opp_volt = svsb->opp_dvolt[i]; + break; + case SVSB_PHASE_INIT01: + /* do nothing */ + goto unlock_mutex; + case SVSB_PHASE_INIT02: + svsb_volt = max(svsb->volt[i], svsb->vmin); + opp_volt = svs_bank_volt_to_opp_volt(svsb_volt, + svsb->volt_step, + svsb->volt_base); + break; + case SVSB_PHASE_MON: + svsb_volt = max(svsb->volt[i] + temp_voffset, svsb->vmin); + opp_volt = svs_bank_volt_to_opp_volt(svsb_volt, + svsb->volt_step, + svsb->volt_base); + break; + default: + dev_err(svsb->dev, "unknown phase: %u\n", svsb->phase); + ret = -EINVAL; + goto unlock_mutex; + } + + opp_volt = min(opp_volt, svsb->opp_dvolt[i]); + ret = dev_pm_opp_adjust_voltage(svsb->opp_dev, + svsb->opp_dfreq[i], + opp_volt, opp_volt, + svsb->opp_dvolt[i]); + if (ret) { + dev_err(svsb->dev, "set %uuV fail: %d\n", + opp_volt, ret); + goto unlock_mutex; + } + } + +unlock_mutex: + mutex_unlock(&svsb->lock); + + return ret; +} + +static int svs_dump_debug_show(struct seq_file *m, void *p) +{ + struct svs_platform *svsp = (struct svs_platform *)m->private; + struct svs_bank *svsb; + unsigned long svs_reg_addr; + u32 idx, i, j, bank_id; + + for (i = 0; i < svsp->efuse_max; i++) + if (svsp->efuse && svsp->efuse[i]) + seq_printf(m, "M_HW_RES%d = 0x%08x\n", + i, svsp->efuse[i]); + + for (i = 0; i < svsp->tefuse_max; i++) + if (svsp->tefuse) + seq_printf(m, "THERMAL_EFUSE%d = 0x%08x\n", + i, svsp->tefuse[i]); + + for (bank_id = 0, idx = 0; idx < svsp->bank_max; idx++, bank_id++) { + svsb = &svsp->banks[idx]; + + for (i = SVSB_PHASE_INIT01; i <= SVSB_PHASE_MON; i++) { + seq_printf(m, "Bank_number = %u\n", bank_id); + + if (i == SVSB_PHASE_INIT01 || i == SVSB_PHASE_INIT02) + seq_printf(m, "mode = init%d\n", i); + else if (i == SVSB_PHASE_MON) + seq_puts(m, "mode = mon\n"); + else + seq_puts(m, "mode = error\n"); + + for (j = DESCHAR; j < SVS_REG_MAX; j++) { + svs_reg_addr = (unsigned long)(svsp->base + + svsp->regs[j]); + seq_printf(m, "0x%08lx = 0x%08x\n", + svs_reg_addr, svsb->reg_data[i][j]); + } + } + } + + return 0; +} + +debug_fops_ro(dump); + +static int svs_enable_debug_show(struct seq_file *m, void *v) +{ + struct svs_bank *svsb = (struct svs_bank *)m->private; + + switch (svsb->phase) { + case SVSB_PHASE_ERROR: + seq_puts(m, "disabled\n"); + break; + case SVSB_PHASE_INIT01: + seq_puts(m, "init1\n"); + break; + case SVSB_PHASE_INIT02: + seq_puts(m, "init2\n"); + break; + case SVSB_PHASE_MON: + seq_puts(m, "mon mode\n"); + break; + default: + 
seq_puts(m, "unknown\n"); + break; + } + + return 0; +} + +static ssize_t svs_enable_debug_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *pos) +{ + struct svs_bank *svsb = file_inode(filp)->i_private; + struct svs_platform *svsp = dev_get_drvdata(svsb->dev); + unsigned long flags; + int enabled, ret; + char *buf = NULL; + + if (count >= PAGE_SIZE) + return -EINVAL; + + buf = (char *)memdup_user_nul(buffer, count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + ret = kstrtoint(buf, 10, &enabled); + if (ret) + return ret; + + if (!enabled) { + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svsb->mode_support = SVSB_MODE_ALL_DISABLE; + svs_switch_bank(svsp); + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); + spin_unlock_irqrestore(&svs_lock, flags); + + svsb->phase = SVSB_PHASE_ERROR; + svs_adjust_pm_opp_volts(svsb); + } + + kfree(buf); + + return count; +} + +debug_fops_rw(enable); + +static int svs_status_debug_show(struct seq_file *m, void *v) +{ + struct svs_bank *svsb = (struct svs_bank *)m->private; + struct dev_pm_opp *opp; + int tzone_temp = 0, ret; + u32 i; + + ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp); + if (ret) + seq_printf(m, "%s: temperature ignore, turn_pt = %u\n", + svsb->name, svsb->turn_pt); + else + seq_printf(m, "%s: temperature = %d, turn_pt = %u\n", + svsb->name, tzone_temp, svsb->turn_pt); + + for (i = 0; i < svsb->opp_count; i++) { + opp = dev_pm_opp_find_freq_exact(svsb->opp_dev, + svsb->opp_dfreq[i], true); + if (IS_ERR(opp)) { + seq_printf(m, "%s: cannot find freq = %u (%ld)\n", + svsb->name, svsb->opp_dfreq[i], + PTR_ERR(opp)); + return PTR_ERR(opp); + } + + seq_printf(m, "opp_freq[%02u]: %u, opp_volt[%02u]: %lu, ", + i, svsb->opp_dfreq[i], i, + dev_pm_opp_get_voltage(opp)); + seq_printf(m, "svsb_volt[%02u]: 0x%x, freq_pct[%02u]: %u\n", + i, svsb->volt[i], i, svsb->freq_pct[i]); + dev_pm_opp_put(opp); + } + + return 0; +} + +debug_fops_ro(status); + +static int svs_create_debug_cmds(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct dentry *svs_dir, *svsb_dir, *file_entry; + const char *d = "/sys/kernel/debug/svs"; + u32 i, idx; + + struct svs_dentry { + const char *name; + const struct file_operations *fops; + }; + + struct svs_dentry svs_entries[] = { + svs_dentry_data(dump), + }; + + struct svs_dentry svsb_entries[] = { + svs_dentry_data(enable), + svs_dentry_data(status), + }; + + svs_dir = debugfs_create_dir("svs", NULL); + if (IS_ERR(svs_dir)) { + dev_err(svsp->dev, "cannot create %s: %ld\n", + d, PTR_ERR(svs_dir)); + return PTR_ERR(svs_dir); + } + + for (i = 0; i < ARRAY_SIZE(svs_entries); i++) { + file_entry = debugfs_create_file(svs_entries[i].name, 0664, + svs_dir, svsp, + svs_entries[i].fops); + if (IS_ERR(file_entry)) { + dev_err(svsp->dev, "cannot create %s/%s: %ld\n", + d, svs_entries[i].name, PTR_ERR(file_entry)); + return PTR_ERR(file_entry); + } + } + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (svsb->mode_support == SVSB_MODE_ALL_DISABLE) + continue; + + svsb_dir = debugfs_create_dir(svsb->name, svs_dir); + if (IS_ERR(svsb_dir)) { + dev_err(svsp->dev, "cannot create %s/%s: %ld\n", + d, svsb->name, PTR_ERR(svsb_dir)); + return PTR_ERR(svsb_dir); + } + + for (i = 0; i < ARRAY_SIZE(svsb_entries); i++) { + file_entry = debugfs_create_file(svsb_entries[i].name, + 0664, svsb_dir, svsb, + svsb_entries[i].fops); + if (IS_ERR(file_entry)) { + dev_err(svsp->dev, "no %s/%s/%s?: %ld\n", + d, svsb->name, 
svsb_entries[i].name, + PTR_ERR(file_entry)); + return PTR_ERR(file_entry); + } + } + } + + return 0; +} + +static u32 interpolate(u32 f0, u32 f1, u32 v0, u32 v1, u32 fx) +{ + u32 vx; + + if (v0 == v1 || f0 == f1) + return v0; + + /* *100 to have decimal fraction factor */ + vx = (v0 * 100) - ((((v0 - v1) * 100) / (f0 - f1)) * (f0 - fx)); + + return DIV_ROUND_UP(vx, 100); +} + +static void svs_get_bank_volts_v3(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + u32 i, j, *vop, vop74, vop30, turn_pt = svsb->turn_pt; + u32 b_sft, shift_byte = 0, opp_start = 0, opp_stop = 0; + u32 middle_index = (svsb->opp_count / 2); + + if (svsb->phase == SVSB_PHASE_MON && + svsb->volt_flags & SVSB_MON_VOLT_IGNORE) + return; + + vop74 = svs_readl_relaxed(svsp, VOP74); + vop30 = svs_readl_relaxed(svsp, VOP30); + + /* Target is to set svsb->volt[] by algorithm */ + if (turn_pt < middle_index) { + if (svsb->type == SVSB_HIGH) { + /* volt[0] ~ volt[turn_pt - 1] */ + for (i = 0; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? &vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + } else if (svsb->type == SVSB_LOW) { + /* volt[turn_pt] + volt[j] ~ volt[opp_count - 1] */ + j = svsb->opp_count - 7; + svsb->volt[turn_pt] = vop30 & GENMASK(7, 0); + shift_byte++; + for (i = j; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? &vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + + /* volt[turn_pt + 1] ~ volt[j - 1] by interpolate */ + for (i = turn_pt + 1; i < j; i++) + svsb->volt[i] = interpolate(svsb->freq_pct[turn_pt], + svsb->freq_pct[j], + svsb->volt[turn_pt], + svsb->volt[j], + svsb->freq_pct[i]); + } + } else { + if (svsb->type == SVSB_HIGH) { + /* volt[0] + volt[j] ~ volt[turn_pt - 1] */ + j = turn_pt - 7; + svsb->volt[0] = vop30 & GENMASK(7, 0); + shift_byte++; + for (i = j; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? &vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + + /* volt[1] ~ volt[j - 1] by interpolate */ + for (i = 1; i < j; i++) + svsb->volt[i] = interpolate(svsb->freq_pct[0], + svsb->freq_pct[j], + svsb->volt[0], + svsb->volt[j], + svsb->freq_pct[i]); + } else if (svsb->type == SVSB_LOW) { + /* volt[turn_pt] ~ volt[opp_count - 1] */ + for (i = turn_pt; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? 
&vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + } + } + + if (svsb->type == SVSB_HIGH) { + opp_start = 0; + opp_stop = svsb->turn_pt; + } else if (svsb->type == SVSB_LOW) { + opp_start = svsb->turn_pt; + opp_stop = svsb->opp_count; + } + + for (i = opp_start; i < opp_stop; i++) + if (svsb->volt_flags & SVSB_REMOVE_DVTFIXED_VOLT) + svsb->volt[i] -= svsb->dvt_fixed; +} + +static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + u32 i, j, *freq_pct, freq_pct74 = 0, freq_pct30 = 0; + u32 b_sft, shift_byte = 0, turn_pt; + u32 middle_index = (svsb->opp_count / 2); + + for (i = 0; i < svsb->opp_count; i++) { + if (svsb->opp_dfreq[i] <= svsb->turn_freq_base) { + svsb->turn_pt = i; + break; + } + } + + turn_pt = svsb->turn_pt; + + /* Target is to fill out freq_pct74 / freq_pct30 by algorithm */ + if (turn_pt < middle_index) { + if (svsb->type == SVSB_HIGH) { + /* + * If we don't handle this situation, + * SVSB_HIGH's FREQPCT74 / FREQPCT30 would keep "0" + * and this leads SVSB_LOW to work abnormally. + */ + if (turn_pt == 0) + freq_pct30 = svsb->freq_pct[0]; + + /* freq_pct[0] ~ freq_pct[turn_pt - 1] */ + for (i = 0; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? + &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } else if (svsb->type == SVSB_LOW) { + /* + * freq_pct[turn_pt] + + * freq_pct[opp_count - 7] ~ freq_pct[opp_count -1] + */ + freq_pct30 = svsb->freq_pct[turn_pt]; + shift_byte++; + j = svsb->opp_count - 7; + for (i = j; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? + &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } + } else { + if (svsb->type == SVSB_HIGH) { + /* + * freq_pct[0] + + * freq_pct[turn_pt - 7] ~ freq_pct[turn_pt - 1] + */ + freq_pct30 = svsb->freq_pct[0]; + shift_byte++; + j = turn_pt - 7; + for (i = j; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? + &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } else if (svsb->type == SVSB_LOW) { + /* freq_pct[turn_pt] ~ freq_pct[opp_count - 1] */ + for (i = turn_pt; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? 
+ &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } + } + + svs_writel_relaxed(svsp, freq_pct74, FREQPCT74); + svs_writel_relaxed(svsp, freq_pct30, FREQPCT30); +} + +static void svs_get_bank_volts_v2(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + u32 temp, i; + + temp = svs_readl_relaxed(svsp, VOP74); + svsb->volt[14] = (temp >> 24) & GENMASK(7, 0); + svsb->volt[12] = (temp >> 16) & GENMASK(7, 0); + svsb->volt[10] = (temp >> 8) & GENMASK(7, 0); + svsb->volt[8] = (temp & GENMASK(7, 0)); + + temp = svs_readl_relaxed(svsp, VOP30); + svsb->volt[6] = (temp >> 24) & GENMASK(7, 0); + svsb->volt[4] = (temp >> 16) & GENMASK(7, 0); + svsb->volt[2] = (temp >> 8) & GENMASK(7, 0); + svsb->volt[0] = (temp & GENMASK(7, 0)); + + for (i = 0; i <= 12; i += 2) + svsb->volt[i + 1] = interpolate(svsb->freq_pct[i], + svsb->freq_pct[i + 2], + svsb->volt[i], + svsb->volt[i + 2], + svsb->freq_pct[i + 1]); + + svsb->volt[15] = interpolate(svsb->freq_pct[12], + svsb->freq_pct[14], + svsb->volt[12], + svsb->volt[14], + svsb->freq_pct[15]); + + for (i = 0; i < svsb->opp_count; i++) + svsb->volt[i] += svsb->volt_od; +} + +static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + svs_writel_relaxed(svsp, + (svsb->freq_pct[14] << 24) | + (svsb->freq_pct[12] << 16) | + (svsb->freq_pct[10] << 8) | + svsb->freq_pct[8], + FREQPCT74); + + svs_writel_relaxed(svsp, + (svsb->freq_pct[6] << 24) | + (svsb->freq_pct[4] << 16) | + (svsb->freq_pct[2] << 8) | + svsb->freq_pct[0], + FREQPCT30); +} + +static void svs_set_bank_phase(struct svs_platform *svsp, + enum svsb_phase target_phase) +{ + struct svs_bank *svsb = svsp->pbank; + u32 des_char, temp_char, det_char, limit_vals, init2vals, ts_calcs; + + svs_switch_bank(svsp); + + des_char = (svsb->bdes << 8) | svsb->mdes; + svs_writel_relaxed(svsp, des_char, DESCHAR); + + temp_char = (svsb->vco << 16) | (svsb->mtdes << 8) | svsb->dvt_fixed; + svs_writel_relaxed(svsp, temp_char, TEMPCHAR); + + det_char = (svsb->dcbdet << 8) | svsb->dcmdet; + svs_writel_relaxed(svsp, det_char, DETCHAR); + + svs_writel_relaxed(svsp, svsb->dc_config, DCCONFIG); + svs_writel_relaxed(svsp, svsb->age_config, AGECONFIG); + svs_writel_relaxed(svsp, SVSB_RUNCONFIG_DEFAULT, RUNCONFIG); + + svsb->set_freq_pct(svsp); + + limit_vals = (svsb->vmax << 24) | (svsb->vmin << 16) | + (SVSB_DTHI << 8) | SVSB_DTLO; + svs_writel_relaxed(svsp, limit_vals, LIMITVALS); + + svs_writel_relaxed(svsp, SVSB_DET_WINDOW, DETWINDOW); + svs_writel_relaxed(svsp, SVSB_DET_MAX, CONFIG); + svs_writel_relaxed(svsp, svsb->chk_shift, CHKSHIFT); + svs_writel_relaxed(svsp, svsb->ctl0, CTL0); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); + + switch (target_phase) { + case SVSB_PHASE_INIT01: + svs_writel_relaxed(svsp, svsb->vboot, VBOOT); + svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN); + svs_writel_relaxed(svsp, SVSB_EN_INIT01, SVSEN); + break; + case SVSB_PHASE_INIT02: + svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN); + init2vals = (svsb->age_voffset_in << 16) | svsb->dc_voffset_in; + svs_writel_relaxed(svsp, init2vals, INIT2VALS); + svs_writel_relaxed(svsp, SVSB_EN_INIT02, SVSEN); + break; + case SVSB_PHASE_MON: + ts_calcs = (svsb->bts << 12) | svsb->mts; + svs_writel_relaxed(svsp, ts_calcs, TSCALCS); + svs_writel_relaxed(svsp, SVSB_INTEN_MONVOPEN, INTEN); + svs_writel_relaxed(svsp, SVSB_EN_MON, SVSEN); + break; + default: + dev_err(svsb->dev, "requested unknown target phase: %u\n", + target_phase); + break; 
+ } +} + +static inline void svs_save_bank_register_data(struct svs_platform *svsp, + enum svsb_phase phase) +{ + struct svs_bank *svsb = svsp->pbank; + enum svs_reg_index rg_i; + + for (rg_i = DESCHAR; rg_i < SVS_REG_MAX; rg_i++) + svsb->reg_data[phase][rg_i] = svs_readl_relaxed(svsp, rg_i); +} + +static inline void svs_error_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + dev_err(svsb->dev, "%s: CORESEL = 0x%08x\n", + __func__, svs_readl_relaxed(svsp, CORESEL)); + dev_err(svsb->dev, "SVSEN = 0x%08x, INTSTS = 0x%08x\n", + svs_readl_relaxed(svsp, SVSEN), + svs_readl_relaxed(svsp, INTSTS)); + dev_err(svsb->dev, "SMSTATE0 = 0x%08x, SMSTATE1 = 0x%08x\n", + svs_readl_relaxed(svsp, SMSTATE0), + svs_readl_relaxed(svsp, SMSTATE1)); + dev_err(svsb->dev, "TEMP = 0x%08x\n", svs_readl_relaxed(svsp, TEMP)); + + svs_save_bank_register_data(svsp, SVSB_PHASE_ERROR); + + svsb->phase = SVSB_PHASE_ERROR; + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); +} + +static inline void svs_init01_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + dev_info(svsb->dev, "%s: VDN74~30:0x%08x~0x%08x, DC:0x%08x\n", + __func__, svs_readl_relaxed(svsp, VDESIGN74), + svs_readl_relaxed(svsp, VDESIGN30), + svs_readl_relaxed(svsp, DCVALUES)); + + svs_save_bank_register_data(svsp, SVSB_PHASE_INIT01); + + svsb->phase = SVSB_PHASE_INIT01; + svsb->dc_voffset_in = ~(svs_readl_relaxed(svsp, DCVALUES) & + GENMASK(15, 0)) + 1; + if (svsb->volt_flags & SVSB_INIT01_VOLT_IGNORE || + (svsb->dc_voffset_in & SVSB_DC_SIGNED_BIT && + svsb->volt_flags & SVSB_INIT01_VOLT_INC_ONLY)) + svsb->dc_voffset_in = 0; + + svsb->age_voffset_in = svs_readl_relaxed(svsp, AGEVALUES) & + GENMASK(15, 0); + + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS); + svsb->core_sel &= ~SVSB_DET_CLK_EN; +} + +static inline void svs_init02_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + dev_info(svsb->dev, "%s: VOP74~30:0x%08x~0x%08x, DC:0x%08x\n", + __func__, svs_readl_relaxed(svsp, VOP74), + svs_readl_relaxed(svsp, VOP30), + svs_readl_relaxed(svsp, DCVALUES)); + + svs_save_bank_register_data(svsp, SVSB_PHASE_INIT02); + + svsb->phase = SVSB_PHASE_INIT02; + svsb->get_volts(svsp); + + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS); +} + +static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + svs_save_bank_register_data(svsp, SVSB_PHASE_MON); + + svsb->phase = SVSB_PHASE_MON; + svsb->get_volts(svsp); + + svsb->temp = svs_readl_relaxed(svsp, TEMP) & GENMASK(7, 0); + svs_writel_relaxed(svsp, SVSB_INTSTS_MONVOP, INTSTS); +} + +static irqreturn_t svs_isr(int irq, void *data) +{ + struct svs_platform *svsp = data; + struct svs_bank *svsb = NULL; + unsigned long flags; + u32 idx, int_sts, svs_en; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + WARN(!svsb, "%s: svsb(%s) is null", __func__, svsb->name); + + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + + /* Find out which svs bank fires interrupt */ + if (svsb->int_st & svs_readl_relaxed(svsp, INTST)) { + spin_unlock_irqrestore(&svs_lock, flags); + continue; + } + + svs_switch_bank(svsp); + int_sts = svs_readl_relaxed(svsp, INTSTS); + svs_en = svs_readl_relaxed(svsp, SVSEN); + + if (int_sts == SVSB_INTSTS_COMPLETE && + svs_en == SVSB_EN_INIT01) + 
svs_init01_isr_handler(svsp); + else if (int_sts == SVSB_INTSTS_COMPLETE && + svs_en == SVSB_EN_INIT02) + svs_init02_isr_handler(svsp); + else if (int_sts & SVSB_INTSTS_MONVOP) + svs_mon_mode_isr_handler(svsp); + else + svs_error_isr_handler(svsp); + + spin_unlock_irqrestore(&svs_lock, flags); + break; + } + + svs_adjust_pm_opp_volts(svsb); + + if (svsb->phase == SVSB_PHASE_INIT01 || + svsb->phase == SVSB_PHASE_INIT02) + complete(&svsb->init_completion); + + return IRQ_HANDLED; +} + +static int svs_init01(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + unsigned long flags, time_left; + bool search_done; + int ret = 0, r; + u32 opp_freq, opp_vboot, buck_volt, idx, i; + + /* Keep CPUs' core power on for svs_init01 initialization */ + cpuidle_pause_and_lock(); + + /* Svs bank init01 preparation - power enable */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + ret = regulator_enable(svsb->buck); + if (ret) { + dev_err(svsb->dev, "%s enable fail: %d\n", + svsb->buck_name, ret); + goto svs_init01_resume_cpuidle; + } + + /* Some buck doesn't support mode change. Show fail msg only */ + ret = regulator_set_mode(svsb->buck, REGULATOR_MODE_FAST); + if (ret) + dev_notice(svsb->dev, "set fast mode fail: %d\n", ret); + + if (svsb->volt_flags & SVSB_INIT01_PD_REQ) { + if (!pm_runtime_enabled(svsb->opp_dev)) { + pm_runtime_enable(svsb->opp_dev); + svsb->pm_runtime_enabled_count++; + } + + ret = pm_runtime_get_sync(svsb->opp_dev); + if (ret < 0) { + dev_err(svsb->dev, "mtcmos on fail: %d\n", ret); + goto svs_init01_resume_cpuidle; + } + } + } + + /* + * Svs bank init01 preparation - vboot voltage adjustment + * Sometimes two svs banks use the same buck. Therefore, + * we have to set each svs bank to target voltage(vboot) first. + */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + /* + * Find the fastest freq that can be run at vboot and + * fix to that freq until svs_init01 is done. 
+ */ + search_done = false; + opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot, + svsb->volt_step, + svsb->volt_base); + + for (i = 0; i < svsb->opp_count; i++) { + opp_freq = svsb->opp_dfreq[i]; + if (!search_done && svsb->opp_dvolt[i] <= opp_vboot) { + ret = dev_pm_opp_adjust_voltage(svsb->opp_dev, + opp_freq, + opp_vboot, + opp_vboot, + opp_vboot); + if (ret) { + dev_err(svsb->dev, + "set opp %uuV vboot fail: %d\n", + opp_vboot, ret); + goto svs_init01_finish; + } + + search_done = true; + } else { + ret = dev_pm_opp_disable(svsb->opp_dev, + svsb->opp_dfreq[i]); + if (ret) { + dev_err(svsb->dev, + "opp %uHz disable fail: %d\n", + svsb->opp_dfreq[i], ret); + goto svs_init01_finish; + } + } + } + } + + /* Svs bank init01 begins */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot, + svsb->volt_step, + svsb->volt_base); + + buck_volt = regulator_get_voltage(svsb->buck); + if (buck_volt != opp_vboot) { + dev_err(svsb->dev, + "buck voltage: %uuV, expected vboot: %uuV\n", + buck_volt, opp_vboot); + ret = -EPERM; + goto svs_init01_finish; + } + + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_set_bank_phase(svsp, SVSB_PHASE_INIT01); + spin_unlock_irqrestore(&svs_lock, flags); + + time_left = wait_for_completion_timeout(&svsb->init_completion, + msecs_to_jiffies(5000)); + if (!time_left) { + dev_err(svsb->dev, "init01 completion timeout\n"); + ret = -EBUSY; + goto svs_init01_finish; + } + } + +svs_init01_finish: + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + for (i = 0; i < svsb->opp_count; i++) { + r = dev_pm_opp_enable(svsb->opp_dev, + svsb->opp_dfreq[i]); + if (r) + dev_err(svsb->dev, "opp %uHz enable fail: %d\n", + svsb->opp_dfreq[i], r); + } + + if (svsb->volt_flags & SVSB_INIT01_PD_REQ) { + r = pm_runtime_put_sync(svsb->opp_dev); + if (r) + dev_err(svsb->dev, "mtcmos off fail: %d\n", r); + + if (svsb->pm_runtime_enabled_count > 0) { + pm_runtime_disable(svsb->opp_dev); + svsb->pm_runtime_enabled_count--; + } + } + + r = regulator_set_mode(svsb->buck, REGULATOR_MODE_NORMAL); + if (r) + dev_notice(svsb->dev, "set normal mode fail: %d\n", r); + + r = regulator_disable(svsb->buck); + if (r) + dev_err(svsb->dev, "%s disable fail: %d\n", + svsb->buck_name, r); + } + +svs_init01_resume_cpuidle: + cpuidle_resume_and_unlock(); + + return ret; +} + +static int svs_init02(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + unsigned long flags, time_left; + u32 idx; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT02)) + continue; + + reinit_completion(&svsb->init_completion); + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_set_bank_phase(svsp, SVSB_PHASE_INIT02); + spin_unlock_irqrestore(&svs_lock, flags); + + time_left = wait_for_completion_timeout(&svsb->init_completion, + msecs_to_jiffies(5000)); + if (!time_left) { + dev_err(svsb->dev, "init02 completion timeout\n"); + return -EBUSY; + } + } + + /* + * 2-line high/low bank update its corresponding opp voltages only. + * Therefore, we sync voltages from opp for high/low bank voltages + * consistency. 
+ */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT02)) + continue; + + if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) { + if (svs_sync_bank_volts_from_opp(svsb)) { + dev_err(svsb->dev, "sync volt fail\n"); + return -EPERM; + } + } + } + + return 0; +} + +static void svs_mon_mode(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + unsigned long flags; + u32 idx; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_MON)) + continue; + + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_set_bank_phase(svsp, SVSB_PHASE_MON); + spin_unlock_irqrestore(&svs_lock, flags); + } +} + +static int svs_start(struct svs_platform *svsp) +{ + int ret; + + ret = svs_init01(svsp); + if (ret) + return ret; + + ret = svs_init02(svsp); + if (ret) + return ret; + + svs_mon_mode(svsp); + + return 0; +} + +static int svs_suspend(struct device *dev) +{ + struct svs_platform *svsp = dev_get_drvdata(dev); + struct svs_bank *svsb; + unsigned long flags; + int ret; + u32 idx; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + /* This might wait for svs_isr() process */ + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_switch_bank(svsp); + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); + spin_unlock_irqrestore(&svs_lock, flags); + + svsb->phase = SVSB_PHASE_ERROR; + svs_adjust_pm_opp_volts(svsb); + } + + ret = reset_control_assert(svsp->rst); + if (ret) { + dev_err(svsp->dev, "cannot assert reset %d\n", ret); + return ret; + } + + clk_disable_unprepare(svsp->main_clk); + + return 0; +} + +static int svs_resume(struct device *dev) +{ + struct svs_platform *svsp = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(svsp->main_clk); + if (ret) { + dev_err(svsp->dev, "cannot enable main_clk, disable svs\n"); + return ret; + } + + ret = reset_control_deassert(svsp->rst); + if (ret) { + dev_err(svsp->dev, "cannot deassert reset %d\n", ret); + goto out_of_resume; + } + + ret = svs_init02(svsp); + if (ret) + goto out_of_resume; + + svs_mon_mode(svsp); + + return 0; + +out_of_resume: + clk_disable_unprepare(svsp->main_clk); + return ret; +} + +static int svs_bank_resource_setup(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct dev_pm_opp *opp; + unsigned long freq; + int count, ret; + u32 idx, i; + + dev_set_drvdata(svsp->dev, svsp); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + svsb->name = "SVSB_CPU_LITTLE"; + break; + case SVSB_CPU_BIG: + svsb->name = "SVSB_CPU_BIG"; + break; + case SVSB_CCI: + svsb->name = "SVSB_CCI"; + break; + case SVSB_GPU: + if (svsb->type == SVSB_HIGH) + svsb->name = "SVSB_GPU_HIGH"; + else if (svsb->type == SVSB_LOW) + svsb->name = "SVSB_GPU_LOW"; + else + svsb->name = "SVSB_GPU"; + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + return -EINVAL; + } + + svsb->dev = devm_kzalloc(svsp->dev, sizeof(*svsb->dev), + GFP_KERNEL); + if (!svsb->dev) + return -ENOMEM; + + ret = dev_set_name(svsb->dev, "%s", svsb->name); + if (ret) + return ret; + + dev_set_drvdata(svsb->dev, svsp); + + ret = dev_pm_opp_of_add_table(svsb->opp_dev); + if (ret) { + dev_err(svsb->dev, "add opp table fail: %d\n", ret); + return ret; + } + + mutex_init(&svsb->lock); + init_completion(&svsb->init_completion); + + if (svsb->mode_support 
& SVSB_MODE_INIT01) { + svsb->buck = devm_regulator_get_optional(svsb->opp_dev, + svsb->buck_name); + if (IS_ERR(svsb->buck)) { + dev_err(svsb->dev, "cannot get \"%s-supply\"\n", + svsb->buck_name); + return PTR_ERR(svsb->buck); + } + } + + if (svsb->mode_support & SVSB_MODE_MON) { + svsb->tzd = thermal_zone_get_zone_by_name(svsb->tzone_name); + if (IS_ERR(svsb->tzd)) { + dev_err(svsb->dev, "cannot get \"%s\" thermal zone\n", + svsb->tzone_name); + return PTR_ERR(svsb->tzd); + } + } + + count = dev_pm_opp_get_opp_count(svsb->opp_dev); + if (svsb->opp_count != count) { + dev_err(svsb->dev, + "opp_count not \"%u\" but get \"%d\"?\n", + svsb->opp_count, count); + return count; + } + + for (i = 0, freq = U32_MAX; i < svsb->opp_count; i++, freq--) { + opp = dev_pm_opp_find_freq_floor(svsb->opp_dev, &freq); + if (IS_ERR(opp)) { + dev_err(svsb->dev, "cannot find freq = %ld\n", + PTR_ERR(opp)); + return PTR_ERR(opp); + } + + svsb->opp_dfreq[i] = freq; + svsb->opp_dvolt[i] = dev_pm_opp_get_voltage(opp); + svsb->freq_pct[i] = percent(svsb->opp_dfreq[i], + svsb->freq_base); + dev_pm_opp_put(opp); + } + } + + return 0; +} + +static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct nvmem_cell *cell; + u32 idx, i, vmin, golden_temp; + + for (i = 0; i < svsp->efuse_max; i++) + if (svsp->efuse[i]) + dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n", + i, svsp->efuse[i]); + + if (!svsp->efuse[9]) { + dev_notice(svsp->dev, "svs_efuse[9] = 0x0?\n"); + return false; + } + + /* Svs efuse parsing */ + vmin = (svsp->efuse[19] >> 4) & GENMASK(1, 0); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (vmin == 0x1) + svsb->vmin = 0x1e; + + if (svsb->type == SVSB_LOW) { + svsb->mtdes = svsp->efuse[10] & GENMASK(7, 0); + svsb->bdes = (svsp->efuse[10] >> 16) & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[10] >> 24) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[17]) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[17] >> 8) & GENMASK(7, 0); + } else if (svsb->type == SVSB_HIGH) { + svsb->mtdes = svsp->efuse[9] & GENMASK(7, 0); + svsb->bdes = (svsp->efuse[9] >> 16) & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[9] >> 24) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[17] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[17] >> 24) & GENMASK(7, 0); + } + + svsb->vmax += svsb->dvt_fixed; + } + + /* Thermal efuse parsing */ + cell = nvmem_cell_get(svsp->dev, "t-calibration-data"); + if (IS_ERR_OR_NULL(cell)) { + dev_err(svsp->dev, "no \"t-calibration-data\"? 
%ld\n", + PTR_ERR(cell)); + return false; + } + + svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max); + if (IS_ERR(svsp->tefuse)) { + dev_err(svsp->dev, "cannot read thermal efuse: %ld\n", + PTR_ERR(svsp->tefuse)); + nvmem_cell_put(cell); + return false; + } + + svsp->tefuse_max /= sizeof(u32); + nvmem_cell_put(cell); + + for (i = 0; i < svsp->tefuse_max; i++) + if (svsp->tefuse[i] != 0) + break; + + if (i == svsp->tefuse_max) + golden_temp = 50; /* All thermal efuse data are 0 */ + else + golden_temp = (svsp->tefuse[0] >> 24) & GENMASK(7, 0); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + svsb->mts = 500; + svsb->bts = (((500 * golden_temp + 250460) / 1000) - 25) * 4; + } + + return true; +} + +static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct nvmem_cell *cell; + int format[6], x_roomt[6], o_vtsmcu[5], o_vtsabb, tb_roomt = 0; + int adc_ge_t, adc_oe_t, ge, oe, gain, degc_cali, adc_cali_en_t; + int o_slope, o_slope_sign, ts_id; + u32 idx, i, ft_pgm, mts, temp0, temp1, temp2; + + for (i = 0; i < svsp->efuse_max; i++) + if (svsp->efuse[i]) + dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n", + i, svsp->efuse[i]); + + if (!svsp->efuse[2]) { + dev_notice(svsp->dev, "svs_efuse[2] = 0x0?\n"); + return false; + } + + /* Svs efuse parsing */ + ft_pgm = (svsp->efuse[0] >> 4) & GENMASK(3, 0); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (ft_pgm <= 1) + svsb->volt_flags |= SVSB_INIT01_VOLT_IGNORE; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + svsb->bdes = svsp->efuse[16] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[16] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[16] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[16] >> 24) & GENMASK(7, 0); + svsb->mtdes = (svsp->efuse[17] >> 16) & GENMASK(7, 0); + + if (ft_pgm <= 3) + svsb->volt_od += 10; + else + svsb->volt_od += 2; + break; + case SVSB_CPU_BIG: + svsb->bdes = svsp->efuse[18] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[18] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[18] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[18] >> 24) & GENMASK(7, 0); + svsb->mtdes = svsp->efuse[17] & GENMASK(7, 0); + + if (ft_pgm <= 3) + svsb->volt_od += 15; + else + svsb->volt_od += 12; + break; + case SVSB_CCI: + svsb->bdes = svsp->efuse[4] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[4] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[4] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[4] >> 24) & GENMASK(7, 0); + svsb->mtdes = (svsp->efuse[5] >> 16) & GENMASK(7, 0); + + if (ft_pgm <= 3) + svsb->volt_od += 10; + else + svsb->volt_od += 2; + break; + case SVSB_GPU: + svsb->bdes = svsp->efuse[6] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[6] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[6] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[6] >> 24) & GENMASK(7, 0); + svsb->mtdes = svsp->efuse[5] & GENMASK(7, 0); + + if (ft_pgm >= 2) { + svsb->freq_base = 800000000; /* 800MHz */ + svsb->dvt_fixed = 2; + } + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + return false; + } + } + + /* Get thermal efuse by nvmem */ + cell = nvmem_cell_get(svsp->dev, "t-calibration-data"); + if (IS_ERR(cell)) { + dev_err(svsp->dev, "no \"t-calibration-data\"? 
%ld\n", + PTR_ERR(cell)); + goto remove_mt8183_svsb_mon_mode; + } + + svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max); + if (IS_ERR(svsp->tefuse)) { + dev_err(svsp->dev, "cannot read thermal efuse: %ld\n", + PTR_ERR(svsp->tefuse)); + nvmem_cell_put(cell); + goto remove_mt8183_svsb_mon_mode; + } + + svsp->tefuse_max /= sizeof(u32); + nvmem_cell_put(cell); + + /* Thermal efuse parsing */ + adc_ge_t = (svsp->tefuse[1] >> 22) & GENMASK(9, 0); + adc_oe_t = (svsp->tefuse[1] >> 12) & GENMASK(9, 0); + + o_vtsmcu[0] = (svsp->tefuse[0] >> 17) & GENMASK(8, 0); + o_vtsmcu[1] = (svsp->tefuse[0] >> 8) & GENMASK(8, 0); + o_vtsmcu[2] = svsp->tefuse[1] & GENMASK(8, 0); + o_vtsmcu[3] = (svsp->tefuse[2] >> 23) & GENMASK(8, 0); + o_vtsmcu[4] = (svsp->tefuse[2] >> 5) & GENMASK(8, 0); + o_vtsabb = (svsp->tefuse[2] >> 14) & GENMASK(8, 0); + + degc_cali = (svsp->tefuse[0] >> 1) & GENMASK(5, 0); + adc_cali_en_t = svsp->tefuse[0] & BIT(0); + o_slope_sign = (svsp->tefuse[0] >> 7) & BIT(0); + + ts_id = (svsp->tefuse[1] >> 9) & BIT(0); + o_slope = (svsp->tefuse[0] >> 26) & GENMASK(5, 0); + + if (adc_cali_en_t == 1) { + if (!ts_id) + o_slope = 0; + + if (adc_ge_t < 265 || adc_ge_t > 758 || + adc_oe_t < 265 || adc_oe_t > 758 || + o_vtsmcu[0] < -8 || o_vtsmcu[0] > 484 || + o_vtsmcu[1] < -8 || o_vtsmcu[1] > 484 || + o_vtsmcu[2] < -8 || o_vtsmcu[2] > 484 || + o_vtsmcu[3] < -8 || o_vtsmcu[3] > 484 || + o_vtsmcu[4] < -8 || o_vtsmcu[4] > 484 || + o_vtsabb < -8 || o_vtsabb > 484 || + degc_cali < 1 || degc_cali > 63) { + dev_err(svsp->dev, "bad thermal efuse, no mon mode\n"); + goto remove_mt8183_svsb_mon_mode; + } + } else { + dev_err(svsp->dev, "no thermal efuse, no mon mode\n"); + goto remove_mt8183_svsb_mon_mode; + } + + ge = ((adc_ge_t - 512) * 10000) / 4096; + oe = (adc_oe_t - 512); + gain = (10000 + ge); + + format[0] = (o_vtsmcu[0] + 3350 - oe); + format[1] = (o_vtsmcu[1] + 3350 - oe); + format[2] = (o_vtsmcu[2] + 3350 - oe); + format[3] = (o_vtsmcu[3] + 3350 - oe); + format[4] = (o_vtsmcu[4] + 3350 - oe); + format[5] = (o_vtsabb + 3350 - oe); + + for (i = 0; i < 6; i++) + x_roomt[i] = (((format[i] * 10000) / 4096) * 10000) / gain; + + temp0 = (10000 * 100000 / gain) * 15 / 18; + + if (!o_slope_sign) + mts = (temp0 * 10) / (1534 + o_slope * 10); + else + mts = (temp0 * 10) / (1534 - o_slope * 10); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + svsb->mts = mts; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + tb_roomt = x_roomt[3]; + break; + case SVSB_CPU_BIG: + tb_roomt = x_roomt[4]; + break; + case SVSB_CCI: + tb_roomt = x_roomt[3]; + break; + case SVSB_GPU: + tb_roomt = x_roomt[1]; + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + goto remove_mt8183_svsb_mon_mode; + } + + temp0 = (degc_cali * 10 / 2); + temp1 = ((10000 * 100000 / 4096 / gain) * + oe + tb_roomt * 10) * 15 / 18; + + if (!o_slope_sign) + temp2 = temp1 * 100 / (1534 + o_slope * 10); + else + temp2 = temp1 * 100 / (1534 - o_slope * 10); + + svsb->bts = (temp0 + temp2 - 250) * 4 / 10; + } + + return true; + +remove_mt8183_svsb_mon_mode: + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + svsb->mode_support &= ~SVSB_MODE_MON; + } + + return true; +} + +static bool svs_is_efuse_data_correct(struct svs_platform *svsp) +{ + struct nvmem_cell *cell; + + /* Get svs efuse by nvmem */ + cell = nvmem_cell_get(svsp->dev, "svs-calibration-data"); + if (IS_ERR(cell)) { + dev_err(svsp->dev, "no \"svs-calibration-data\"? 
%ld\n", + PTR_ERR(cell)); + return false; + } + + svsp->efuse = nvmem_cell_read(cell, &svsp->efuse_max); + if (IS_ERR(svsp->efuse)) { + dev_err(svsp->dev, "cannot read svs efuse: %ld\n", + PTR_ERR(svsp->efuse)); + nvmem_cell_put(cell); + return false; + } + + svsp->efuse_max /= sizeof(u32); + nvmem_cell_put(cell); + + return svsp->efuse_parsing(svsp); +} + +static struct device *svs_get_subsys_device(struct svs_platform *svsp, + const char *node_name) +{ + struct platform_device *pdev; + struct device_node *np; + + np = of_find_node_by_name(NULL, node_name); + if (!np) { + dev_err(svsp->dev, "cannot find %s node\n", node_name); + return ERR_PTR(-ENODEV); + } + + pdev = of_find_device_by_node(np); + if (!pdev) { + of_node_put(np); + dev_err(svsp->dev, "cannot find pdev by %s\n", node_name); + return ERR_PTR(-ENXIO); + } + + of_node_put(np); + + return &pdev->dev; +} + +static struct device *svs_add_device_link(struct svs_platform *svsp, + const char *node_name) +{ + struct device *dev; + struct device_link *sup_link; + + if (!node_name) { + dev_err(svsp->dev, "node name cannot be null\n"); + return ERR_PTR(-EINVAL); + } + + dev = svs_get_subsys_device(svsp, node_name); + if (IS_ERR(dev)) + return dev; + + sup_link = device_link_add(svsp->dev, dev, + DL_FLAG_AUTOREMOVE_CONSUMER); + if (!sup_link) { + dev_err(svsp->dev, "sup_link is NULL\n"); + return ERR_PTR(-EINVAL); + } + + if (sup_link->supplier->links.status != DL_DEV_DRIVER_BOUND) + return ERR_PTR(-EPROBE_DEFER); + + return dev; +} + +static int svs_mt8192_platform_probe(struct svs_platform *svsp) +{ + struct device *dev; + struct svs_bank *svsb; + u32 idx; + + svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst"); + if (IS_ERR(svsp->rst)) + return dev_err_probe(svsp->dev, PTR_ERR(svsp->rst), + "cannot get svs reset control\n"); + + dev = svs_add_device_link(svsp, "lvts"); + if (IS_ERR(dev)) + return dev_err_probe(svsp->dev, PTR_ERR(dev), + "failed to get lvts device\n"); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (svsb->type == SVSB_HIGH) + svsb->opp_dev = svs_add_device_link(svsp, "mali"); + else if (svsb->type == SVSB_LOW) + svsb->opp_dev = svs_get_subsys_device(svsp, "mali"); + + if (IS_ERR(svsb->opp_dev)) + return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), + "failed to get OPP device for bank %d\n", + idx); + } + + return 0; +} + +static int svs_mt8183_platform_probe(struct svs_platform *svsp) +{ + struct device *dev; + struct svs_bank *svsb; + u32 idx; + + dev = svs_add_device_link(svsp, "thermal"); + if (IS_ERR(dev)) + return dev_err_probe(svsp->dev, PTR_ERR(dev), + "failed to get thermal device\n"); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + case SVSB_CPU_BIG: + svsb->opp_dev = get_cpu_device(svsb->cpu_id); + break; + case SVSB_CCI: + svsb->opp_dev = svs_add_device_link(svsp, "cci"); + break; + case SVSB_GPU: + svsb->opp_dev = svs_add_device_link(svsp, "gpu"); + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + return -EINVAL; + } + + if (IS_ERR(svsb->opp_dev)) + return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), + "failed to get OPP device for bank %d\n", + idx); + } + + return 0; +} + +static struct svs_bank svs_mt8192_banks[] = { + { + .sw_id = SVSB_GPU, + .type = SVSB_LOW, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT, + .mode_support = SVSB_MODE_INIT02, + .opp_count = 
MAX_OPP_ENTRIES, + .freq_base = 688000000, + .turn_freq_base = 688000000, + .volt_step = 6250, + .volt_base = 400000, + .vmax = 0x60, + .vmin = 0x1a, + .age_config = 0x555555, + .dc_config = 0x1, + .dvt_fixed = 0x1, + .vco = 0x18, + .chk_shift = 0x87, + .core_sel = 0x0fff0100, + .int_st = BIT(0), + .ctl0 = 0x00540003, + }, + { + .sw_id = SVSB_GPU, + .type = SVSB_HIGH, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .tzone_name = "gpu1", + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | + SVSB_MON_VOLT_IGNORE, + .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 902000000, + .turn_freq_base = 688000000, + .volt_step = 6250, + .volt_base = 400000, + .vmax = 0x60, + .vmin = 0x1a, + .age_config = 0x555555, + .dc_config = 0x1, + .dvt_fixed = 0x6, + .vco = 0x18, + .chk_shift = 0x87, + .core_sel = 0x0fff0101, + .int_st = BIT(1), + .ctl0 = 0x00540003, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 0, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 7, + }, +}; + +static struct svs_bank svs_mt8183_banks[] = { + { + .sw_id = SVSB_CPU_LITTLE, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .cpu_id = 0, + .buck_name = "proc", + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 1989000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x64, + .vmin = 0x18, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x7, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0000, + .int_st = BIT(0), + .ctl0 = 0x00010001, + }, + { + .sw_id = SVSB_CPU_BIG, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .cpu_id = 4, + .buck_name = "proc", + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 1989000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x58, + .vmin = 0x10, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x7, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0001, + .int_st = BIT(1), + .ctl0 = 0x00000001, + }, + { + .sw_id = SVSB_CCI, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .buck_name = "proc", + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 1196000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x64, + .vmin = 0x18, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x7, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0002, + .int_st = BIT(2), + .ctl0 = 0x00100003, + }, + { + .sw_id = SVSB_GPU, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .buck_name = "mali", + .tzone_name = "tzts2", + .volt_flags = SVSB_INIT01_PD_REQ | + SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02 | + SVSB_MODE_MON, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 900000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x40, + .vmin = 0x14, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x3, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0003, + .int_st = BIT(3), + .ctl0 = 0x00050001, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 0, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 3, + }, +}; + 
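The vmax/vmin fields in the bank tables above are register codes, not microvolts. Assuming the usual SVS mapping that the volt_base/volt_step fields suggest (uV = volt_base + code * volt_step), a minimal user-space sketch decodes the mt8192 GPU-low limits; the program and its names are illustrative, not kernel code:

#include <stdio.h>

/* Decode the register-coded voltage limits from the mt8192 GPU-low bank,
 * assuming uV = volt_base + code * volt_step. */
int main(void)
{
        unsigned int volt_base = 400000;        /* uV */
        unsigned int volt_step = 6250;          /* uV per code */
        unsigned int vmax = 0x60, vmin = 0x1a;

        printf("vmax -> %u uV\n", volt_base + vmax * volt_step); /* 1000000 */
        printf("vmin -> %u uV\n", volt_base + vmin * volt_step); /*  562500 */
        return 0;
}

Under that assumption, 0x60 and 0x1a bound the bank between 1.0 V and 0.5625 V.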
+static const struct svs_platform_data svs_mt8192_platform_data = { + .name = "mt8192-svs", + .banks = svs_mt8192_banks, + .efuse_parsing = svs_mt8192_efuse_parsing, + .probe = svs_mt8192_platform_probe, + .irqflags = IRQF_TRIGGER_HIGH, + .regs = svs_regs_v2, + .bank_max = ARRAY_SIZE(svs_mt8192_banks), +}; + +static const struct svs_platform_data svs_mt8183_platform_data = { + .name = "mt8183-svs", + .banks = svs_mt8183_banks, + .efuse_parsing = svs_mt8183_efuse_parsing, + .probe = svs_mt8183_platform_probe, + .irqflags = IRQF_TRIGGER_LOW, + .regs = svs_regs_v2, + .bank_max = ARRAY_SIZE(svs_mt8183_banks), +}; + +static const struct of_device_id svs_of_match[] = { + { + .compatible = "mediatek,mt8192-svs", + .data = &svs_mt8192_platform_data, + }, { + .compatible = "mediatek,mt8183-svs", + .data = &svs_mt8183_platform_data, + }, { + /* Sentinel */ + }, +}; + +static struct svs_platform *svs_platform_probe(struct platform_device *pdev) +{ + struct svs_platform *svsp; + const struct svs_platform_data *svsp_data; + int ret; + + svsp_data = of_device_get_match_data(&pdev->dev); + if (!svsp_data) { + dev_err(&pdev->dev, "no svs platform data?\n"); + return ERR_PTR(-EPERM); + } + + svsp = devm_kzalloc(&pdev->dev, sizeof(*svsp), GFP_KERNEL); + if (!svsp) + return ERR_PTR(-ENOMEM); + + svsp->dev = &pdev->dev; + svsp->name = svsp_data->name; + svsp->banks = svsp_data->banks; + svsp->efuse_parsing = svsp_data->efuse_parsing; + svsp->probe = svsp_data->probe; + svsp->irqflags = svsp_data->irqflags; + svsp->regs = svsp_data->regs; + svsp->bank_max = svsp_data->bank_max; + + ret = svsp->probe(svsp); + if (ret) + return ERR_PTR(ret); + + return svsp; +} + +static int svs_probe(struct platform_device *pdev) +{ + struct svs_platform *svsp; + unsigned int svsp_irq; + int ret; + + svsp = svs_platform_probe(pdev); + if (IS_ERR(svsp)) + return PTR_ERR(svsp); + + if (!svs_is_efuse_data_correct(svsp)) { + dev_notice(svsp->dev, "efuse data isn't correct\n"); + ret = -EPERM; + goto svs_probe_free_resource; + } + + ret = svs_bank_resource_setup(svsp); + if (ret) { + dev_err(svsp->dev, "svs bank resource setup fail: %d\n", ret); + goto svs_probe_free_resource; + } + + svsp_irq = irq_of_parse_and_map(svsp->dev->of_node, 0); + ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr, + svsp->irqflags | IRQF_ONESHOT, + svsp->name, svsp); + if (ret) { + dev_err(svsp->dev, "register irq(%d) failed: %d\n", + svsp_irq, ret); + goto svs_probe_free_resource; + } + + svsp->main_clk = devm_clk_get(svsp->dev, "main"); + if (IS_ERR(svsp->main_clk)) { + dev_err(svsp->dev, "failed to get clock: %ld\n", + PTR_ERR(svsp->main_clk)); + ret = PTR_ERR(svsp->main_clk); + goto svs_probe_free_resource; + } + + ret = clk_prepare_enable(svsp->main_clk); + if (ret) { + dev_err(svsp->dev, "cannot enable main clk: %d\n", ret); + goto svs_probe_free_resource; + } + + svsp->base = of_iomap(svsp->dev->of_node, 0); + if (IS_ERR_OR_NULL(svsp->base)) { + dev_err(svsp->dev, "cannot find svs register base\n"); + ret = -EINVAL; + goto svs_probe_clk_disable; + } + + ret = svs_start(svsp); + if (ret) { + dev_err(svsp->dev, "svs start fail: %d\n", ret); + goto svs_probe_iounmap; + } + + ret = svs_create_debug_cmds(svsp); + if (ret) { + dev_err(svsp->dev, "svs create debug cmds fail: %d\n", ret); + goto svs_probe_iounmap; + } + + return 0; + +svs_probe_iounmap: + iounmap(svsp->base); + +svs_probe_clk_disable: + clk_disable_unprepare(svsp->main_clk); + +svs_probe_free_resource: + if (!IS_ERR_OR_NULL(svsp->efuse)) + kfree(svsp->efuse); + if 
(!IS_ERR_OR_NULL(svsp->tefuse)) + kfree(svsp->tefuse); + + return ret; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(svs_pm_ops, svs_suspend, svs_resume); + +static struct platform_driver svs_driver = { + .probe = svs_probe, + .driver = { + .name = "mtk-svs", + .pm = &svs_pm_ops, + .of_match_table = of_match_ptr(svs_of_match), + }, +}; + +module_platform_driver(svs_driver); + +MODULE_AUTHOR("Roger Lu <roger.lu@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek SVS driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index e718b8735444..e0d7a5459562 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -129,7 +129,10 @@ config QCOM_RPMHPD config QCOM_RPMPD tristate "Qualcomm RPM Power domain driver" + depends on PM depends on QCOM_SMD_RPM + select PM_GENERIC_DOMAINS + select PM_GENERIC_DOMAINS_OF help QCOM RPM Power domain driver to support power-domains with performance states. The driver communicates a performance state @@ -228,4 +231,19 @@ config QCOM_APR application processor and QDSP6. APR is used by audio driver to configure QDSP6 ASM, ADM and AFE modules. + +config QCOM_ICC_BWMON + tristate "QCOM Interconnect Bandwidth Monitor driver" + depends on ARCH_QCOM || COMPILE_TEST + select PM_OPP + help + Sets up driver monitoring bandwidth on various interconnects and + based on that voting for interconnect bandwidth, adjusting their + speed to current demand. + Current implementation brings support for BWMON v4, used for example + on SDM845 to measure bandwidth between CPU (gladiator_noc) and Last + Level Cache (memnoc). Usage of this BWMON allows to remove some of + the fixed bandwidth votes from cpufreq (CPU nodes) thus achieve high + memory throughput even with lower CPU frequencies. + endmenu diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 70d5de69fd7b..d66604aff2b0 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -28,3 +28,4 @@ obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o +obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c index 3caabd873322..b4046f393575 100644 --- a/drivers/soc/qcom/apr.c +++ b/drivers/soc/qcom/apr.c @@ -377,17 +377,14 @@ static int apr_device_probe(struct device *dev) static void apr_device_remove(struct device *dev) { struct apr_device *adev = to_apr_device(dev); - struct apr_driver *adrv; + struct apr_driver *adrv = to_apr_driver(dev->driver); struct packet_router *apr = dev_get_drvdata(adev->dev.parent); - if (dev->driver) { - adrv = to_apr_driver(dev->driver); - if (adrv->remove) - adrv->remove(adev); - spin_lock(&apr->svcs_lock); - idr_remove(&apr->svcs_idr, adev->svc.id); - spin_unlock(&apr->svcs_lock); - } + if (adrv->remove) + adrv->remove(adev); + spin_lock(&apr->svcs_lock); + idr_remove(&apr->svcs_idr, adev->svc.id); + spin_unlock(&apr->svcs_lock); } static int apr_uevent(struct device *dev, struct kobj_uevent_env *env) diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c index dd872017f345..629a7188b576 100644 --- a/drivers/soc/qcom/cmd-db.c +++ b/drivers/soc/qcom/cmd-db.c @@ -141,13 +141,17 @@ static int cmd_db_get_header(const char *id, const struct entry_header **eh, const struct rsc_hdr *rsc_hdr; const struct entry_header *ent; int ret, i, j; - u8 query[8]; + u8 query[sizeof(ent->id)] __nonstring; ret = cmd_db_ready(); if (ret) return ret; - /* Pad out query 
string to same length as in DB */ + /* + * Pad out query string to same length as in DB. NOTE: the output + * query string is not necessarily '\0' terminated if it bumps up + * against the max size. That's OK and expected. + */ strncpy(query, id, sizeof(query)); for (i = 0; i < MAX_SLV_ID; i++) { diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c new file mode 100644 index 000000000000..7f8aca533cd3 --- /dev/null +++ b/drivers/soc/qcom/icc-bwmon.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2021-2022 Linaro Ltd + * Author: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>, based on + * previous work of Thara Gopinath and msm-4.9 downstream sources. + */ +#include <linux/interconnect.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/sizes.h> + +/* + * The BWMON samples data throughput within 'sample_ms' time. With three + * configurable thresholds (Low, Medium and High) gives four windows (called + * zones) of current bandwidth: + * + * Zone 0: byte count < THRES_LO + * Zone 1: THRES_LO < byte count < THRES_MED + * Zone 2: THRES_MED < byte count < THRES_HIGH + * Zone 3: THRES_HIGH < byte count + * + * Zones 0 and 2 are not used by this driver. + */ + +/* Internal sampling clock frequency */ +#define HW_TIMER_HZ 19200000 + +#define BWMON_GLOBAL_IRQ_STATUS 0x0 +#define BWMON_GLOBAL_IRQ_CLEAR 0x8 +#define BWMON_GLOBAL_IRQ_ENABLE 0xc +#define BWMON_GLOBAL_IRQ_ENABLE_ENABLE BIT(0) + +#define BWMON_IRQ_STATUS 0x100 +#define BWMON_IRQ_STATUS_ZONE_SHIFT 4 +#define BWMON_IRQ_CLEAR 0x108 +#define BWMON_IRQ_ENABLE 0x10c +#define BWMON_IRQ_ENABLE_ZONE1_SHIFT 5 +#define BWMON_IRQ_ENABLE_ZONE2_SHIFT 6 +#define BWMON_IRQ_ENABLE_ZONE3_SHIFT 7 +#define BWMON_IRQ_ENABLE_MASK (BIT(BWMON_IRQ_ENABLE_ZONE1_SHIFT) | \ + BIT(BWMON_IRQ_ENABLE_ZONE3_SHIFT)) + +#define BWMON_ENABLE 0x2a0 +#define BWMON_ENABLE_ENABLE BIT(0) + +#define BWMON_CLEAR 0x2a4 +#define BWMON_CLEAR_CLEAR BIT(0) + +#define BWMON_SAMPLE_WINDOW 0x2a8 +#define BWMON_THRESHOLD_HIGH 0x2ac +#define BWMON_THRESHOLD_MED 0x2b0 +#define BWMON_THRESHOLD_LOW 0x2b4 + +#define BWMON_ZONE_ACTIONS 0x2b8 +/* + * Actions to perform on some zone 'z' when current zone hits the threshold: + * Increment counter of zone 'z' + */ +#define BWMON_ZONE_ACTIONS_INCREMENT(z) (0x2 << ((z) * 2)) +/* Clear counter of zone 'z' */ +#define BWMON_ZONE_ACTIONS_CLEAR(z) (0x1 << ((z) * 2)) + +/* Zone 0 threshold hit: Clear zone count */ +#define BWMON_ZONE_ACTIONS_ZONE0 (BWMON_ZONE_ACTIONS_CLEAR(0)) + +/* Zone 1 threshold hit: Increment zone count & clear lower zones */ +#define BWMON_ZONE_ACTIONS_ZONE1 (BWMON_ZONE_ACTIONS_INCREMENT(1) | \ + BWMON_ZONE_ACTIONS_CLEAR(0)) + +/* Zone 2 threshold hit: Increment zone count & clear lower zones */ +#define BWMON_ZONE_ACTIONS_ZONE2 (BWMON_ZONE_ACTIONS_INCREMENT(2) | \ + BWMON_ZONE_ACTIONS_CLEAR(1) | \ + BWMON_ZONE_ACTIONS_CLEAR(0)) + +/* Zone 3 threshold hit: Increment zone count & clear lower zones */ +#define BWMON_ZONE_ACTIONS_ZONE3 (BWMON_ZONE_ACTIONS_INCREMENT(3) | \ + BWMON_ZONE_ACTIONS_CLEAR(2) | \ + BWMON_ZONE_ACTIONS_CLEAR(1) | \ + BWMON_ZONE_ACTIONS_CLEAR(0)) +/* Value for BWMON_ZONE_ACTIONS */ +#define BWMON_ZONE_ACTIONS_DEFAULT (BWMON_ZONE_ACTIONS_ZONE0 | \ + BWMON_ZONE_ACTIONS_ZONE1 << 8 | \ + BWMON_ZONE_ACTIONS_ZONE2 << 16 | \ 
+ BWMON_ZONE_ACTIONS_ZONE3 << 24) + +/* + * There is no clear documentation/explanation of BWMON_THRESHOLD_COUNT + * register. Based on observations, this is number of times one threshold has to + * be reached, to trigger interrupt in given zone. + * + * 0xff are maximum values meant to ignore the zones 0 and 2. + */ +#define BWMON_THRESHOLD_COUNT 0x2bc +#define BWMON_THRESHOLD_COUNT_ZONE1_SHIFT 8 +#define BWMON_THRESHOLD_COUNT_ZONE2_SHIFT 16 +#define BWMON_THRESHOLD_COUNT_ZONE3_SHIFT 24 +#define BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT 0xff +#define BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT 0xff + +/* BWMONv4 count registers use count unit of 64 kB */ +#define BWMON_COUNT_UNIT_KB 64 +#define BWMON_ZONE_COUNT 0x2d8 +#define BWMON_ZONE_MAX(zone) (0x2e0 + 4 * (zone)) + +struct icc_bwmon_data { + unsigned int sample_ms; + unsigned int default_highbw_kbps; + unsigned int default_medbw_kbps; + unsigned int default_lowbw_kbps; + u8 zone1_thres_count; + u8 zone3_thres_count; +}; + +struct icc_bwmon { + struct device *dev; + void __iomem *base; + int irq; + + unsigned int default_lowbw_kbps; + unsigned int sample_ms; + unsigned int max_bw_kbps; + unsigned int min_bw_kbps; + unsigned int target_kbps; + unsigned int current_kbps; +}; + +static void bwmon_clear_counters(struct icc_bwmon *bwmon) +{ + /* + * Clear counters. The order and barriers are + * important. Quoting downstream Qualcomm msm-4.9 tree: + * + * The counter clear and IRQ clear bits are not in the same 4KB + * region. So, we need to make sure the counter clear is completed + * before we try to clear the IRQ or do any other counter operations. + */ + writel(BWMON_CLEAR_CLEAR, bwmon->base + BWMON_CLEAR); +} + +static void bwmon_clear_irq(struct icc_bwmon *bwmon) +{ + /* + * Clear zone and global interrupts. The order and barriers are + * important. Quoting downstream Qualcomm msm-4.9 tree: + * + * Synchronize the local interrupt clear in mon_irq_clear() + * with the global interrupt clear here. Otherwise, the CPU + * may reorder the two writes and clear the global interrupt + * before the local interrupt, causing the global interrupt + * to be retriggered by the local interrupt still being high. + * + * Similarly, because the global registers are in a different + * region than the local registers, we need to ensure any register + * writes to enable the monitor after this call are ordered with the + * clearing here so that local writes don't happen before the + * interrupt is cleared. + */ + writel(BWMON_IRQ_ENABLE_MASK, bwmon->base + BWMON_IRQ_CLEAR); + writel(BIT(0), bwmon->base + BWMON_GLOBAL_IRQ_CLEAR); +} + +static void bwmon_disable(struct icc_bwmon *bwmon) +{ + /* Disable interrupts. Strict ordering, see bwmon_clear_irq(). */ + writel(0x0, bwmon->base + BWMON_GLOBAL_IRQ_ENABLE); + writel(0x0, bwmon->base + BWMON_IRQ_ENABLE); + + /* + * Disable bwmon. Must happen before bwmon_clear_irq() to avoid spurious + * IRQ. 
+ */ + writel(0x0, bwmon->base + BWMON_ENABLE); +} + +static void bwmon_enable(struct icc_bwmon *bwmon, unsigned int irq_enable) +{ + /* Enable interrupts */ + writel(BWMON_GLOBAL_IRQ_ENABLE_ENABLE, + bwmon->base + BWMON_GLOBAL_IRQ_ENABLE); + writel(irq_enable, bwmon->base + BWMON_IRQ_ENABLE); + + /* Enable bwmon */ + writel(BWMON_ENABLE_ENABLE, bwmon->base + BWMON_ENABLE); +} + +static unsigned int bwmon_kbps_to_count(unsigned int kbps) +{ + return kbps / BWMON_COUNT_UNIT_KB; +} + +static void bwmon_set_threshold(struct icc_bwmon *bwmon, unsigned int reg, + unsigned int kbps) +{ + unsigned int thres; + + thres = mult_frac(bwmon_kbps_to_count(kbps), bwmon->sample_ms, + MSEC_PER_SEC); + writel_relaxed(thres, bwmon->base + reg); +} + +static void bwmon_start(struct icc_bwmon *bwmon, + const struct icc_bwmon_data *data) +{ + unsigned int thres_count; + int window; + + bwmon_clear_counters(bwmon); + + window = mult_frac(bwmon->sample_ms, HW_TIMER_HZ, MSEC_PER_SEC); + /* Maximum sampling window: 0xfffff */ + writel_relaxed(window, bwmon->base + BWMON_SAMPLE_WINDOW); + + bwmon_set_threshold(bwmon, BWMON_THRESHOLD_HIGH, + data->default_highbw_kbps); + bwmon_set_threshold(bwmon, BWMON_THRESHOLD_MED, + data->default_medbw_kbps); + bwmon_set_threshold(bwmon, BWMON_THRESHOLD_LOW, + data->default_lowbw_kbps); + + thres_count = data->zone3_thres_count << BWMON_THRESHOLD_COUNT_ZONE3_SHIFT | + BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT << BWMON_THRESHOLD_COUNT_ZONE2_SHIFT | + data->zone1_thres_count << BWMON_THRESHOLD_COUNT_ZONE1_SHIFT | + BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT; + writel_relaxed(thres_count, bwmon->base + BWMON_THRESHOLD_COUNT); + writel_relaxed(BWMON_ZONE_ACTIONS_DEFAULT, + bwmon->base + BWMON_ZONE_ACTIONS); + /* Write barriers in bwmon_clear_irq() */ + + bwmon_clear_irq(bwmon); + bwmon_enable(bwmon, BWMON_IRQ_ENABLE_MASK); +} + +static irqreturn_t bwmon_intr(int irq, void *dev_id) +{ + struct icc_bwmon *bwmon = dev_id; + unsigned int status, max; + int zone; + + status = readl(bwmon->base + BWMON_IRQ_STATUS); + status &= BWMON_IRQ_ENABLE_MASK; + if (!status) { + /* + * Only zone 1 and zone 3 interrupts are enabled but zone 2 + * threshold could be hit and trigger interrupt even if not + * enabled. + * Such spurious interrupt might come with valuable max count or + * not, so solution would be to always check all + * BWMON_ZONE_MAX() registers to find the highest value. + * Such case is currently ignored. + */ + return IRQ_NONE; + } + + bwmon_disable(bwmon); + + zone = get_bitmask_order(status >> BWMON_IRQ_STATUS_ZONE_SHIFT) - 1; + /* + * Zone max bytes count register returns count units within sampling + * window. Downstream kernel for BWMONv4 (called BWMON type 2 in + * downstream) always increments the max bytes count by one. 
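To make the count units concrete, here is a small user-space sketch of the conversion performed just below in bwmon_intr(): (count + 1) units of 64 kB, measured within the sampling window, scaled to kBps. The function name and sample values are illustrative:

#include <stdio.h>

#define COUNT_UNIT_KB 64        /* BWMONv4 counts in 64 kB units */
#define MSEC_PER_SEC  1000UL

/* (count + 1) * 64 kB observed within sample_ms, scaled to kB per second;
 * the kernel does the same scaling with mult_frac(). */
static unsigned long count_to_kbps(unsigned long count, unsigned long sample_ms)
{
        unsigned long kb = (count + 1) * COUNT_UNIT_KB;

        return kb * MSEC_PER_SEC / sample_ms;
}

int main(void)
{
        /* A zone-max count of 300 in a 4 ms window is ~4.8 GBps */
        printf("%lu kBps\n", count_to_kbps(300, 4));
        return 0;
}

With the 4 ms window used by the msm8998 data below, a zone-max count of 300 lands near the 4800 * 1024 kBps default high threshold.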
+ */ + max = readl(bwmon->base + BWMON_ZONE_MAX(zone)) + 1; + max *= BWMON_COUNT_UNIT_KB; + bwmon->target_kbps = mult_frac(max, MSEC_PER_SEC, bwmon->sample_ms); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t bwmon_intr_thread(int irq, void *dev_id) +{ + struct icc_bwmon *bwmon = dev_id; + unsigned int irq_enable = 0; + struct dev_pm_opp *opp, *target_opp; + unsigned int bw_kbps, up_kbps, down_kbps; + + bw_kbps = bwmon->target_kbps; + + target_opp = dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_kbps, 0); + if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE) + target_opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0); + + bwmon->target_kbps = bw_kbps; + + bw_kbps--; + opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0); + if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE) + down_kbps = bwmon->target_kbps; + else + down_kbps = bw_kbps; + + up_kbps = bwmon->target_kbps + 1; + + if (bwmon->target_kbps >= bwmon->max_bw_kbps) + irq_enable = BIT(BWMON_IRQ_ENABLE_ZONE1_SHIFT); + else if (bwmon->target_kbps <= bwmon->min_bw_kbps) + irq_enable = BIT(BWMON_IRQ_ENABLE_ZONE3_SHIFT); + else + irq_enable = BWMON_IRQ_ENABLE_MASK; + + bwmon_set_threshold(bwmon, BWMON_THRESHOLD_HIGH, up_kbps); + bwmon_set_threshold(bwmon, BWMON_THRESHOLD_MED, down_kbps); + /* Write barriers in bwmon_clear_counters() */ + bwmon_clear_counters(bwmon); + bwmon_clear_irq(bwmon); + bwmon_enable(bwmon, irq_enable); + + if (bwmon->target_kbps == bwmon->current_kbps) + goto out; + + dev_pm_opp_set_opp(bwmon->dev, target_opp); + bwmon->current_kbps = bwmon->target_kbps; + +out: + dev_pm_opp_put(target_opp); + if (!IS_ERR(opp)) + dev_pm_opp_put(opp); + + return IRQ_HANDLED; +} + +static int bwmon_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dev_pm_opp *opp; + struct icc_bwmon *bwmon; + const struct icc_bwmon_data *data; + int ret; + + bwmon = devm_kzalloc(dev, sizeof(*bwmon), GFP_KERNEL); + if (!bwmon) + return -ENOMEM; + + data = of_device_get_match_data(dev); + + bwmon->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(bwmon->base)) { + dev_err(dev, "failed to map bwmon registers\n"); + return PTR_ERR(bwmon->base); + } + + bwmon->irq = platform_get_irq(pdev, 0); + if (bwmon->irq < 0) + return bwmon->irq; + + ret = devm_pm_opp_of_add_table(dev); + if (ret) + return dev_err_probe(dev, ret, "failed to add OPP table\n"); + + bwmon->max_bw_kbps = UINT_MAX; + opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0); + if (IS_ERR(opp)) + return dev_err_probe(dev, ret, "failed to find max peak bandwidth\n"); + + bwmon->min_bw_kbps = 0; + opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0); + if (IS_ERR(opp)) + return dev_err_probe(dev, ret, "failed to find min peak bandwidth\n"); + + bwmon->sample_ms = data->sample_ms; + bwmon->default_lowbw_kbps = data->default_lowbw_kbps; + bwmon->dev = dev; + + bwmon_disable(bwmon); + ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr, + bwmon_intr_thread, + IRQF_ONESHOT, dev_name(dev), bwmon); + if (ret) + return dev_err_probe(dev, ret, "failed to request IRQ\n"); + + platform_set_drvdata(pdev, bwmon); + bwmon_start(bwmon, data); + + return 0; +} + +static int bwmon_remove(struct platform_device *pdev) +{ + struct icc_bwmon *bwmon = platform_get_drvdata(pdev); + + bwmon_disable(bwmon); + + return 0; +} + +/* BWMON v4 */ +static const struct icc_bwmon_data msm8998_bwmon_data = { + .sample_ms = 4, + .default_highbw_kbps = 4800 * 1024, /* 4.8 GBps */ + .default_medbw_kbps = 512 * 1024, /* 512 MBps */ + .default_lowbw_kbps = 0, + 
.zone1_thres_count = 16, + .zone3_thres_count = 1, +}; + +static const struct of_device_id bwmon_of_match[] = { + { .compatible = "qcom,msm8998-bwmon", .data = &msm8998_bwmon_data }, + {} +}; +MODULE_DEVICE_TABLE(of, bwmon_of_match); + +static struct platform_driver bwmon_driver = { + .probe = bwmon_probe, + .remove = bwmon_remove, + .driver = { + .name = "qcom-bwmon", + .of_match_table = bwmon_of_match, + }, +}; +module_platform_driver(bwmon_driver); + +MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>"); +MODULE_DESCRIPTION("QCOM BWMON driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index 4b143cf7b4ce..38d7296315a2 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -382,7 +382,7 @@ static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER; * llcc_slice_getd - get llcc slice descriptor * @uid: usecase_id for the client * - * A pointer to llcc slice descriptor will be returned on success and + * A pointer to llcc slice descriptor will be returned on success * and error pointer is returned on failure */ struct llcc_slice_desc *llcc_slice_getd(u32 uid) diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index 366db493579b..3f11554df2f3 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -108,6 +108,8 @@ EXPORT_SYMBOL_GPL(qcom_mdt_get_size); * qcom_mdt_read_metadata() - read header and metadata from mdt or mbn * @fw: firmware of mdt header or mbn * @data_len: length of the read metadata blob + * @fw_name: name of the firmware, for construction of segment file names + * @dev: device handle to associate resources with * * The mechanism that performs the authentication of the loading firmware * expects an ELF header directly followed by the segment of hashes, with no @@ -192,7 +194,7 @@ EXPORT_SYMBOL_GPL(qcom_mdt_read_metadata); * qcom_mdt_pas_init() - initialize PAS region for firmware loading * @dev: device handle to associate resources with * @fw: firmware object for the mdt file - * @firmware: name of the firmware, for construction of segment file names + * @fw_name: name of the firmware, for construction of segment file names * @pas_id: PAS identifier * @mem_phys: physical address of allocated memory region * @ctx: PAS metadata context, to be released by caller diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c index 97fd24c178f8..c92d26b73e6f 100644 --- a/drivers/soc/qcom/ocmem.c +++ b/drivers/soc/qcom/ocmem.c @@ -194,14 +194,17 @@ struct ocmem *of_get_ocmem(struct device *dev) devnode = of_parse_phandle(dev->of_node, "sram", 0); if (!devnode || !devnode->parent) { dev_err(dev, "Cannot look up sram phandle\n"); + of_node_put(devnode); return ERR_PTR(-ENODEV); } pdev = of_find_device_by_node(devnode->parent); if (!pdev) { dev_err(dev, "Cannot find device node %s\n", devnode->name); + of_node_put(devnode); return ERR_PTR(-EPROBE_DEFER); } + of_node_put(devnode); ocmem = platform_get_drvdata(pdev); if (!ocmem) { diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index a59bb34e5eba..18c856056475 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -399,8 +399,10 @@ static int qmp_cooling_devices_register(struct qmp *qmp) continue; ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++], child); - if (ret) + if (ret) { + of_node_put(child); goto unroll; + } } if (!count) diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c index 
05fff8691ee3..092f6ab09acf 100644 --- a/drivers/soc/qcom/rpmhpd.c +++ b/drivers/soc/qcom/rpmhpd.c @@ -23,8 +23,8 @@ /** * struct rpmhpd - top level RPMh power domain resource data structure * @dev: rpmh power domain controller device - * @pd: generic_pm_domain corrresponding to the power domain - * @parent: generic_pm_domain corrresponding to the parent's power domain + * @pd: generic_pm_domain corresponding to the power domain + * @parent: generic_pm_domain corresponding to the parent's power domain * @peer: A peer power domain in case Active only Voting is * supported * @active_only: True if it represents an Active only peer diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c index 3b5b91621532..5803038c744e 100644 --- a/drivers/soc/qcom/rpmpd.c +++ b/drivers/soc/qcom/rpmpd.c @@ -453,6 +453,7 @@ static const struct rpmpd_desc qcm2290_desc = { static const struct of_device_id rpmpd_match_table[] = { { .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc }, { .compatible = "qcom,msm8226-rpmpd", .data = &msm8226_desc }, + { .compatible = "qcom,msm8909-rpmpd", .data = &msm8916_desc }, { .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc }, { .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc }, { .compatible = "qcom,msm8953-rpmpd", .data = &msm8953_desc }, diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index 30dda1af63c8..413f9f4ae9cd 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c @@ -234,6 +234,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = { { .compatible = "qcom,rpm-apq8084" }, { .compatible = "qcom,rpm-ipq6018" }, { .compatible = "qcom,rpm-msm8226" }, + { .compatible = "qcom,rpm-msm8909" }, { .compatible = "qcom,rpm-msm8916" }, { .compatible = "qcom,rpm-msm8936" }, { .compatible = "qcom,rpm-msm8953" }, diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index 3e95835653ea..4f163d62942c 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c @@ -926,7 +926,7 @@ qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host) struct smem_partition_header *header; struct smem_ptable_entry *entry; struct smem_ptable *ptable; - unsigned int remote_host; + u16 remote_host; u16 host0, host1; int i; @@ -951,12 +951,12 @@ qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host) continue; if (remote_host >= SMEM_HOST_COUNT) { - dev_err(smem->dev, "bad host %hu\n", remote_host); + dev_err(smem->dev, "bad host %u\n", remote_host); return -EINVAL; } if (smem->partitions[remote_host].virt_base) { - dev_err(smem->dev, "duplicate host %hu\n", remote_host); + dev_err(smem->dev, "duplicate host %u\n", remote_host); return -EINVAL; } diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index 59dbf4b61e6c..d9c28a8a7cbf 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -119,6 +119,9 @@ struct smp2p_entry { * @out: pointer to the outbound smem item * @smem_items: ids of the two smem items * @valid_entries: already scanned inbound entries + * @ssr_ack_enabled: SMP2P_FEATURE_SSR_ACK feature is supported and was enabled + * @ssr_ack: current cached state of the local ack bit + * @negotiation_done: whether negotiating finished * @local_pid: processor id of the inbound edge * @remote_pid: processor id of the outbound edge * @ipc_regmap: regmap for the outbound ipc diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index cee579a267a6..4554fb8655d3 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ 
-328,10 +328,12 @@ static const struct soc_id soc_id[] = { { 455, "QRB5165" }, { 457, "SM8450" }, { 459, "SM7225" }, - { 460, "SA8540P" }, + { 460, "SA8295P" }, + { 461, "SA8540P" }, { 480, "SM8450" }, { 482, "SM8450" }, { 487, "SC7280" }, + { 495, "SC7180P" }, }; static const char *socinfo_machine(struct device *dev, unsigned int id) diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c index f831420b7fd4..484b42b7454e 100644 --- a/drivers/soc/qcom/spm.c +++ b/drivers/soc/qcom/spm.c @@ -74,6 +74,18 @@ static const u16 spm_reg_offset_v3_0[SPM_REG_NR] = { [SPM_REG_SEQ_ENTRY] = 0x400, }; +/* SPM register data for 8909 */ +static const struct spm_reg_data spm_reg_8909_cpu = { + .reg_offset = spm_reg_offset_v3_0, + .spm_cfg = 0x1, + .spm_dly = 0x3C102800, + .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90, + 0x5B, 0x60, 0x03, 0x60, 0x76, 0x76, 0x0B, 0x94, 0x5B, 0x80, + 0x10, 0x26, 0x30, 0x0F }, + .start_index[PM_SLEEP_MODE_STBY] = 0, + .start_index[PM_SLEEP_MODE_SPC] = 5, +}; + /* SPM register data for 8916 */ static const struct spm_reg_data spm_reg_8916_cpu = { .reg_offset = spm_reg_offset_v3_0, @@ -195,6 +207,8 @@ static const struct of_device_id spm_match_table[] = { .data = &spm_reg_660_silver_l2 }, { .compatible = "qcom,msm8226-saw2-v2.1-cpu", .data = &spm_reg_8226_cpu }, + { .compatible = "qcom,msm8909-saw2-v3.0-cpu", + .data = &spm_reg_8909_cpu }, { .compatible = "qcom,msm8916-saw2-v3.0-cpu", .data = &spm_reg_8916_cpu }, { .compatible = "qcom,msm8974-saw2-v2.1-cpu", diff --git a/drivers/soc/renesas/r8a779a0-sysc.c b/drivers/soc/renesas/r8a779a0-sysc.c index fdfc857df334..04f1bc322ae7 100644 --- a/drivers/soc/renesas/r8a779a0-sysc.c +++ b/drivers/soc/renesas/r8a779a0-sysc.c @@ -57,11 +57,11 @@ static struct rcar_gen4_sysc_area r8a779a0_areas[] __initdata = { { "a2cv6", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR }, { "a2cn2", R8A779A0_PD_A2CN2, R8A779A0_PD_A3IR }, { "a2imp23", R8A779A0_PD_A2IMP23, R8A779A0_PD_A3IR }, - { "a2dp1", R8A779A0_PD_A2DP0, R8A779A0_PD_A3IR }, - { "a2cv2", R8A779A0_PD_A2CV0, R8A779A0_PD_A3IR }, - { "a2cv3", R8A779A0_PD_A2CV1, R8A779A0_PD_A3IR }, - { "a2cv5", R8A779A0_PD_A2CV4, R8A779A0_PD_A3IR }, - { "a2cv7", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR }, + { "a2dp1", R8A779A0_PD_A2DP1, R8A779A0_PD_A3IR }, + { "a2cv2", R8A779A0_PD_A2CV2, R8A779A0_PD_A3IR }, + { "a2cv3", R8A779A0_PD_A2CV3, R8A779A0_PD_A3IR }, + { "a2cv5", R8A779A0_PD_A2CV5, R8A779A0_PD_A3IR }, + { "a2cv7", R8A779A0_PD_A2CV7, R8A779A0_PD_A3IR }, { "a2cn1", R8A779A0_PD_A2CN1, R8A779A0_PD_A3IR }, { "a1cnn0", R8A779A0_PD_A1CNN0, R8A779A0_PD_A2CN0 }, { "a1cnn2", R8A779A0_PD_A1CNN2, R8A779A0_PD_A2CN2 }, diff --git a/drivers/soc/renesas/rcar-gen4-sysc.h b/drivers/soc/renesas/rcar-gen4-sysc.h index fe2d98254754..388cfa8f8f9f 100644 --- a/drivers/soc/renesas/rcar-gen4-sysc.h +++ b/drivers/soc/renesas/rcar-gen4-sysc.h @@ -25,8 +25,8 @@ struct rcar_gen4_sysc_area { const char *name; u8 pdr; /* PDRn */ - int parent; /* -1 if none */ - unsigned int flags; /* See PD_* */ + s8 parent; /* -1 if none */ + u8 flags; /* See PD_* */ }; /* diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h index 8d861c1cfdf7..266c599a0a9b 100644 --- a/drivers/soc/renesas/rcar-sysc.h +++ b/drivers/soc/renesas/rcar-sysc.h @@ -31,8 +31,8 @@ struct rcar_sysc_area { u16 chan_offs; /* Offset of PWRSR register for this area */ u8 chan_bit; /* Bit in PWR* (except for PWRUP in PWRSR) */ u8 isr_bit; /* Bit in SYSCI*R */ - int parent; /* -1 if none */ - unsigned int flags; /* See PD_* */ + s8 parent; /* -1 
if none */ + u8 flags; /* See PD_* */ }; diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig index 1fef0e711056..8aecbc9b1976 100644 --- a/drivers/soc/sunxi/Kconfig +++ b/drivers/soc/sunxi/Kconfig @@ -6,6 +6,7 @@ config SUNXI_MBUS bool default ARCH_SUNXI + depends on ARM || ARM64 help Say y to enable the fixups needed to support the Allwinner MBUS DMA quirks. diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c index 0e4ba0f89533..6882c86b3ce5 100644 --- a/drivers/soc/ti/pruss.c +++ b/drivers/soc/ti/pruss.c @@ -338,6 +338,7 @@ static const struct of_device_id pruss_of_match[] = { { .compatible = "ti,am654-icssg", .data = &am65x_j721e_pruss_data, }, { .compatible = "ti,j721e-icssg", .data = &am65x_j721e_pruss_data, }, { .compatible = "ti,am642-icssg", .data = &am65x_j721e_pruss_data, }, + { .compatible = "ti,am625-pruss", .data = &am65x_j721e_pruss_data, }, {}, }; MODULE_DEVICE_TABLE(of, pruss_of_match); diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index 0076d467ff6b..343c58ed5896 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -688,7 +688,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) &m3_ipc->sd_fw_name); if (ret) { dev_dbg(dev, "Voltage scaling data blob not provided from DT.\n"); - }; + } /* * Wait for firmware loading completion in a thread so we diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c index 5dcb7665fe22..2de082765bef 100644 --- a/drivers/soc/xilinx/xlnx_event_manager.c +++ b/drivers/soc/xilinx/xlnx_event_manager.c @@ -647,8 +647,7 @@ static int xlnx_event_manager_probe(struct platform_device *pdev) cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting", xlnx_event_cpuhp_start, xlnx_event_cpuhp_down); - ret = zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num, - 0, NULL); + ret = zynqmp_pm_register_sgi(sgi_num, 0); if (ret) { dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret); xlnx_event_cleanup_sgi(pdev); @@ -681,7 +680,7 @@ static int xlnx_event_manager_remove(struct platform_device *pdev) kfree(eve_data); } - ret = zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, 0, 1, NULL); + ret = zynqmp_pm_register_sgi(0, 1); if (ret) dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 3b1044ebc400..35ce57878b27 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -183,7 +183,7 @@ config SPI_BCM63XX config SPI_BCM63XX_HSSPI tristate "Broadcom BCM63XX HS SPI controller driver" - depends on BCM63XX || BMIPS_GENERIC || ARCH_BCM_63XX || COMPILE_TEST + depends on BCM63XX || BMIPS_GENERIC || ARCH_BCMBCA || COMPILE_TEST help This enables support for the High Speed SPI controller present on newer Broadcom BCM63XX SoCs. 
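Several of the hunks above (pruss, spm, rpmpd, smd-rpm) add SoC support as a single new row in an of_device_id table, often reusing existing per-SoC data. A minimal sketch of that compatible-plus-match-data pattern, with purely illustrative names:

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Illustrative per-SoC data; the real drivers above use their own types. */
struct demo_soc_data {
        unsigned int quirks;
};

static const struct demo_soc_data demo_data_a = { .quirks = 0 };

static const struct of_device_id demo_of_match[] = {
        /* Adding a SoC is often just one more row reusing existing data */
        { .compatible = "vendor,demo-soc-a", .data = &demo_data_a },
        { .compatible = "vendor,demo-soc-b", .data = &demo_data_a },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static int demo_probe(struct platform_device *pdev)
{
        const struct demo_soc_data *data = of_device_get_match_data(&pdev->dev);

        if (!data)
                return -ENODEV;
        dev_info(&pdev->dev, "quirks: %u\n", data->quirks);
        return 0;
}

static struct platform_driver demo_driver = {
        .probe = demo_probe,
        .driver = {
                .name = "demo",
                .of_match_table = demo_of_match,
        },
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");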
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c index cba6a4486c24..efdcbe6c4c26 100644 --- a/drivers/spi/spi-amd.c +++ b/drivers/spi/spi-amd.c @@ -33,6 +33,7 @@ #define AMD_SPI_RX_COUNT_REG 0x4B #define AMD_SPI_STATUS_REG 0x4C +#define AMD_SPI_FIFO_SIZE 70 #define AMD_SPI_MEM_SIZE 200 /* M_CMD OP codes for SPI */ @@ -270,6 +271,11 @@ static int amd_spi_master_transfer(struct spi_master *master, return 0; } +static size_t amd_spi_max_transfer_size(struct spi_device *spi) +{ + return AMD_SPI_FIFO_SIZE; +} + static int amd_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -302,6 +308,8 @@ static int amd_spi_probe(struct platform_device *pdev) master->flags = SPI_MASTER_HALF_DUPLEX; master->setup = amd_spi_master_setup; master->transfer_one_message = amd_spi_master_transfer; + master->max_transfer_size = amd_spi_max_transfer_size; + master->max_message_size = amd_spi_max_transfer_size; /* Register the controller with SPI framework */ err = devm_spi_register_master(dev, master); diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c index 496f3e1e9079..3e891bf22470 100644 --- a/drivers/spi/spi-aspeed-smc.c +++ b/drivers/spi/spi-aspeed-smc.c @@ -558,6 +558,14 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc) u32 ctl_val; int ret = 0; + dev_dbg(aspi->dev, + "CE%d %s dirmap [ 0x%.8llx - 0x%.8llx ] OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x\n", + chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write", + desc->info.offset, desc->info.offset + desc->info.length, + op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, + op->dummy.buswidth, op->data.buswidth, + op->addr.nbytes, op->dummy.nbytes); + chip->clk_freq = desc->mem->spi->max_speed_hz; /* Only for reads */ @@ -574,9 +582,11 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc) ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK; ctl_val |= aspeed_spi_get_io_mode(op) | op->cmd.opcode << CTRL_COMMAND_SHIFT | - CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth) | CTRL_IO_MODE_READ; + if (op->dummy.nbytes) + ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth); + /* Tune 4BYTE address mode */ if (op->addr.nbytes) { u32 addr_mode = readl(aspi->regs + CE_CTRL_REG); diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 775c0bf2f923..0933948d7df3 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -1138,10 +1138,14 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr, struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); /* if an error occurred and we have an active dma, then terminate */ - dmaengine_terminate_sync(ctlr->dma_tx); - bs->tx_dma_active = false; - dmaengine_terminate_sync(ctlr->dma_rx); - bs->rx_dma_active = false; + if (ctlr->dma_tx) { + dmaengine_terminate_sync(ctlr->dma_tx); + bs->tx_dma_active = false; + } + if (ctlr->dma_rx) { + dmaengine_terminate_sync(ctlr->dma_rx); + bs->rx_dma_active = false; + } bcm2835_spi_undo_prologue(bs); /* and reset */ diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 2b9fc8449a62..72b1a5a2298c 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1578,8 +1578,7 @@ static int cqspi_probe(struct platform_device *pdev) ret = cqspi_of_get_pdata(cqspi); if (ret) { dev_err(dev, "Cannot get mandatory OF data.\n"); - ret = -ENODEV; - goto probe_master_put; + return -ENODEV; } /* Obtain QSPI clock. 
*/ @@ -1587,7 +1586,7 @@ static int cqspi_probe(struct platform_device *pdev) if (IS_ERR(cqspi->clk)) { dev_err(dev, "Cannot claim QSPI clock.\n"); ret = PTR_ERR(cqspi->clk); - goto probe_master_put; + return ret; } /* Obtain and remap controller address. */ @@ -1596,7 +1595,7 @@ static int cqspi_probe(struct platform_device *pdev) if (IS_ERR(cqspi->iobase)) { dev_err(dev, "Cannot remap controller address.\n"); ret = PTR_ERR(cqspi->iobase); - goto probe_master_put; + return ret; } /* Obtain and remap AHB address. */ @@ -1605,7 +1604,7 @@ static int cqspi_probe(struct platform_device *pdev) if (IS_ERR(cqspi->ahb_base)) { dev_err(dev, "Cannot remap AHB address.\n"); ret = PTR_ERR(cqspi->ahb_base); - goto probe_master_put; + return ret; } cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start; cqspi->ahb_size = resource_size(res_ahb); @@ -1614,15 +1613,13 @@ static int cqspi_probe(struct platform_device *pdev) /* Obtain IRQ line. */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - ret = -ENXIO; - goto probe_master_put; - } + if (irq < 0) + return -ENXIO; pm_runtime_enable(dev); ret = pm_runtime_resume_and_get(dev); if (ret < 0) - goto probe_master_put; + return ret; ret = clk_prepare_enable(cqspi->clk); if (ret) { @@ -1716,8 +1713,6 @@ probe_reset_failed: probe_clk_failed: pm_runtime_put_sync(dev); pm_runtime_disable(dev); -probe_master_put: - spi_master_put(master); return ret; } diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 31d778e9d255..6a7f7df1e776 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -69,7 +69,7 @@ #define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */ #define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */ #define CDNS_SPI_SS0 0x1 /* Slave Select zero */ -#define CDNS_SPI_NOSS 0x3C /* No Slave select */ +#define CDNS_SPI_NOSS 0xF /* No Slave select */ /* * SPI Interrupt Registers bit Masks diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 7a014eeec2d0..411b1307b7fd 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -613,6 +613,10 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, rspi->dma_callbacked, HZ); if (ret > 0 && rspi->dma_callbacked) { ret = 0; + if (tx) + dmaengine_synchronize(rspi->ctlr->dma_tx); + if (rx) + dmaengine_synchronize(rspi->ctlr->dma_rx); } else { if (!ret) { dev_err(&rspi->ctlr->dev, "DMA timeout\n"); diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index 33844526c797..02fdef7a16c8 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -2632,7 +2632,7 @@ static void hfa384x_usbctlx_reaper_task(struct work_struct *work) */ static void hfa384x_usbctlx_completion_task(struct work_struct *work) { - struct hfa384x *hw = container_of(work, struct hfa384x, reaper_bh); + struct hfa384x *hw = container_of(work, struct hfa384x, completion_bh); struct hfa384x_usbctlx *ctlx, *temp; unsigned long flags; diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index e68f1cc8ef98..6c8d8b051bfd 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -448,6 +448,9 @@ fd_execute_write_same(struct se_cmd *cmd) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } + if (!cmd->t_data_nents) + return TCM_INVALID_CDB_FIELD; + if (cmd->t_data_nents > 1 || cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) { pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u" diff --git 
a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 378c80313a0f..1ed9381751e6 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -494,6 +494,10 @@ iblock_execute_write_same(struct se_cmd *cmd) " backends not supported\n"); return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } + + if (!cmd->t_data_nents) + return TCM_INVALID_CDB_FIELD; + sg = &cmd->t_data_sg[0]; if (cmd->t_data_nents > 1 || diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index ca1b2312d6e7..f6132836eb38 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -312,6 +312,12 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *op pr_warn("WRITE SAME with ANCHOR not supported\n"); return TCM_INVALID_CDB_FIELD; } + + if (flags & 0x01) { + pr_warn("WRITE SAME with NDOB not supported\n"); + return TCM_INVALID_CDB_FIELD; + } + /* * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting * translated into block discard requests within backend code. diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h index c60896cf71cb..73b5e7760d10 100644 --- a/drivers/tee/optee/optee_smc.h +++ b/drivers/tee/optee/optee_smc.h @@ -189,7 +189,7 @@ struct optee_smc_call_get_os_revision_result { * Have config return register usage: * a0 OPTEE_SMC_RETURN_OK * a1 Physical address of start of SHM - * a2 Size of of SHM + * a2 Size of SHM * a3 Cache settings of memory, as defined by the * OPTEE_SMC_SHM_* values above * a4-7 Preserved diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index 385cb0aee610..a1c1fa1a9c28 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -884,8 +884,8 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx, rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params); rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs); - if (IS_ERR(arg)) - return PTR_ERR(arg); + if (IS_ERR(rpc_arg)) + return PTR_ERR(rpc_arg); } if (rpc_arg && tee_shm_is_dynamic(shm)) { diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index af0f7c603fa4..98da206cd761 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -1073,7 +1073,7 @@ EXPORT_SYMBOL_GPL(tee_device_unregister); /** * tee_get_drvdata() - Return driver_data pointer * @teedev: Device containing the driver_data pointer - * @returns the driver_data pointer supplied to tee_register(). + * @returns the driver_data pointer supplied to tee_device_alloc(). 
*/ void *tee_get_drvdata(struct tee_device *teedev) { diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c index b8151d95a806..b263b0fde03c 100644 --- a/drivers/thermal/cpufreq_cooling.c +++ b/drivers/thermal/cpufreq_cooling.c @@ -137,11 +137,9 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev, static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu, int cpu_idx) { - unsigned long max = arch_scale_cpu_capacity(cpu); - unsigned long util; + unsigned long util = sched_cpu_util(cpu); - util = sched_cpu_util(cpu, max); - return (util * 100) / max; + return (util * 100) / arch_scale_cpu_capacity(cpu); } #else /* !CONFIG_SMP */ static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu, diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c index cd80c7db4073..a9596e7562ea 100644 --- a/drivers/thermal/intel/intel_tcc_cooling.c +++ b/drivers/thermal/intel/intel_tcc_cooling.c @@ -81,6 +81,7 @@ static const struct x86_cpu_id tcc_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL), {} }; diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 43eb25b167bc..ccdf8a24ddc7 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -399,6 +399,10 @@ static const struct of_device_id rcar_gen3_thermal_dt_ids[] = { .compatible = "renesas,r8a779a0-thermal", .data = &rcar_gen3_ths_tj_1, }, + { + .compatible = "renesas,r8a779f0-thermal", + .data = &rcar_gen3_ths_tj_1, + }, {}, }; MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids); diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 74bfabe5b453..752dab3356d7 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -111,21 +111,11 @@ static void pty_unthrottle(struct tty_struct *tty) static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) { struct tty_struct *to = tty->link; - unsigned long flags; - if (tty->flow.stopped) + if (tty->flow.stopped || !c) return 0; - if (c > 0) { - spin_lock_irqsave(&to->port->lock, flags); - /* Stuff the data into the input queue of the other end */ - c = tty_insert_flip_string(to->port, buf, c); - spin_unlock_irqrestore(&to->port->lock, flags); - /* And shovel */ - if (c) - tty_flip_buffer_push(to->port); - } - return c; + return tty_insert_flip_string_and_push_buffer(to->port, buf, c); } /** diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index cfbd2de0ca6e..3f56dbc9432b 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -23,6 +23,7 @@ #include <linux/sysrq.h> #include <linux/delay.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/tty.h> #include <linux/ratelimit.h> #include <linux/tty_flip.h> @@ -559,6 +560,9 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev) up->port.dev = dev; + if (uart_console_enabled(&up->port)) + pm_runtime_get_sync(up->port.dev); + serial8250_apply_quirks(up); uart_add_one_port(drv, &up->port); } diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index 7133fceed35e..a8dba4a0a8fb 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -106,10 +106,10 @@ int serial8250_tx_dma(struct uart_8250_port *p) UART_XMIT_SIZE, 
DMA_TO_DEVICE); dma_async_issue_pending(dma->txchan); - if (dma->tx_err) { + serial8250_clear_THRI(p); + if (dma->tx_err) dma->tx_err = 0; - serial8250_clear_THRI(p); - } + return 0; err: dma->tx_err = 1; diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index f57bbd32ef11..bb6aca07ab56 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -47,7 +47,7 @@ #define RZN1_UART_xDMACR_DMA_EN BIT(0) #define RZN1_UART_xDMACR_1_WORD_BURST (0 << 1) #define RZN1_UART_xDMACR_4_WORD_BURST (1 << 1) -#define RZN1_UART_xDMACR_8_WORD_BURST (3 << 1) +#define RZN1_UART_xDMACR_8_WORD_BURST (2 << 1) #define RZN1_UART_xDMACR_BLK_SZ(x) ((x) << 3) /* Quirks */ @@ -773,18 +773,18 @@ static const struct of_device_id dw8250_of_match[] = { MODULE_DEVICE_TABLE(of, dw8250_of_match); static const struct acpi_device_id dw8250_acpi_match[] = { - { "INT33C4", 0 }, - { "INT33C5", 0 }, - { "INT3434", 0 }, - { "INT3435", 0 }, - { "80860F0A", 0 }, - { "8086228A", 0 }, - { "APMC0D08", 0}, - { "AMD0020", 0 }, - { "AMDI0020", 0 }, - { "AMDI0022", 0 }, - { "BRCM2032", 0 }, - { "HISI0031", 0 }, + { "80860F0A", (kernel_ulong_t)&dw8250_dw_apb }, + { "8086228A", (kernel_ulong_t)&dw8250_dw_apb }, + { "AMD0020", (kernel_ulong_t)&dw8250_dw_apb }, + { "AMDI0020", (kernel_ulong_t)&dw8250_dw_apb }, + { "AMDI0022", (kernel_ulong_t)&dw8250_dw_apb }, + { "APMC0D08", (kernel_ulong_t)&dw8250_dw_apb}, + { "BRCM2032", (kernel_ulong_t)&dw8250_dw_apb }, + { "HISI0031", (kernel_ulong_t)&dw8250_dw_apb }, + { "INT33C4", (kernel_ulong_t)&dw8250_dw_apb }, + { "INT33C5", (kernel_ulong_t)&dw8250_dw_apb }, + { "INT3434", (kernel_ulong_t)&dw8250_dw_apb }, + { "INT3435", (kernel_ulong_t)&dw8250_dw_apb }, { }, }; MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match); diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 8f32fe9e149e..3c36a06a20b0 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1949,7 +1949,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) if ((status & UART_LSR_THRE) && (up->ier & UART_IER_THRI)) { if (!up->dma || up->dma->tx_err) serial8250_tx_chars(up); - else + else if (!up->dma->tx_running) __stop_tx(up); } @@ -2975,8 +2975,10 @@ static int serial8250_request_std_resource(struct uart_8250_port *up) case UPIO_MEM32BE: case UPIO_MEM16: case UPIO_MEM: - if (!port->mapbase) + if (!port->mapbase) { + ret = -EINVAL; break; + } if (!request_mem_region(port->mapbase, size, "serial")) { ret = -EBUSY; diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index a452748c69b2..7172cd1792df 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1099,8 +1099,8 @@ config SERIAL_TIMBERDALE config SERIAL_BCM63XX tristate "Broadcom BCM63xx/BCM33xx UART support" select SERIAL_CORE - depends on ARCH_BCM4908 || ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC || COMPILE_TEST - default ARCH_BCM4908 || ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC + depends on ARCH_BCM4908 || ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST + default ARCH_BCM4908 || ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC help This enables the driver for the onchip UART core found on the following chipsets: diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 97ef41cb2721..16a21422ddce 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1367,6 +1367,15 @@ static void pl011_stop_rx(struct uart_port *port) pl011_dma_rx_stop(uap); } 
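The amba-pl011 hunks that follow add throttle/unthrottle hooks and convert pl011_enable_interrupts() to irqsave locking, since it becomes reachable from contexts where interrupts may already be disabled. The general shape of that change, as a sketch with illustrative names:

#include <linux/spinlock.h>

/* Sketch of the locking pattern in the pl011 hunks below: a helper that may
 * run both at startup (IRQs on) and from throttle/resume paths must save and
 * restore the interrupt state, rather than unconditionally re-enabling IRQs
 * with spin_lock_irq()/spin_unlock_irq(). */
void demo_configure(spinlock_t *lock)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);         /* safe in any context */
        /* ... program interrupt mask registers ... */
        spin_unlock_irqrestore(lock, flags);    /* restores prior IRQ state */
}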
+static void pl011_throttle_rx(struct uart_port *port) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + pl011_stop_rx(port); + spin_unlock_irqrestore(&port->lock, flags); +} + static void pl011_enable_ms(struct uart_port *port) { struct uart_amba_port *uap = @@ -1788,9 +1797,10 @@ static int pl011_allocate_irq(struct uart_amba_port *uap) */ static void pl011_enable_interrupts(struct uart_amba_port *uap) { + unsigned long flags; unsigned int i; - spin_lock_irq(&uap->port.lock); + spin_lock_irqsave(&uap->port.lock, flags); /* Clear out any spuriously appearing RX interrupts */ pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR); @@ -1812,7 +1822,14 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap) if (!pl011_dma_rx_running(uap)) uap->im |= UART011_RXIM; pl011_write(uap->im, uap, REG_IMSC); - spin_unlock_irq(&uap->port.lock); + spin_unlock_irqrestore(&uap->port.lock, flags); +} + +static void pl011_unthrottle_rx(struct uart_port *port) +{ + struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); + + pl011_enable_interrupts(uap); } static int pl011_startup(struct uart_port *port) @@ -2225,6 +2242,8 @@ static const struct uart_ops amba_pl011_pops = { .stop_tx = pl011_stop_tx, .start_tx = pl011_start_tx, .stop_rx = pl011_stop_rx, + .throttle = pl011_throttle_rx, + .unthrottle = pl011_unthrottle_rx, .enable_ms = pl011_enable_ms, .break_ctl = pl011_break_ctl, .startup = pl011_startup, diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index 0429c2a54290..93489fe334d0 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c @@ -470,14 +470,14 @@ static void mvebu_uart_shutdown(struct uart_port *port) } } -static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud) +static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud) { unsigned int d_divisor, m_divisor; unsigned long flags; u32 brdv, osamp; if (!port->uartclk) - return -EOPNOTSUPP; + return 0; /* * The baudrate is derived from the UART clock thanks to divisors: @@ -548,7 +548,7 @@ static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud) (m_divisor << 16) | (m_divisor << 24); writel(osamp, port->membase + UART_OSAMP); - return 0; + return DIV_ROUND_CLOSEST(port->uartclk, d_divisor * m_divisor); } static void mvebu_uart_set_termios(struct uart_port *port, @@ -587,15 +587,11 @@ static void mvebu_uart_set_termios(struct uart_port *port, max_baud = port->uartclk / 80; baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud); - if (mvebu_uart_baud_rate_set(port, baud)) { - /* No clock available, baudrate cannot be changed */ - if (old) - baud = uart_get_baud_rate(port, old, NULL, - min_baud, max_baud); - } else { - tty_termios_encode_baud_rate(termios, baud, baud); - uart_update_timeout(port, termios->c_cflag, baud); - } + baud = mvebu_uart_baud_rate_set(port, baud); + + /* In case baudrate cannot be changed, report previous old value */ + if (baud == 0 && old) + baud = tty_termios_baud_rate(old); /* Only the following flag changes are supported */ if (old) { @@ -606,6 +602,11 @@ static void mvebu_uart_set_termios(struct uart_port *port, termios->c_cflag |= CS8; } + if (baud != 0) { + tty_termios_encode_baud_rate(termios, baud, baud); + uart_update_timeout(port, termios->c_cflag, baud); + } + spin_unlock_irqrestore(&port->lock, flags); } diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c index d5ca904def34..1afe47b62ad5 
100644 --- a/drivers/tty/serial/samsung_tty.c +++ b/drivers/tty/serial/samsung_tty.c @@ -377,8 +377,7 @@ static void enable_tx_dma(struct s3c24xx_uart_port *ourport) /* Enable tx dma mode */ ucon = rd_regl(port, S3C2410_UCON); ucon &= ~(S3C64XX_UCON_TXBURST_MASK | S3C64XX_UCON_TXMODE_MASK); - ucon |= (dma_get_cache_alignment() >= 16) ? - S3C64XX_UCON_TXBURST_16 : S3C64XX_UCON_TXBURST_1; + ucon |= S3C64XX_UCON_TXBURST_1; ucon |= S3C64XX_UCON_TXMODE_DMA; wr_regl(port, S3C2410_UCON, ucon); @@ -674,7 +673,7 @@ static void enable_rx_dma(struct s3c24xx_uart_port *ourport) S3C64XX_UCON_DMASUS_EN | S3C64XX_UCON_TIMEOUT_EN | S3C64XX_UCON_RXMODE_MASK); - ucon |= S3C64XX_UCON_RXBURST_16 | + ucon |= S3C64XX_UCON_RXBURST_1 | 0xf << S3C64XX_UCON_TIMEOUT_SHIFT | S3C64XX_UCON_EMPTYINT_EN | S3C64XX_UCON_TIMEOUT_EN | diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 338ebadfd44b..3dc926d6c00a 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1941,11 +1941,6 @@ static int uart_proc_show(struct seq_file *m, void *v) } #endif -static inline bool uart_console_enabled(struct uart_port *port) -{ - return uart_console(port) && (port->cons->flags & CON_ENABLED); -} - static void uart_port_spin_lock_init(struct uart_port *port) { spin_lock_init(&port->lock); diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index b7b44f4050d4..0973b03eeeaa 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -72,6 +72,8 @@ static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE, *cr3 |= USART_CR3_DEM; over8 = *cr1 & USART_CR1_OVER8; + *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); + if (over8) rs485_deat_dedt = delay_ADE * baud * 8; else diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h index b710c5ef89ab..f310a8274df1 100644 --- a/drivers/tty/tty.h +++ b/drivers/tty/tty.h @@ -111,4 +111,7 @@ static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *); +int tty_insert_flip_string_and_push_buffer(struct tty_port *port, + const unsigned char *chars, size_t cnt); + #endif diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index bfa431a8e690..595d8b49c745 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -532,6 +532,15 @@ static void flush_to_ldisc(struct work_struct *work) } +static inline void tty_flip_buffer_commit(struct tty_buffer *tail) +{ + /* + * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees + * buffer data. + */ + smp_store_release(&tail->commit, tail->used); +} + /** * tty_flip_buffer_push - push terminal buffers * @port: tty port to push @@ -546,16 +555,43 @@ void tty_flip_buffer_push(struct tty_port *port) { struct tty_bufhead *buf = &port->buf; - /* - * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees - * buffer data. - */ - smp_store_release(&buf->tail->commit, buf->tail->used); + tty_flip_buffer_commit(buf->tail); queue_work(system_unbound_wq, &buf->work); } EXPORT_SYMBOL(tty_flip_buffer_push); /** + * tty_insert_flip_string_and_push_buffer - add characters to the tty buffer and + * push + * @port: tty port + * @chars: characters + * @size: size + * + * The function combines tty_insert_flip_string() and tty_flip_buffer_push() + * with the exception of properly holding the @port->lock. + * + * To be used only internally (by pty currently). + * + * Returns: the number added. 
+ */ +int tty_insert_flip_string_and_push_buffer(struct tty_port *port, + const unsigned char *chars, size_t size) +{ + struct tty_bufhead *buf = &port->buf; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + size = tty_insert_flip_string(port, chars, size); + if (size) + tty_flip_buffer_commit(buf->tail); + spin_unlock_irqrestore(&port->lock, flags); + + queue_work(system_unbound_wq, &buf->work); + + return size; +} + +/** * tty_buffer_init - prepare a tty buffer structure * @port: tty port to initialise * diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index f8c87c4d7399..dfc1f4b445f3 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -855,7 +855,7 @@ static void delete_char(struct vc_data *vc, unsigned int nr) unsigned short *p = (unsigned short *) vc->vc_pos; vc_uniscr_delete(vc, nr); - scr_memcpyw(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2); + scr_memmovew(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2); scr_memsetw(p + vc->vc_cols - vc->state.x - nr, vc->vc_video_erase_char, nr * 2); vc->vc_need_wrap = 0; diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index ce86d1b790c0..3d367be71728 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -2953,37 +2953,59 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int max_timeout) { - int err = 0; - unsigned long time_left; + unsigned long time_left = msecs_to_jiffies(max_timeout); unsigned long flags; + bool pending; + int err; +retry: time_left = wait_for_completion_timeout(hba->dev_cmd.complete, - msecs_to_jiffies(max_timeout)); + time_left); - spin_lock_irqsave(hba->host->host_lock, flags); - hba->dev_cmd.complete = NULL; if (likely(time_left)) { + /* + * The completion handler called complete() and the caller of + * this function still owns the @lrbp tag so the code below does + * not trigger any race conditions. + */ + hba->dev_cmd.complete = NULL; err = ufshcd_get_tr_ocs(lrbp); if (!err) err = ufshcd_dev_cmd_completion(hba, lrbp); - } - spin_unlock_irqrestore(hba->host->host_lock, flags); - - if (!time_left) { + } else { err = -ETIMEDOUT; dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", __func__, lrbp->task_tag); - if (!ufshcd_clear_cmds(hba, 1U << lrbp->task_tag)) + if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) { /* successfully cleared the command, retry if needed */ err = -EAGAIN; - /* - * in case of an error, after clearing the doorbell, - * we also need to clear the outstanding_request - * field in hba - */ - spin_lock_irqsave(&hba->outstanding_lock, flags); - __clear_bit(lrbp->task_tag, &hba->outstanding_reqs); - spin_unlock_irqrestore(&hba->outstanding_lock, flags); + /* + * Since clearing the command succeeded we also need to + * clear the task tag bit from the outstanding_reqs + * variable. + */ + spin_lock_irqsave(&hba->outstanding_lock, flags); + pending = test_bit(lrbp->task_tag, + &hba->outstanding_reqs); + if (pending) { + hba->dev_cmd.complete = NULL; + __clear_bit(lrbp->task_tag, + &hba->outstanding_reqs); + } + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + if (!pending) { + /* + * The completion handler ran while we tried to + * clear the command. 
+ */ + time_left = 1; + goto retry; + } + } else { + dev_err(hba->dev, "%s: failed to clear tag %d\n", + __func__, lrbp->task_tag); + } } return err; @@ -5738,7 +5760,7 @@ int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable) } hba->dev_info.wb_enabled = enable; - dev_info(hba->dev, "%s Write Booster %s\n", + dev_dbg(hba->dev, "%s Write Booster %s\n", __func__, enable ? "enabled" : "disabled"); return ret; @@ -7253,7 +7275,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) hba->silence_err_logs = false; /* scale up clocks to max frequency before full reinitialization */ - ufshcd_set_clk_freq(hba, true); + ufshcd_scale_clks(hba, true); err = ufshcd_hba_enable(hba); diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c index e7332cc65b1f..173aea8e9997 100644 --- a/drivers/ufs/host/ufshcd-pltfrm.c +++ b/drivers/ufs/host/ufshcd-pltfrm.c @@ -108,9 +108,20 @@ out: return ret; } +static bool phandle_exists(const struct device_node *np, + const char *phandle_name, int index) +{ + struct device_node *parse_np = of_parse_phandle(np, phandle_name, index); + + if (parse_np) + of_node_put(parse_np); + + return parse_np != NULL; +} + #define MAX_PROP_SIZE 32 static int ufshcd_populate_vreg(struct device *dev, const char *name, - struct ufs_vreg **out_vreg) + struct ufs_vreg **out_vreg) { char prop_name[MAX_PROP_SIZE]; struct ufs_vreg *vreg = NULL; @@ -122,7 +133,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name, } snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name); - if (!of_parse_phandle(np, prop_name, 0)) { + if (!phandle_exists(np, prop_name, 0)) { dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n", __func__, prop_name); goto out; diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c index fea7aca35dc8..173cf3579c55 100644 --- a/drivers/usb/dwc3/dwc3-am62.c +++ b/drivers/usb/dwc3/dwc3-am62.c @@ -195,8 +195,7 @@ static int dwc3_ti_probe(struct platform_device *pdev) if (i == ARRAY_SIZE(dwc3_ti_rate_table)) { dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate); - ret = -EINVAL; - goto err_clk_disable; + return -EINVAL; } data->rate_code = i; @@ -204,7 +203,7 @@ static int dwc3_ti_probe(struct platform_device *pdev) /* Read the syscon property and set the rate code */ ret = phy_syscon_pll_refclk(data); if (ret) - goto err_clk_disable; + return ret; /* VBUS divider select */ data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider"); @@ -245,8 +244,6 @@ err_pm_disable: clk_disable_unprepare(data->usb2_refclk); pm_runtime_disable(dev); pm_runtime_set_suspended(dev); -err_clk_disable: - clk_put(data->usb2_refclk); return ret; } @@ -276,7 +273,6 @@ static int dwc3_ti_remove(struct platform_device *pdev) pm_runtime_disable(dev); pm_runtime_set_suspended(dev); - clk_put(data->usb2_refclk); platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 8716bece1072..0d89dfa6eef5 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -4249,7 +4249,6 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) } evt->count = 0; - evt->flags &= ~DWC3_EVENT_PENDING; ret = IRQ_HANDLED; /* Unmask interrupt */ @@ -4261,6 +4260,9 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); } + /* Keep the clearing of DWC3_EVENT_PENDING at the end */ + evt->flags &= ~DWC3_EVENT_PENDING; + return ret; } diff --git 
a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index e5a6b6e36b3d..4303a3283ba0 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c @@ -2371,6 +2371,7 @@ static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\ const char *page, size_t len) \ { \ struct f_uvc_opts *opts = to_f_uvc_opts(item); \ + int size = min(sizeof(opts->aname), len + 1); \ int ret = 0; \ \ mutex_lock(&opts->lock); \ @@ -2379,8 +2380,9 @@ static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\ goto end; \ } \ \ - ret = snprintf(opts->aname, min(sizeof(opts->aname), len), \ - "%s", page); \ + ret = strscpy(opts->aname, page, size); \ + if (ret == -E2BIG) \ + ret = size - 1; \ \ end: \ mutex_unlock(&opts->lock); \ diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index 385be30baad3..896c0d107f72 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -76,14 +76,9 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev) return -ENODEV; } - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) { - dev_err(&pdev->dev, - "Found HC with no IRQ. Check %s setup!\n", - dev_name(&pdev->dev)); - return -ENODEV; - } - irq = res->start; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; hcd = __usb_create_hcd(&fsl_ehci_hc_driver, pdev->dev.parent, &pdev->dev, dev_name(&pdev->dev), NULL); diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c index 44a7e58a26e3..e5df17522892 100644 --- a/drivers/usb/host/fsl-mph-dr-of.c +++ b/drivers/usb/host/fsl-mph-dr-of.c @@ -112,6 +112,9 @@ static struct platform_device *fsl_usb2_device_register( goto error; } + pdev->dev.of_node = ofdev->dev.of_node; + pdev->dev.of_node_reused = true; + retval = platform_device_add(pdev); if (retval) goto error; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index b440d338a895..d5a3986dfee7 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1023,6 +1023,9 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) }, + /* Belimo Automation devices */ + { USB_DEVICE(FTDI_VID, BELIMO_ZTH_PID) }, + { USB_DEVICE(FTDI_VID, BELIMO_ZIP_PID) }, /* ICP DAS I-756xU devices */ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index d1a9564697a4..4e92c165c86b 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1569,6 +1569,12 @@ #define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */ /* + * Belimo Automation + */ +#define BELIMO_ZTH_PID 0x8050 +#define BELIMO_ZIP_PID 0xC811 + +/* * Unjo AB */ #define UNJO_VID 0x22B7 diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index ee0e520707dd..c4724750c81a 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -1718,6 +1718,7 @@ void typec_set_pwr_opmode(struct typec_port *port, partner->usb_pd = 1; sysfs_notify(&partner_dev->kobj, NULL, "supports_usb_power_delivery"); + kobject_uevent(&partner_dev->kobj, KOBJ_CHANGE); } put_device(partner_dev); } diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 1b6d46b86f81..e85c1d71f4ed 100644 --- 
a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1962,6 +1962,8 @@ static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_c struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); ndev->event_cbs[idx] = *cb; + if (is_ctrl_vq_idx(mvdev, idx)) + mvdev->cvq.event_cb = *cb; } static void mlx5_cvq_notify(struct vringh *vring) @@ -2174,7 +2176,6 @@ static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features) static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev) { struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); - struct mlx5_control_vq *cvq = &mvdev->cvq; int err; int i; @@ -2184,16 +2185,6 @@ static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev) goto err_vq; } - if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) { - err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features, - MLX5_CVQ_MAX_ENT, false, - (struct vring_desc *)(uintptr_t)cvq->desc_addr, - (struct vring_avail *)(uintptr_t)cvq->driver_addr, - (struct vring_used *)(uintptr_t)cvq->device_addr); - if (err) - goto err_vq; - } - return 0; err_vq: @@ -2466,6 +2457,21 @@ static void clear_vqs_ready(struct mlx5_vdpa_net *ndev) ndev->mvdev.cvq.ready = false; } +static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev) +{ + struct mlx5_control_vq *cvq = &mvdev->cvq; + int err = 0; + + if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) + err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features, + MLX5_CVQ_MAX_ENT, false, + (struct vring_desc *)(uintptr_t)cvq->desc_addr, + (struct vring_avail *)(uintptr_t)cvq->driver_addr, + (struct vring_used *)(uintptr_t)cvq->device_addr); + + return err; +} + static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status) { struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); @@ -2478,6 +2484,11 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status) if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { if (status & VIRTIO_CONFIG_S_DRIVER_OK) { + err = setup_cvq_vring(mvdev); + if (err) { + mlx5_vdpa_warn(mvdev, "failed to setup control VQ vring\n"); + goto err_setup; + } err = setup_driver(mvdev); if (err) { mlx5_vdpa_warn(mvdev, "failed to setup driver\n"); diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index 776ad7496f53..3bc27de58f46 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -1476,16 +1476,12 @@ static char *vduse_devnode(struct device *dev, umode_t *mode) return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev)); } -static void vduse_mgmtdev_release(struct device *dev) -{ -} - -static struct device vduse_mgmtdev = { - .init_name = "vduse", - .release = vduse_mgmtdev_release, +struct vduse_mgmt_dev { + struct vdpa_mgmt_dev mgmt_dev; + struct device dev; }; -static struct vdpa_mgmt_dev mgmt_dev; +static struct vduse_mgmt_dev *vduse_mgmt; static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name) { @@ -1510,7 +1506,7 @@ static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name) } set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops); vdev->vdpa.dma_dev = &vdev->vdpa.dev; - vdev->vdpa.mdev = &mgmt_dev; + vdev->vdpa.mdev = &vduse_mgmt->mgmt_dev; return 0; } @@ -1556,34 +1552,52 @@ static struct virtio_device_id id_table[] = { { 0 }, }; -static struct vdpa_mgmt_dev mgmt_dev = { - .device = &vduse_mgmtdev, - .id_table = id_table, - .ops = &vdpa_dev_mgmtdev_ops, -}; +static void vduse_mgmtdev_release(struct device *dev) +{ + struct vduse_mgmt_dev *mgmt_dev; + + mgmt_dev = 
container_of(dev, struct vduse_mgmt_dev, dev); + kfree(mgmt_dev); +} static int vduse_mgmtdev_init(void) { int ret; - ret = device_register(&vduse_mgmtdev); - if (ret) + vduse_mgmt = kzalloc(sizeof(*vduse_mgmt), GFP_KERNEL); + if (!vduse_mgmt) + return -ENOMEM; + + ret = dev_set_name(&vduse_mgmt->dev, "vduse"); + if (ret) { + kfree(vduse_mgmt); return ret; + } - ret = vdpa_mgmtdev_register(&mgmt_dev); + vduse_mgmt->dev.release = vduse_mgmtdev_release; + + ret = device_register(&vduse_mgmt->dev); if (ret) - goto err; + goto dev_reg_err; - return 0; -err: - device_unregister(&vduse_mgmtdev); + vduse_mgmt->mgmt_dev.id_table = id_table; + vduse_mgmt->mgmt_dev.ops = &vdpa_dev_mgmtdev_ops; + vduse_mgmt->mgmt_dev.device = &vduse_mgmt->dev; + ret = vdpa_mgmtdev_register(&vduse_mgmt->mgmt_dev); + if (ret) + device_unregister(&vduse_mgmt->dev); + + return ret; + +dev_reg_err: + put_device(&vduse_mgmt->dev); return ret; } static void vduse_mgmtdev_exit(void) { - vdpa_mgmtdev_unregister(&mgmt_dev); - device_unregister(&vduse_mgmtdev); + vdpa_mgmtdev_unregister(&vduse_mgmt->mgmt_dev); + device_unregister(&vduse_mgmt->dev); } static int vduse_init(void) diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 61e71c1154be..e60b06f2ac22 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -549,6 +549,16 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev) if (!iommu_group) return ERR_PTR(-EINVAL); + /* + * VFIO always sets IOMMU_CACHE because we offer no way for userspace to + * restore cache coherency. It has to be checked here because it is only + * valid for cases where we are using iommu groups. + */ + if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) { + iommu_group_put(iommu_group); + return ERR_PTR(-EINVAL); + } + group = vfio_group_get_from_iommu(iommu_group); if (!group) group = vfio_create_group(iommu_group, VFIO_IOMMU); @@ -601,13 +611,6 @@ static int __vfio_register_dev(struct vfio_device *device, int vfio_register_group_dev(struct vfio_device *device) { - /* - * VFIO always sets IOMMU_CACHE because we offer no way for userspace to - * restore cache coherency. - */ - if (!iommu_capable(device->dev->bus, IOMMU_CAP_CACHE_COHERENCY)) - return -EINVAL; - return __vfio_register_dev(device, vfio_group_find_or_alloc(device->dev)); } diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 5ad2596c6e8a..23dcbfdfa13b 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -1209,7 +1209,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep) vhost_dev_stop(&v->vdev); vhost_vdpa_free_domain(v); vhost_vdpa_config_put(v); - vhost_dev_cleanup(&v->vdev); + vhost_vdpa_cleanup(v); mutex_unlock(&d->mutex); atomic_dec(&v->opened); diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index c4e91715ef00..1a9aa12cf886 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2469,6 +2469,11 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, if (charcount != 256 && charcount != 512) return -EINVAL; + /* font bigger than screen resolution ? 
*/ + if (w > FBCON_SWAP(info->var.rotate, info->var.xres, info->var.yres) || + h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres)) + return -EINVAL; + /* Make sure drawing engine can handle the font */ if (!(info->pixmap.blit_x & (1 << (font->width - 1))) || !(info->pixmap.blit_y & (1 << (font->height - 1)))) @@ -2731,6 +2736,34 @@ void fbcon_update_vcs(struct fb_info *info, bool all) } EXPORT_SYMBOL(fbcon_update_vcs); +/* let fbcon check if it supports a new screen resolution */ +int fbcon_modechange_possible(struct fb_info *info, struct fb_var_screeninfo *var) +{ + struct fbcon_ops *ops = info->fbcon_par; + struct vc_data *vc; + unsigned int i; + + WARN_CONSOLE_UNLOCKED(); + + if (!ops) + return 0; + + /* prevent setting a screen size which is smaller than font size */ + for (i = first_fb_vc; i <= last_fb_vc; i++) { + vc = vc_cons[i].d; + if (!vc || vc->vc_mode != KD_TEXT || + fbcon_info_from_console(i) != info) + continue; + + if (vc->vc_font.width > FBCON_SWAP(var->rotate, var->xres, var->yres) || + vc->vc_font.height > FBCON_SWAP(var->rotate, var->yres, var->xres)) + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(fbcon_modechange_possible); + int fbcon_mode_deleted(struct fb_info *info, struct fb_videomode *mode) { diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index afa2863670f3..7ee6eb2fa715 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/major.h> #include <linux/slab.h> +#include <linux/sysfb.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/vt.h> @@ -510,7 +511,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, while (n && (n * (logo->width + 8) - 8 > xres)) --n; - image.dx = (xres - n * (logo->width + 8) - 8) / 2; + image.dx = (xres - (n * (logo->width + 8) - 8)) / 2; image.dy = y ?: (yres - logo->height) / 2; } else { image.dx = 0; @@ -1016,6 +1017,16 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) if (ret) return ret; + /* verify that virtual resolution >= physical resolution */ + if (var->xres_virtual < var->xres || + var->yres_virtual < var->yres) { + pr_warn("WARNING: fbcon: Driver '%s' missed to adjust virtual screen size (%ux%u vs. %ux%u)\n", + info->fix.id, + var->xres_virtual, var->yres_virtual, + var->xres, var->yres); + return -EINVAL; + } + if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_NOW) return 0; @@ -1106,7 +1117,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, return -EFAULT; console_lock(); lock_fb_info(info); - ret = fb_set_var(info, &var); + ret = fbcon_modechange_possible(info, &var); + if (!ret) + ret = fb_set_var(info, &var); if (!ret) fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL); unlock_fb_info(info); @@ -1752,6 +1765,17 @@ int remove_conflicting_framebuffers(struct apertures_struct *a, do_free = true; } + /* + * If a driver asked to unregister a platform device registered by + * sysfb, then it can be assumed that this is a driver for a display + * that is set up by the system firmware and has a generic driver. + * + * Drivers for devices that don't have a generic driver will never + * ask for this, so let's assume that a real driver for the display + * was already probed and prevent sysfb from registering devices later.
+ */ + sysfb_disable(); + mutex_lock(&registration_lock); do_remove_conflicting_framebuffers(a, name, primary); mutex_unlock(&registration_lock); diff --git a/drivers/virt/coco/sev-guest/sev-guest.c index 90ce16b6e05f..f422f9c58ba7 100644 --- a/drivers/virt/coco/sev-guest/sev-guest.c +++ b/drivers/virt/coco/sev-guest/sev-guest.c @@ -632,16 +632,19 @@ static int __init sev_guest_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct snp_guest_dev *snp_dev; struct miscdevice *misc; + void __iomem *mapping; int ret; if (!dev->platform_data) return -ENODEV; data = (struct sev_guest_platform_data *)dev->platform_data; - layout = (__force void *)ioremap_encrypted(data->secrets_gpa, PAGE_SIZE); - if (!layout) + mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE); + if (!mapping) return -ENODEV; + layout = (__force void *)mapping; + ret = -ENOMEM; snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL); if (!snp_dev) @@ -706,7 +709,7 @@ e_free_response: e_free_request: free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg)); e_unmap: - iounmap(layout); + iounmap(mapping); return ret; } diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index a6dc8b5846fe..e1556d2a355a 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -29,6 +29,19 @@ menuconfig VIRTIO_MENU if VIRTIO_MENU +config VIRTIO_HARDEN_NOTIFICATION + bool "Harden virtio notification" + help + Enable this to harden the device notifications and suppress + those that happen at a time when notifications are illegal. + + Experimental: Note that several drivers still have bugs that + may cause crashes or hangs when correct handling of + notifications is enforced; depending on the subset of + drivers and devices you use, this may or may not work. + + If unsure, say N.
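The new VIRTIO_HARDEN_NOTIFICATION option gates the behaviour change made in the virtio_ring.c hunks further down: each virtqueue starts out flagged broken and the flag is only cleared once the driver reaches DRIVER_OK, so notifications arriving before that are rejected instead of handled. A minimal sketch of that gate (illustration only; struct my_vq, my_vring_interrupt() and my_handle_events() are placeholders, not virtio_ring.c code):

#include <linux/interrupt.h>
#include <linux/kernel.h>

struct my_vq {
        bool broken;    /* set at queue creation, cleared at DRIVER_OK */
};

static irqreturn_t my_handle_events(struct my_vq *vq); /* real work */

static irqreturn_t my_vring_interrupt(int irq, void *data)
{
        struct my_vq *vq = data;

        if (unlikely(vq->broken)) {
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
                /* Hardened: a notification before DRIVER_OK is illegal. */
                pr_warn_once("virtio vring IRQ raised before DRIVER_OK\n");
                return IRQ_NONE;
#else
                /* Legacy behaviour: tolerate the early notification. */
                return IRQ_HANDLED;
#endif
        }

        return my_handle_events(vq);
}

The "Experimental" wording in the help text reflects exactly this: with the option on, drivers that notify too early stop working instead of being silently tolerated.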
+ config VIRTIO_PCI tristate "PCI driver for virtio devices" depends on PCI diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 6bace84ae37e..7deeed30d1f3 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -219,6 +219,7 @@ static int virtio_features_ok(struct virtio_device *dev) * */ void virtio_reset_device(struct virtio_device *dev) { +#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION /* * The below virtio_synchronize_cbs() guarantees that any * interrupt for this line arriving after @@ -227,6 +228,7 @@ void virtio_reset_device(struct virtio_device *dev) */ virtio_break_device(dev); virtio_synchronize_cbs(dev); +#endif dev->config->reset(dev); } diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index c9bec3813e94..083ff1eb743d 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -62,6 +62,7 @@ #include <linux/list.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/pm.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/virtio.h> @@ -556,6 +557,28 @@ static const struct virtio_config_ops virtio_mmio_config_ops = { .synchronize_cbs = vm_synchronize_cbs, }; +#ifdef CONFIG_PM_SLEEP +static int virtio_mmio_freeze(struct device *dev) +{ + struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev); + + return virtio_device_freeze(&vm_dev->vdev); +} + +static int virtio_mmio_restore(struct device *dev) +{ + struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev); + + if (vm_dev->version == 1) + writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); + + return virtio_device_restore(&vm_dev->vdev); +} + +static const struct dev_pm_ops virtio_mmio_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(virtio_mmio_freeze, virtio_mmio_restore) +}; +#endif static void virtio_mmio_release_dev(struct device *_d) { @@ -799,6 +822,9 @@ static struct platform_driver virtio_mmio_driver = { .name = "virtio-mmio", .of_match_table = virtio_mmio_match, .acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match), +#ifdef CONFIG_PM_SLEEP + .pm = &virtio_mmio_pm_ops, +#endif }, }; diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c index b790f30b2b56..fa2a9445bb18 100644 --- a/drivers/virtio/virtio_pci_modern_dev.c +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -220,8 +220,6 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev) check_offsets(); - mdev->pci_dev = pci_dev; - /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */ if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f) return -ENODEV; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 13a7348cedff..643ca779fcc6 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -111,7 +111,12 @@ struct vring_virtqueue { /* Number we've added since last sync. */ unsigned int num_added; - /* Last used index we've seen. */ + /* Last used index we've seen. + * for split ring, it just contains last used index + * for packed ring: + * bits up to VRING_PACKED_EVENT_F_WRAP_CTR include the last used index. + * bits from VRING_PACKED_EVENT_F_WRAP_CTR include the used wrap counter. + */ u16 last_used_idx; /* Hint for event idx: already triggered no need to disable. */ @@ -154,9 +159,6 @@ struct vring_virtqueue { /* Driver ring wrap counter. */ bool avail_wrap_counter; - /* Device ring wrap counter. */ - bool used_wrap_counter; - /* Avail used flags. 
*/ u16 avail_used_flags; @@ -933,7 +935,7 @@ static struct virtqueue *vring_create_virtqueue_split( for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) { queue = vring_alloc_queue(vdev, vring_size(num, vring_align), &dma_addr, - GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO); + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); if (queue) break; if (!may_reduce_num) @@ -973,6 +975,15 @@ static struct virtqueue *vring_create_virtqueue_split( /* * Packed ring specific functions - *_packed(). */ +static inline bool packed_used_wrap_counter(u16 last_used_idx) +{ + return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR)); +} + +static inline u16 packed_last_used(u16 last_used_idx) +{ + return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR)); +} static void vring_unmap_extra_packed(const struct vring_virtqueue *vq, struct vring_desc_extra *extra) @@ -1406,8 +1417,14 @@ static inline bool is_used_desc_packed(const struct vring_virtqueue *vq, static inline bool more_used_packed(const struct vring_virtqueue *vq) { - return is_used_desc_packed(vq, vq->last_used_idx, - vq->packed.used_wrap_counter); + u16 last_used; + u16 last_used_idx; + bool used_wrap_counter; + + last_used_idx = READ_ONCE(vq->last_used_idx); + last_used = packed_last_used(last_used_idx); + used_wrap_counter = packed_used_wrap_counter(last_used_idx); + return is_used_desc_packed(vq, last_used, used_wrap_counter); } static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, @@ -1415,7 +1432,8 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, void **ctx) { struct vring_virtqueue *vq = to_vvq(_vq); - u16 last_used, id; + u16 last_used, id, last_used_idx; + bool used_wrap_counter; void *ret; START_USE(vq); @@ -1434,7 +1452,9 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, /* Only get used elements after they have been exposed by host. 
*/ virtio_rmb(vq->weak_barriers); - last_used = vq->last_used_idx; + last_used_idx = READ_ONCE(vq->last_used_idx); + used_wrap_counter = packed_used_wrap_counter(last_used_idx); + last_used = packed_last_used(last_used_idx); id = le16_to_cpu(vq->packed.vring.desc[last_used].id); *len = le32_to_cpu(vq->packed.vring.desc[last_used].len); @@ -1451,12 +1471,15 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, ret = vq->packed.desc_state[id].data; detach_buf_packed(vq, id, ctx); - vq->last_used_idx += vq->packed.desc_state[id].num; - if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) { - vq->last_used_idx -= vq->packed.vring.num; - vq->packed.used_wrap_counter ^= 1; + last_used += vq->packed.desc_state[id].num; + if (unlikely(last_used >= vq->packed.vring.num)) { + last_used -= vq->packed.vring.num; + used_wrap_counter ^= 1; } + last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR)); + WRITE_ONCE(vq->last_used_idx, last_used); + /* * If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before @@ -1465,9 +1488,7 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) virtio_store_mb(vq->weak_barriers, &vq->packed.vring.driver->off_wrap, - cpu_to_le16(vq->last_used_idx | - (vq->packed.used_wrap_counter << - VRING_PACKED_EVENT_F_WRAP_CTR))); + cpu_to_le16(vq->last_used_idx)); LAST_ADD_TIME_INVALID(vq); @@ -1499,9 +1520,7 @@ static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq) if (vq->event) { vq->packed.vring.driver->off_wrap = - cpu_to_le16(vq->last_used_idx | - (vq->packed.used_wrap_counter << - VRING_PACKED_EVENT_F_WRAP_CTR)); + cpu_to_le16(vq->last_used_idx); /* * We need to update event offset and event wrap * counter first before updating event flags. 
@@ -1518,8 +1537,7 @@ static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq) } END_USE(vq); - return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter << - VRING_PACKED_EVENT_F_WRAP_CTR); + return vq->last_used_idx; } static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap) @@ -1537,7 +1555,7 @@ static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap) static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); - u16 used_idx, wrap_counter; + u16 used_idx, wrap_counter, last_used_idx; u16 bufs; START_USE(vq); @@ -1550,9 +1568,10 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) if (vq->event) { /* TODO: tune this threshold */ bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4; - wrap_counter = vq->packed.used_wrap_counter; + last_used_idx = READ_ONCE(vq->last_used_idx); + wrap_counter = packed_used_wrap_counter(last_used_idx); - used_idx = vq->last_used_idx + bufs; + used_idx = packed_last_used(last_used_idx) + bufs; if (used_idx >= vq->packed.vring.num) { used_idx -= vq->packed.vring.num; wrap_counter ^= 1; @@ -1582,9 +1601,10 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) */ virtio_mb(vq->weak_barriers); - if (is_used_desc_packed(vq, - vq->last_used_idx, - vq->packed.used_wrap_counter)) { + last_used_idx = READ_ONCE(vq->last_used_idx); + wrap_counter = packed_used_wrap_counter(last_used_idx); + used_idx = packed_last_used(last_used_idx); + if (is_used_desc_packed(vq, used_idx, wrap_counter)) { END_USE(vq); return false; } @@ -1688,8 +1708,12 @@ static struct virtqueue *vring_create_virtqueue_packed( vq->we_own_ring = true; vq->notify = notify; vq->weak_barriers = weak_barriers; +#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION vq->broken = true; - vq->last_used_idx = 0; +#else + vq->broken = false; +#endif + vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR); vq->event_triggered = false; vq->num_added = 0; vq->packed_ring = true; @@ -1720,7 +1744,6 @@ static struct virtqueue *vring_create_virtqueue_packed( vq->packed.next_avail_idx = 0; vq->packed.avail_wrap_counter = 1; - vq->packed.used_wrap_counter = 1; vq->packed.event_flags_shadow = 0; vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL; @@ -2135,9 +2158,13 @@ irqreturn_t vring_interrupt(int irq, void *_vq) } if (unlikely(vq->broken)) { +#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION dev_warn_once(&vq->vq.vdev->dev, "virtio vring IRQ raised before DRIVER_OK"); return IRQ_NONE; +#else + return IRQ_HANDLED; +#endif } /* Just a hint for performance: so it's ok that this can be racy! 
*/ @@ -2180,7 +2207,11 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, vq->we_own_ring = false; vq->notify = notify; vq->weak_barriers = weak_barriers; +#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION vq->broken = true; +#else + vq->broken = false; +#endif vq->last_used_idx = 0; vq->event_triggered = false; vq->num_added = 0; diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 46d9295d9a6e..5e8321f43cbd 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -528,9 +528,10 @@ static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu, BUG_ON(irq == -1); if (IS_ENABLED(CONFIG_SMP) && force_affinity) { - cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu)); - cpumask_copy(irq_get_effective_affinity_mask(irq), - cpumask_of(cpu)); + struct irq_data *data = irq_get_irq_data(irq); + + irq_data_update_affinity(data, cpumask_of(cpu)); + irq_data_update_effective_affinity(data, cpumask_of(cpu)); } xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu); diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 4b56c39f766d..84b143eef395 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -396,13 +396,15 @@ static void __unmap_grant_pages_done(int result, unsigned int offset = data->unmap_ops - map->unmap_ops; for (i = 0; i < data->count; i++) { - WARN_ON(map->unmap_ops[offset+i].status); + WARN_ON(map->unmap_ops[offset + i].status != GNTST_okay && + map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE); pr_debug("unmap handle=%d st=%d\n", map->unmap_ops[offset+i].handle, map->unmap_ops[offset+i].status); map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE; if (use_ptemod) { - WARN_ON(map->kunmap_ops[offset+i].status); + WARN_ON(map->kunmap_ops[offset + i].status != GNTST_okay && + map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE); pr_debug("kunmap handle=%u st=%d\n", map->kunmap_ops[offset+i].handle, map->kunmap_ops[offset+i].status);
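The relaxed WARN_ON in the gntdev hunk above only complains when an unmap of a still-valid handle actually failed; a slot whose handle is already INVALID_GRANT_HANDLE had nothing mapped, so a non-zero status there carries no information. Restated as a small helper (a sketch; unmap_ok() is hypothetical and not part of the patch):

#include <linux/types.h>
#include <xen/grant_table.h>

static bool unmap_ok(const struct gnttab_unmap_grant_ref *op)
{
        /*
         * Success, or nothing mapped behind this slot: only a failed
         * unmap of a still-valid handle is worth warning about.
         */
        return op->status == GNTST_okay ||
               op->handle == INVALID_GRANT_HANDLE;
}

With such a helper, the two checks above would read WARN_ON(!unmap_ok(&map->unmap_ops[offset + i])) and WARN_ON(!unmap_ok(&map->kunmap_ops[offset + i])).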