| author | Linus Torvalds <torvalds@linux-foundation.org> | 2024-11-21 00:40:54 +0100 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-11-21 00:40:54 +0100 |
| commit | 14d0e1a09fe97a7524ff36baa695900cb0c10c23 (patch) | |
| tree | accca02adb7453b2cec02cb9435b2678c243d6aa /drivers/firmware | |
| parent | Merge tag 'soc-dt-6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc (diff) | |
| parent | Merge tag 'soc_fsl-6.13-1' of https://github.com/chleroy/linux into soc/drivers (diff) | |
| download | linux-14d0e1a09fe97a7524ff36baa695900cb0c10c23.tar.xz linux-14d0e1a09fe97a7524ff36baa695900cb0c10c23.zip | |
Merge tag 'soc-drivers-6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull SoC driver updates from Arnd Bergmann:
"Nothing particular important in the SoC driver updates, just the usual
improvements to for drivers/soc and a couple of subsystems that don't
fit anywhere else:
- The largest set of updates is for Qualcomm SoC drivers, extending
the set of supported features for additional SoCs in the QSEECOM,
LLCC and socinfo drivers.a
- The ti_sci firmware driver gains support for power managment
- The drivers/reset subsystem sees a rework of the microchip sparx5
and amlogic reset drivers to support additional chips, plus a few
minor updates on other platforms
- The SCMI firmware interface driver gains support for two protocol
extensions, allowing more flexible use of the shared memory area
and new DT binding properties for configurability.
- Mediatek SoC drivers gain support for power management on the MT8188
SoC and a new driver for DVFS.
- The AMD/Xilinx ZynqMP SoC drivers gain support for system reboot
and a few bugfixes
- The Hisilicon Kunpeng HCCS driver gains support for configuring
lanes through sysfs
Finally, there are cleanups and minor fixes for drivers/{soc, bus,
memory}, including changing back the .remove_new callback to .remove,
as well as a few other updates for freescale (powerpc) soc drivers,
NXP i.MX soc drivers, cznic turris platform driver, memory controller
drivers, TI OMAP SoC drivers, and Tegra firmware drivers"
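
The SCMI bullet above corresponds to changes visible in the arm_scmi hunks further down: the system-wide atomic threshold moves from scmi_info into the transport descriptor and is now parsed from the `atomic-threshold-us` DT property alongside `arm,max-msg-size` and `arm,max-msg`. As a rough illustration, here is a minimal sketch of how a client driver might consume that threshold through the existing `is_transport_atomic` handle operation; the helper name and the latency comparison are illustrative assumptions, not code from this series:

```c
#include <linux/scmi_protocol.h>

/*
 * Minimal sketch (not part of this series): a client driver deciding
 * whether to issue SCMI commands in atomic mode, based on the
 * DT-configured "atomic-threshold-us" value now carried in the
 * transport descriptor. The helper name and expected_latency_us
 * parameter are illustrative assumptions.
 */
static bool my_driver_can_go_atomic(const struct scmi_handle *handle,
				    unsigned int expected_latency_us)
{
	unsigned int atomic_threshold_us = 0;

	/* False unless the transport is atomic- and polling-capable */
	if (!handle->is_transport_atomic(handle, &atomic_threshold_us))
		return false;

	/*
	 * Per the scmi_desc kernel-doc in the hunk below: only commands
	 * whose reported execution latency is less than or equal to the
	 * threshold should be considered for atomic operation; the final
	 * decision is left to the driver.
	 */
	return expected_latency_us <= atomic_threshold_us;
}
```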
* tag 'soc-drivers-6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (116 commits)
soc: fsl: cpm1: qmc: Set the ret error code on platform_get_irq() failure
soc: fsl: rcpm: fix missing of_node_put() in copy_ippdexpcr1_setting()
soc: fsl: cpm1: tsa: switch to for_each_available_child_of_node_scoped()
platform: cznic: turris-omnia-mcu: Rename variable holding GPIO line names
platform: cznic: turris-omnia-mcu: Document the driver private data structure
firmware: turris-mox-rwtm: Document the driver private data structure
bus: Switch back to struct platform_driver::remove()
soc: qcom: ice: Remove the device_link field in qcom_ice
drm/msm/adreno: Setup SMMU aparture for per-process page table
firmware: qcom: scm: Introduce CP_SMMU_APERTURE_ID
firmware: arm_scpi: Check the DVFS OPP count returned by the firmware
soc: qcom: socinfo: add IPQ5424/IPQ5404 SoC ID
dt-bindings: arm: qcom,ids: add SoC ID for IPQ5424/IPQ5404
soc: qcom: llcc: Flip the manual slice configuration condition
dt-bindings: firmware: qcom,scm: Document sm8750 SCM
firmware: qcom: uefisecapp: Allow X1E Devkit devices
misc: lan966x_pci: Fix dtc warn 'Missing interrupt-parent'
misc: lan966x_pci: Fix dtc warns 'missing or empty reg/ranges property'
soc: qcom: llcc: Add LLCC configuration for the QCS8300 platform
dt-bindings: cache: qcom,llcc: Document the QCS8300 LLCC
...
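
Two of the commits above ("firmware: qcom: scm: Introduce CP_SMMU_APERTURE_ID" and the drm/msm/adreno change) add and use the SCM aperture call that appears in the qcom_scm.c hunk below. A hedged sketch of the expected calling pattern from a GPU driver follows; the probe-time context and the context-bank value 0 are assumptions for illustration:

```c
#include <linux/device.h>
#include <linux/firmware/qcom/qcom_scm.h>

/*
 * Hedged sketch (not from this series): the calling pattern a GPU
 * driver might use with the two new SCM exports in the qcom_scm.c
 * hunk below. The context-bank number 0 is a placeholder.
 */
static int my_gpu_setup_smmu_aperture(struct device *dev)
{
	int ret;

	/* Older firmware lacks the call; treat that as a no-op */
	if (!qcom_scm_set_gpu_smmu_aperture_is_available())
		return 0;

	ret = qcom_scm_set_gpu_smmu_aperture(0);
	if (ret)
		dev_err(dev, "failed to set GPU SMMU aperture: %d\n", ret);

	return ret;
}
```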
Diffstat (limited to 'drivers/firmware')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/firmware/arm_scmi/common.h | 45 |
| -rw-r--r-- | drivers/firmware/arm_scmi/driver.c | 42 |
| -rw-r--r-- | drivers/firmware/arm_scmi/shmem.c | 85 |
| -rw-r--r-- | drivers/firmware/arm_scmi/transports/mailbox.c | 15 |
| -rw-r--r-- | drivers/firmware/arm_scmi/transports/optee.c | 19 |
| -rw-r--r-- | drivers/firmware/arm_scmi/transports/smc.c | 13 |
| -rw-r--r-- | drivers/firmware/arm_scmi/transports/virtio.c | 15 |
| -rw-r--r-- | drivers/firmware/arm_scpi.c | 3 |
| -rw-r--r-- | drivers/firmware/qcom/qcom_scm.c | 30 |
| -rw-r--r-- | drivers/firmware/qcom/qcom_scm.h | 1 |
| -rw-r--r-- | drivers/firmware/tegra/bpmp.c | 14 |
| -rw-r--r-- | drivers/firmware/ti_sci.c | 489 |
| -rw-r--r-- | drivers/firmware/ti_sci.h | 143 |
| -rw-r--r-- | drivers/firmware/turris-mox-rwtm.c | 23 |
| -rw-r--r-- | drivers/firmware/xilinx/zynqmp-debug.c | 162 |
| -rw-r--r-- | drivers/firmware/xilinx/zynqmp.c | 153 |

16 files changed, 1178 insertions, 74 deletions
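
The largest hunk in the patch body below, in ti_sci.c, populates a new ti_sci_pm_ops only when the firmware reports MSG_FLAG_CAPS_LPM_DM_MANAGED. Here is a minimal sketch of how a client holding a TI SCI handle might set a device constraint through those ops; it assumes the pm_ops layout and the TISCI_MSG_CONSTRAINT_SET value used in the hunks below are exposed to clients via ti_sci_protocol.h, and the device ID 42 is a placeholder:

```c
#include <linux/errno.h>
#include <linux/soc/ti/ti_sci_protocol.h>

/*
 * Minimal sketch (assumptions noted in the text above): place a
 * low-power-mode constraint on a device so the Device Manager
 * firmware will not power it off or reset it in the selected mode.
 * The device ID 42 is a placeholder.
 */
static int my_client_pin_device(const struct ti_sci_handle *handle)
{
	const struct ti_sci_pm_ops *pm_ops = &handle->ops.pm_ops;

	/* Only populated when fw_caps reports MSG_FLAG_CAPS_LPM_DM_MANAGED */
	if (!pm_ops->set_device_constraint)
		return -EOPNOTSUPP;

	return pm_ops->set_device_constraint(handle, 42,
					     TISCI_MSG_CONSTRAINT_SET);
}
```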
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index cdec50a698a1..48b12f81141d 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -31,6 +31,8 @@ #define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC) +#define SCMI_SHMEM_MAX_PAYLOAD_SIZE 104 + enum scmi_error_codes { SCMI_SUCCESS = 0, /* Success */ SCMI_ERR_SUPPORT = -1, /* Not supported */ @@ -165,6 +167,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); * channel * @is_p2a: A flag to identify a channel as P2A (RX) * @rx_timeout_ms: The configured RX timeout in milliseconds. + * @max_msg_size: Maximum size of message payload. * @handle: Pointer to SCMI entity handle * @no_completion_irq: Flag to indicate that this channel has no completion * interrupt mechanism for synchronous commands. @@ -177,6 +180,7 @@ struct scmi_chan_info { struct device *dev; bool is_p2a; unsigned int rx_timeout_ms; + unsigned int max_msg_size; struct scmi_handle *handle; bool no_completion_irq; void *transport_info; @@ -224,7 +228,13 @@ struct scmi_transport_ops { * @max_msg: Maximum number of messages for a channel type (tx or rx) that can * be pending simultaneously in the system. May be overridden by the * get_max_msg op. - * @max_msg_size: Maximum size of data per message that can be handled. + * @max_msg_size: Maximum size of data payload per message that can be handled. + * @atomic_threshold: Optional system wide DT-configured threshold, expressed + * in microseconds, for atomic operations. + * Only SCMI synchronous commands reported by the platform + * to have an execution latency lesser-equal to the threshold + * should be considered for atomic mode operation: such + * decision is finally left up to the SCMI drivers. * @force_polling: Flag to force this whole transport to use SCMI core polling * mechanism instead of completion interrupts even if available. * @sync_cmds_completed_on_ret: Flag to indicate that the transport assures @@ -243,6 +253,7 @@ struct scmi_desc { int max_rx_timeout_ms; int max_msg; int max_msg_size; + unsigned int atomic_threshold; const bool force_polling; const bool sync_cmds_completed_on_ret; const bool atomic_enabled; @@ -311,6 +322,26 @@ enum scmi_bad_msg { MSG_MBOX_SPURIOUS = -5, }; +/* Used for compactness and signature validation of the function pointers being + * passed. 
+ */ +typedef void (*shmem_copy_toio_t)(void __iomem *to, const void *from, + size_t count); +typedef void (*shmem_copy_fromio_t)(void *to, const void __iomem *from, + size_t count); + +/** + * struct scmi_shmem_io_ops - I/O operations to read from/write to + * Shared Memory + * + * @toio: Copy data to the shared memory area + * @fromio: Copy data from the shared memory area + */ +struct scmi_shmem_io_ops { + shmem_copy_fromio_t fromio; + shmem_copy_toio_t toio; +}; + /* shmem related declarations */ struct scmi_shared_mem; @@ -331,13 +362,16 @@ struct scmi_shared_mem; struct scmi_shared_mem_operations { void (*tx_prepare)(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer, - struct scmi_chan_info *cinfo); + struct scmi_chan_info *cinfo, + shmem_copy_toio_t toio); u32 (*read_header)(struct scmi_shared_mem __iomem *shmem); void (*fetch_response)(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer); + struct scmi_xfer *xfer, + shmem_copy_fromio_t fromio); void (*fetch_notification)(struct scmi_shared_mem __iomem *shmem, - size_t max_len, struct scmi_xfer *xfer); + size_t max_len, struct scmi_xfer *xfer, + shmem_copy_fromio_t fromio); void (*clear_channel)(struct scmi_shared_mem __iomem *shmem); bool (*poll_done)(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer); @@ -345,7 +379,8 @@ struct scmi_shared_mem_operations { bool (*channel_intr_enabled)(struct scmi_shared_mem __iomem *shmem); void __iomem *(*setup_iomap)(struct scmi_chan_info *cinfo, struct device *dev, - bool tx, struct resource *res); + bool tx, struct resource *res, + struct scmi_shmem_io_ops **ops); }; const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index f8934d049d68..1f53ca1f87e3 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -149,12 +149,6 @@ struct scmi_debug_info { * base protocol * @active_protocols: IDR storing device_nodes for protocols actually defined * in the DT and confirmed as implemented by fw. - * @atomic_threshold: Optional system wide DT-configured threshold, expressed - * in microseconds, for atomic operations. - * Only SCMI synchronous commands reported by the platform - * to have an execution latency lesser-equal to the threshold - * should be considered for atomic mode operation: such - * decision is finally left up to the SCMI drivers. * @notify_priv: Pointer to private data structure specific to notifications. 
* @node: List head * @users: Number of users of this instance @@ -180,7 +174,6 @@ struct scmi_info { struct mutex protocols_mtx; u8 *protocols_imp; struct idr active_protocols; - unsigned int atomic_threshold; void *notify_priv; struct list_head node; int users; @@ -2445,7 +2438,7 @@ static bool scmi_is_transport_atomic(const struct scmi_handle *handle, ret = info->desc->atomic_enabled && is_transport_polling_capable(info->desc); if (ret && atomic_threshold) - *atomic_threshold = info->atomic_threshold; + *atomic_threshold = info->desc->atomic_threshold; return ret; } @@ -2645,6 +2638,7 @@ static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node, cinfo->is_p2a = !tx; cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms; + cinfo->max_msg_size = info->desc->max_msg_size; /* Create a unique name for this transport device */ snprintf(name, 32, "__scmi_transport_device_%s_%02X", @@ -2958,7 +2952,7 @@ static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info) (char **)&dbg->name); debugfs_create_u32("atomic_threshold_us", 0400, top_dentry, - &info->atomic_threshold); + (u32 *)&info->desc->atomic_threshold); debugfs_create_str("type", 0400, trans, (char **)&dbg->type); @@ -3053,8 +3047,27 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev) if (ret && ret != -EINVAL) dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n"); - dev_info(dev, "SCMI max-rx-timeout: %dms\n", - trans->desc->max_rx_timeout_ms); + ret = of_property_read_u32(dev->of_node, "arm,max-msg-size", + &trans->desc->max_msg_size); + if (ret && ret != -EINVAL) + dev_err(dev, "Malformed arm,max-msg-size DT property.\n"); + + ret = of_property_read_u32(dev->of_node, "arm,max-msg", + &trans->desc->max_msg); + if (ret && ret != -EINVAL) + dev_err(dev, "Malformed arm,max-msg DT property.\n"); + + dev_info(dev, + "SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n", + trans->desc->max_rx_timeout_ms, trans->desc->max_msg_size, + trans->desc->max_msg); + + /* System wide atomic threshold for atomic ops .. if any */ + if (!of_property_read_u32(dev->of_node, "atomic-threshold-us", + &trans->desc->atomic_threshold)) + dev_info(dev, + "SCMI System wide atomic threshold set to %u us\n", + trans->desc->atomic_threshold); return trans->desc; } @@ -3105,13 +3118,6 @@ static int scmi_probe(struct platform_device *pdev) handle->devm_protocol_acquire = scmi_devm_protocol_acquire; handle->devm_protocol_get = scmi_devm_protocol_get; handle->devm_protocol_put = scmi_devm_protocol_put; - - /* System wide atomic threshold for atomic ops .. 
if any */ - if (!of_property_read_u32(np, "atomic-threshold-us", - &info->atomic_threshold)) - dev_info(dev, - "SCMI System wide atomic threshold set to %d us\n", - info->atomic_threshold); handle->is_transport_atomic = scmi_is_transport_atomic; /* Setup all channels described in the DT at first */ diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c index 01d8a9398fe8..11c347bff766 100644 --- a/drivers/firmware/arm_scmi/shmem.c +++ b/drivers/firmware/arm_scmi/shmem.c @@ -16,6 +16,8 @@ #include "common.h" +#define SCMI_SHMEM_LAYOUT_OVERHEAD 24 + /* * SCMI specification requires all parameters, message headers, return * arguments or any protocol data to be expressed in little endian @@ -34,9 +36,59 @@ struct scmi_shared_mem { u8 msg_payload[]; }; +static inline void shmem_memcpy_fromio32(void *to, + const void __iomem *from, + size_t count) +{ + WARN_ON(!IS_ALIGNED((unsigned long)from, 4) || + !IS_ALIGNED((unsigned long)to, 4) || + count % 4); + + __ioread32_copy(to, from, count / 4); +} + +static inline void shmem_memcpy_toio32(void __iomem *to, + const void *from, + size_t count) +{ + WARN_ON(!IS_ALIGNED((unsigned long)to, 4) || + !IS_ALIGNED((unsigned long)from, 4) || + count % 4); + + __iowrite32_copy(to, from, count / 4); +} + +static struct scmi_shmem_io_ops shmem_io_ops32 = { + .fromio = shmem_memcpy_fromio32, + .toio = shmem_memcpy_toio32, +}; + +/* Wrappers are needed for proper memcpy_{from,to}_io expansion by the + * pre-processor. + */ +static inline void shmem_memcpy_fromio(void *to, + const void __iomem *from, + size_t count) +{ + memcpy_fromio(to, from, count); +} + +static inline void shmem_memcpy_toio(void __iomem *to, + const void *from, + size_t count) +{ + memcpy_toio(to, from, count); +} + +static struct scmi_shmem_io_ops shmem_io_ops_default = { + .fromio = shmem_memcpy_fromio, + .toio = shmem_memcpy_toio, +}; + static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer, - struct scmi_chan_info *cinfo) + struct scmi_chan_info *cinfo, + shmem_copy_toio_t copy_toio) { ktime_t stop; @@ -73,7 +125,7 @@ static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length); iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header); if (xfer->tx.buf) - memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len); + copy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len); } static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) @@ -82,7 +134,8 @@ static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) } static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer) + struct scmi_xfer *xfer, + shmem_copy_fromio_t copy_fromio) { size_t len = ioread32(&shmem->length); @@ -91,11 +144,12 @@ static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0); /* Take a copy to the rx buffer.. */ - memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); + copy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); } static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, - size_t max_len, struct scmi_xfer *xfer) + size_t max_len, struct scmi_xfer *xfer, + shmem_copy_fromio_t copy_fromio) { size_t len = ioread32(&shmem->length); @@ -103,7 +157,7 @@ static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, xfer->rx.len = min_t(size_t, max_len, len > 4 ? 
len - 4 : 0); /* Take a copy to the rx buffer.. */ - memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len); + copy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len); } static void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem) @@ -139,7 +193,8 @@ static bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem) static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo, struct device *dev, bool tx, - struct resource *res) + struct resource *res, + struct scmi_shmem_io_ops **ops) { struct device_node *shmem __free(device_node); const char *desc = tx ? "Tx" : "Rx"; @@ -148,6 +203,7 @@ static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo, struct resource lres = {}; resource_size_t size; void __iomem *addr; + u32 reg_io_width; shmem = of_parse_phandle(cdev->of_node, "shmem", idx); if (!shmem) @@ -167,12 +223,27 @@ static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo, } size = resource_size(res); + if (cinfo->max_msg_size + SCMI_SHMEM_LAYOUT_OVERHEAD > size) { + dev_err(dev, "misconfigured SCMI shared memory\n"); + return IOMEM_ERR_PTR(-ENOSPC); + } + addr = devm_ioremap(dev, res->start, size); if (!addr) { dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc); return IOMEM_ERR_PTR(-EADDRNOTAVAIL); } + of_property_read_u32(shmem, "reg-io-width", ®_io_width); + switch (reg_io_width) { + case 4: + *ops = &shmem_io_ops32; + break; + default: + *ops = &shmem_io_ops_default; + break; + } + return addr; } diff --git a/drivers/firmware/arm_scmi/transports/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c index e3d5f7560990..b66df2981456 100644 --- a/drivers/firmware/arm_scmi/transports/mailbox.c +++ b/drivers/firmware/arm_scmi/transports/mailbox.c @@ -26,6 +26,7 @@ * @cinfo: SCMI channel info * @shmem: Transmit/Receive shared memory area * @chan_lock: Lock that prevents multiple xfers from being queued + * @io_ops: Transport specific I/O operations */ struct scmi_mailbox { struct mbox_client cl; @@ -35,6 +36,7 @@ struct scmi_mailbox { struct scmi_chan_info *cinfo; struct scmi_shared_mem __iomem *shmem; struct mutex chan_lock; + struct scmi_shmem_io_ops *io_ops; }; #define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl) @@ -45,7 +47,8 @@ static void tx_prepare(struct mbox_client *cl, void *m) { struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); - core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo); + core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo, + smbox->io_ops->toio); } static void rx_callback(struct mbox_client *cl, void *m) @@ -197,7 +200,8 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, if (!smbox) return -ENOMEM; - smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL); + smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL, + &smbox->io_ops); if (IS_ERR(smbox->shmem)) return PTR_ERR(smbox->shmem); @@ -305,7 +309,7 @@ static void mailbox_fetch_response(struct scmi_chan_info *cinfo, { struct scmi_mailbox *smbox = cinfo->transport_info; - core->shmem->fetch_response(smbox->shmem, xfer); + core->shmem->fetch_response(smbox->shmem, xfer, smbox->io_ops->fromio); } static void mailbox_fetch_notification(struct scmi_chan_info *cinfo, @@ -313,7 +317,8 @@ static void mailbox_fetch_notification(struct scmi_chan_info *cinfo, { struct scmi_mailbox *smbox = cinfo->transport_info; - core->shmem->fetch_notification(smbox->shmem, max_len, xfer); + core->shmem->fetch_notification(smbox->shmem, max_len, xfer, + smbox->io_ops->fromio); } 
static void mailbox_clear_channel(struct scmi_chan_info *cinfo) @@ -366,7 +371,7 @@ static struct scmi_desc scmi_mailbox_desc = { .ops = &scmi_mailbox_ops, .max_rx_timeout_ms = 30, /* We may increase this if required */ .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */ - .max_msg_size = 128, + .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE, }; static const struct of_device_id scmi_of_match[] = { diff --git a/drivers/firmware/arm_scmi/transports/optee.c b/drivers/firmware/arm_scmi/transports/optee.c index 56fc63edf51e..3949a877e17d 100644 --- a/drivers/firmware/arm_scmi/transports/optee.c +++ b/drivers/firmware/arm_scmi/transports/optee.c @@ -17,8 +17,6 @@ #include "../common.h" -#define SCMI_OPTEE_MAX_MSG_SIZE 128 - enum scmi_optee_pta_cmd { /* * PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities @@ -114,6 +112,7 @@ enum scmi_optee_pta_cmd { * @req.shmem: Virtual base address of the shared memory * @req.msg: Shared memory protocol handle for SCMI request and * synchronous response + * @io_ops: Transport specific I/O operations * @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem * @link: Reference in agent's channel list */ @@ -128,6 +127,7 @@ struct scmi_optee_channel { struct scmi_shared_mem __iomem *shmem; struct scmi_msg_payld *msg; } req; + struct scmi_shmem_io_ops *io_ops; struct tee_shm *tee_shm; struct list_head link; }; @@ -297,7 +297,7 @@ static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT; param[2].u.memref.shm = channel->tee_shm; - param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE; + param[2].u.memref.size = SCMI_SHMEM_MAX_PAYLOAD_SIZE; ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); if (ret < 0 || arg.ret) { @@ -330,7 +330,7 @@ static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo) static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel) { - const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE; + const size_t msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE; void *shbuf; channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size); @@ -350,7 +350,8 @@ static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *ch static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo, struct scmi_optee_channel *channel) { - channel->req.shmem = core->shmem->setup_iomap(cinfo, dev, true, NULL); + channel->req.shmem = core->shmem->setup_iomap(cinfo, dev, true, NULL, + &channel->io_ops); if (IS_ERR(channel->req.shmem)) return PTR_ERR(channel->req.shmem); @@ -465,7 +466,8 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo, ret = invoke_process_msg_channel(channel, core->msg->command_size(xfer)); } else { - core->shmem->tx_prepare(channel->req.shmem, xfer, cinfo); + core->shmem->tx_prepare(channel->req.shmem, xfer, cinfo, + channel->io_ops->toio); ret = invoke_process_smt_channel(channel); } @@ -484,7 +486,8 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, core->msg->fetch_response(channel->req.msg, channel->rx_len, xfer); else - core->shmem->fetch_response(channel->req.shmem, xfer); + core->shmem->fetch_response(channel->req.shmem, xfer, + channel->io_ops->fromio); } static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret, @@ -514,7 +517,7 @@ static struct scmi_desc scmi_optee_desc = { .ops = &scmi_optee_ops, .max_rx_timeout_ms = 30, .max_msg = 20, - .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE, + .max_msg_size = 
SCMI_SHMEM_MAX_PAYLOAD_SIZE, .sync_cmds_completed_on_ret = true, }; diff --git a/drivers/firmware/arm_scmi/transports/smc.c b/drivers/firmware/arm_scmi/transports/smc.c index f8dd108777f9..f632a62cfb3e 100644 --- a/drivers/firmware/arm_scmi/transports/smc.c +++ b/drivers/firmware/arm_scmi/transports/smc.c @@ -45,6 +45,7 @@ * @irq: An optional IRQ for completion * @cinfo: SCMI channel info * @shmem: Transmit/Receive shared memory area + * @io_ops: Transport specific I/O operations * @shmem_lock: Lock to protect access to Tx/Rx shared memory area. * Used when NOT operating in atomic mode. * @inflight: Atomic flag to protect access to Tx/Rx shared memory area. @@ -60,6 +61,7 @@ struct scmi_smc { int irq; struct scmi_chan_info *cinfo; struct scmi_shared_mem __iomem *shmem; + struct scmi_shmem_io_ops *io_ops; /* Protect access to shmem area */ struct mutex shmem_lock; #define INFLIGHT_NONE MSG_TOKEN_MAX @@ -144,7 +146,8 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, if (!scmi_info) return -ENOMEM; - scmi_info->shmem = core->shmem->setup_iomap(cinfo, dev, tx, &res); + scmi_info->shmem = core->shmem->setup_iomap(cinfo, dev, tx, &res, + &scmi_info->io_ops); if (IS_ERR(scmi_info->shmem)) return PTR_ERR(scmi_info->shmem); @@ -229,7 +232,8 @@ static int smc_send_message(struct scmi_chan_info *cinfo, */ smc_channel_lock_acquire(scmi_info, xfer); - core->shmem->tx_prepare(scmi_info->shmem, xfer, cinfo); + core->shmem->tx_prepare(scmi_info->shmem, xfer, cinfo, + scmi_info->io_ops->toio); if (scmi_info->cap_id != ULONG_MAX) arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0, @@ -253,7 +257,8 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo, { struct scmi_smc *scmi_info = cinfo->transport_info; - core->shmem->fetch_response(scmi_info->shmem, xfer); + core->shmem->fetch_response(scmi_info->shmem, xfer, + scmi_info->io_ops->fromio); } static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret, @@ -277,7 +282,7 @@ static struct scmi_desc scmi_smc_desc = { .ops = &scmi_smc_ops, .max_rx_timeout_ms = 30, .max_msg = 20, - .max_msg_size = 128, + .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE, /* * Setting .sync_cmds_atomic_replies to true for SMC assumes that, * once the SMC instruction has completed successfully, the issued diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c index d349766bc0b2..41aea33776a9 100644 --- a/drivers/firmware/arm_scmi/transports/virtio.c +++ b/drivers/firmware/arm_scmi/transports/virtio.c @@ -32,8 +32,8 @@ #define VIRTIO_MAX_RX_TIMEOUT_MS 60000 #define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */ -#define VIRTIO_SCMI_MAX_PDU_SIZE \ - (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD) +#define VIRTIO_SCMI_MAX_PDU_SIZE(ci) \ + ((ci)->max_msg_size + SCMI_MSG_MAX_PROT_OVERHEAD) #define DESCRIPTORS_PER_TX_MSG 2 /** @@ -90,6 +90,7 @@ enum poll_states { * @input: SDU used for (delayed) responses and notifications * @list: List which scmi_vio_msg may be part of * @rx_len: Input SDU size in bytes, once input has been received + * @max_len: Maximumm allowed SDU size in bytes * @poll_idx: Last used index registered for polling purposes if this message * transaction reply was configured for polling. * @poll_status: Polling state for this message. 
@@ -102,6 +103,7 @@ struct scmi_vio_msg { struct scmi_msg_payld *input; struct list_head list; unsigned int rx_len; + unsigned int max_len; unsigned int poll_idx; enum poll_states poll_status; /* Lock to protect access to poll_status */ @@ -234,7 +236,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, unsigned long flags; struct device *dev = &vioch->vqueue->vdev->dev; - sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE); + sg_init_one(&sg_in, msg->input, msg->max_len); spin_lock_irqsave(&vioch->lock, flags); @@ -439,9 +441,9 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, if (!msg) return -ENOMEM; + msg->max_len = VIRTIO_SCMI_MAX_PDU_SIZE(cinfo); if (tx) { - msg->request = devm_kzalloc(dev, - VIRTIO_SCMI_MAX_PDU_SIZE, + msg->request = devm_kzalloc(dev, msg->max_len, GFP_KERNEL); if (!msg->request) return -ENOMEM; @@ -449,8 +451,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, refcount_set(&msg->users, 1); } - msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE, - GFP_KERNEL); + msg->input = devm_kzalloc(dev, msg->max_len, GFP_KERNEL); if (!msg->input) return -ENOMEM; diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 94a6b4e667de..f4d47577f83e 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -630,6 +630,9 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain) if (ret) return ERR_PTR(ret); + if (!buf.opp_count) + return ERR_PTR(-ENOENT); + info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c index 2e4260ba5f79..72bf87ddcd96 100644 --- a/drivers/firmware/qcom/qcom_scm.c +++ b/drivers/firmware/qcom/qcom_scm.c @@ -904,6 +904,32 @@ int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) } EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg); +#define QCOM_SCM_CP_APERTURE_CONTEXT_MASK GENMASK(7, 0) + +bool qcom_scm_set_gpu_smmu_aperture_is_available(void) +{ + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, + QCOM_SCM_MP_CP_SMMU_APERTURE_ID); +} +EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available); + +int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID, + .arginfo = QCOM_SCM_ARGS(4), + .args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank), + .args[1] = 0xffffffff, + .args[2] = 0xffffffff, + .args[3] = 0xffffffff, + .owner = ARM_SMCCC_OWNER_SIP + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture); + int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { struct qcom_scm_desc desc = { @@ -1742,12 +1768,16 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send); + any potential issues with this, only allow validated machines for now. 
*/ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = { + { .compatible = "dell,xps13-9345" }, { .compatible = "lenovo,flex-5g" }, { .compatible = "lenovo,thinkpad-t14s" }, { .compatible = "lenovo,thinkpad-x13s", }, + { .compatible = "lenovo,yoga-slim7x" }, + { .compatible = "microsoft,arcata", }, { .compatible = "microsoft,romulus13", }, { .compatible = "microsoft,romulus15", }, { .compatible = "qcom,sc8180x-primus" }, + { .compatible = "qcom,x1e001de-devkit" }, { .compatible = "qcom,x1e80100-crd" }, { .compatible = "qcom,x1e80100-qcp" }, { } diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h index 685b8f59e7a6..e36b2f67607f 100644 --- a/drivers/firmware/qcom/qcom_scm.h +++ b/drivers/firmware/qcom/qcom_scm.h @@ -116,6 +116,7 @@ struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void); #define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05 #define QCOM_SCM_MP_VIDEO_VAR 0x08 #define QCOM_SCM_MP_ASSIGN 0x16 +#define QCOM_SCM_MP_CP_SMMU_APERTURE_ID 0x1b #define QCOM_SCM_MP_SHM_BRIDGE_ENABLE 0x1c #define QCOM_SCM_MP_SHM_BRIDGE_DELETE 0x1d #define QCOM_SCM_MP_SHM_BRIDGE_CREATE 0x1e diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index 2bee6e918f81..c3a1dc344961 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c @@ -3,7 +3,6 @@ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. */ -#include <linux/cleanup.h> #include <linux/clk/tegra.h> #include <linux/genalloc.h> #include <linux/mailbox_client.h> @@ -35,24 +34,29 @@ channel_to_ops(struct tegra_bpmp_channel *channel) struct tegra_bpmp *tegra_bpmp_get(struct device *dev) { - struct device_node *np __free(device_node); struct platform_device *pdev; struct tegra_bpmp *bpmp; + struct device_node *np; np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0); if (!np) return ERR_PTR(-ENOENT); pdev = of_find_device_by_node(np); - if (!pdev) - return ERR_PTR(-ENODEV); + if (!pdev) { + bpmp = ERR_PTR(-ENODEV); + goto put; + } bpmp = platform_get_drvdata(pdev); if (!bpmp) { + bpmp = ERR_PTR(-EPROBE_DEFER); put_device(&pdev->dev); - return ERR_PTR(-EPROBE_DEFER); + goto put; } +put: + of_node_put(np); return bpmp; } EXPORT_SYMBOL_GPL(tegra_bpmp_get); diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 160968301b1f..806a975fff22 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -2,13 +2,14 @@ /* * Texas Instruments System Control Interface Protocol Driver * - * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/bitmap.h> +#include <linux/cpu.h> #include <linux/debugfs.h> #include <linux/export.h> #include <linux/io.h> @@ -19,11 +20,14 @@ #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/pm_qos.h> #include <linux/property.h> #include <linux/semaphore.h> #include <linux/slab.h> #include <linux/soc/ti/ti-msgmgr.h> #include <linux/soc/ti/ti_sci_protocol.h> +#include <linux/suspend.h> +#include <linux/sys_soc.h> #include <linux/reboot.h> #include "ti_sci.h" @@ -98,6 +102,7 @@ struct ti_sci_desc { * @minfo: Message info * @node: list head * @host_id: Host ID + * @fw_caps: FW/SoC low power capabilities * @users: Number of users of this instance */ struct ti_sci_info { @@ -114,6 +119,7 @@ struct ti_sci_info { struct ti_sci_xfers_info minfo; struct list_head node; u8 host_id; + 
u64 fw_caps; /* protected by ti_sci_list_mutex */ int users; }; @@ -1651,6 +1657,364 @@ fail: return ret; } +/** + * ti_sci_cmd_prepare_sleep() - Prepare system for system suspend + * @handle: pointer to TI SCI handle + * @mode: Device identifier + * @ctx_lo: Low part of address for context save + * @ctx_hi: High part of address for context save + * @debug_flags: Debug flags to pass to firmware + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode, + u32 ctx_lo, u32 ctx_hi, u32 debug_flags) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_prepare_sleep *req; + struct ti_sci_msg_hdr *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + + req = (struct ti_sci_msg_req_prepare_sleep *)xfer->xfer_buf; + req->mode = mode; + req->ctx_lo = ctx_lo; + req->ctx_hi = ctx_hi; + req->debug_flags = debug_flags; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) { + dev_err(dev, "Failed to prepare sleep\n"); + ret = -ENODEV; + } + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_msg_cmd_query_fw_caps() - Get the FW/SoC capabilities + * @handle: Pointer to TI SCI handle + * @fw_caps: Each bit in fw_caps indicating one FW/SOC capability + * + * Check if the firmware supports any optional low power modes. + * Old revisions of TIFS (< 08.04) will NACK the request which results in + * -ENODEV being returned. + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_msg_cmd_query_fw_caps(const struct ti_sci_handle *handle, + u64 *fw_caps) +{ + struct ti_sci_info *info; + struct ti_sci_xfer *xfer; + struct ti_sci_msg_resp_query_fw_caps *resp; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_FW_CAPS, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(struct ti_sci_msg_hdr), + sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_resp_query_fw_caps *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) { + dev_err(dev, "Failed to get capabilities\n"); + ret = -ENODEV; + goto fail; + } + + if (fw_caps) + *fw_caps = resp->fw_caps; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_set_io_isolation() - Enable IO isolation in LPM + * @handle: Pointer to TI SCI handle + * @state: The desired state of the IO isolation + * + * Return: 0 if all went well, else returns appropriate error value. 
+ */ +static int ti_sci_cmd_set_io_isolation(const struct ti_sci_handle *handle, + u8 state) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_set_io_isolation *req; + struct ti_sci_msg_hdr *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_IO_ISOLATION, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_set_io_isolation *)xfer->xfer_buf; + req->state = state; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) { + dev_err(dev, "Failed to set IO isolation\n"); + ret = -ENODEV; + } + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_msg_cmd_lpm_wake_reason() - Get the wakeup source from LPM + * @handle: Pointer to TI SCI handle + * @source: The wakeup source that woke the SoC from LPM + * @timestamp: Timestamp of the wakeup event + * @pin: The pin that has triggered wake up + * @mode: The last entered low power mode + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_msg_cmd_lpm_wake_reason(const struct ti_sci_handle *handle, + u32 *source, u64 *timestamp, u8 *pin, u8 *mode) +{ + struct ti_sci_info *info; + struct ti_sci_xfer *xfer; + struct ti_sci_msg_resp_lpm_wake_reason *resp; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_WAKE_REASON, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(struct ti_sci_msg_hdr), + sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_resp_lpm_wake_reason *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) { + dev_err(dev, "Failed to get wake reason\n"); + ret = -ENODEV; + goto fail; + } + + if (source) + *source = resp->wake_source; + if (timestamp) + *timestamp = resp->wake_timestamp; + if (pin) + *pin = resp->wake_pin; + if (mode) + *mode = resp->mode; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_set_device_constraint() - Set LPM constraint on behalf of a device + * @handle: pointer to TI SCI handle + * @id: Device identifier + * @state: The desired state of device constraint: set or clear + * + * Return: 0 if all went well, else returns appropriate error value. 
+ */ +static int ti_sci_cmd_set_device_constraint(const struct ti_sci_handle *handle, + u32 id, u8 state) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_lpm_set_device_constraint *req; + struct ti_sci_msg_hdr *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_lpm_set_device_constraint *)xfer->xfer_buf; + req->id = id; + req->state = state; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) { + dev_err(dev, "Failed to set device constraint\n"); + ret = -ENODEV; + } + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_set_latency_constraint() - Set LPM resume latency constraint + * @handle: pointer to TI SCI handle + * @latency: maximum acceptable latency (in ms) to wake up from LPM + * @state: The desired state of latency constraint: set or clear + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_set_latency_constraint(const struct ti_sci_handle *handle, + u16 latency, u8 state) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_lpm_set_latency_constraint *req; + struct ti_sci_msg_hdr *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_lpm_set_latency_constraint *)xfer->xfer_buf; + req->latency = latency; + req->state = state; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) { + dev_err(dev, "Failed to set device constraint\n"); + ret = -ENODEV; + } + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle) { struct ti_sci_info *info; @@ -2793,6 +3157,7 @@ static void ti_sci_setup_ops(struct ti_sci_info *info) struct ti_sci_core_ops *core_ops = &ops->core_ops; struct ti_sci_dev_ops *dops = &ops->dev_ops; struct ti_sci_clk_ops *cops = &ops->clk_ops; + struct ti_sci_pm_ops *pmops = &ops->pm_ops; struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops; struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops; struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops; @@ -2832,6 +3197,13 @@ static void ti_sci_setup_ops(struct ti_sci_info *info) cops->set_freq = ti_sci_cmd_clk_set_freq; cops->get_freq = ti_sci_cmd_clk_get_freq; + if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) { + pr_debug("detected DM managed LPM in fw_caps\n"); + pmops->lpm_wake_reason = ti_sci_msg_cmd_lpm_wake_reason; + pmops->set_device_constraint = 
ti_sci_cmd_set_device_constraint; + pmops->set_latency_constraint = ti_sci_cmd_set_latency_constraint; + } + rm_core_ops->get_range = ti_sci_cmd_get_resource_range; rm_core_ops->get_range_from_shost = ti_sci_cmd_get_resource_range_from_shost; @@ -3262,6 +3634,111 @@ static int tisci_reboot_handler(struct sys_off_data *data) return NOTIFY_BAD; } +static int ti_sci_prepare_system_suspend(struct ti_sci_info *info) +{ + /* + * Map and validate the target Linux suspend state to TISCI LPM. + * Default is to let Device Manager select the low power mode. + */ + switch (pm_suspend_target_state) { + case PM_SUSPEND_MEM: + if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) { + /* + * For the DM_MANAGED mode the context is reserved for + * internal use and can be 0 + */ + return ti_sci_cmd_prepare_sleep(&info->handle, + TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED, + 0, 0, 0); + } else { + /* DM Managed is not supported by the firmware. */ + dev_err(info->dev, "Suspend to memory is not supported by the firmware\n"); + return -EOPNOTSUPP; + } + break; + default: + /* + * Do not fail if we don't have action to take for a + * specific suspend mode. + */ + return 0; + } +} + +static int __maybe_unused ti_sci_suspend(struct device *dev) +{ + struct ti_sci_info *info = dev_get_drvdata(dev); + struct device *cpu_dev, *cpu_dev_max = NULL; + s32 val, cpu_lat = 0; + int i, ret; + + if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) { + for_each_possible_cpu(i) { + cpu_dev = get_cpu_device(i); + val = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_RESUME_LATENCY); + if (val != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) { + cpu_lat = max(cpu_lat, val); + cpu_dev_max = cpu_dev; + } + } + if (cpu_dev_max) { + dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u\n", __func__, cpu_lat); + ret = ti_sci_cmd_set_latency_constraint(&info->handle, + cpu_lat, TISCI_MSG_CONSTRAINT_SET); + if (ret) + return ret; + } + } + + ret = ti_sci_prepare_system_suspend(info); + if (ret) + return ret; + + return 0; +} + +static int __maybe_unused ti_sci_suspend_noirq(struct device *dev) +{ + struct ti_sci_info *info = dev_get_drvdata(dev); + int ret = 0; + + ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE); + if (ret) + return ret; + + return 0; +} + +static int __maybe_unused ti_sci_resume_noirq(struct device *dev) +{ + struct ti_sci_info *info = dev_get_drvdata(dev); + int ret = 0; + u32 source; + u64 time; + u8 pin; + u8 mode; + + ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE); + if (ret) + return ret; + + ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode); + /* Do not fail to resume on error as the wake reason is not critical */ + if (!ret) + dev_info(dev, "ti_sci: wakeup source:0x%x, pin:0x%x, mode:0x%x\n", + source, pin, mode); + + return 0; +} + +static const struct dev_pm_ops ti_sci_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = ti_sci_suspend, + .suspend_noirq = ti_sci_suspend_noirq, + .resume_noirq = ti_sci_resume_noirq, +#endif +}; + /* Description for K2G */ static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = { .default_host_id = 2, @@ -3390,6 +3867,13 @@ static int ti_sci_probe(struct platform_device *pdev) goto out; } + ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps); + dev_dbg(dev, "Detected firmware capabilities: %s%s%s\n", + info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "", + info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "", + info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? 
" DM-Managed" : "" + ); + ti_sci_setup_ops(info); ret = devm_register_restart_handler(dev, tisci_reboot_handler, info); @@ -3421,8 +3905,9 @@ static struct platform_driver ti_sci_driver = { .probe = ti_sci_probe, .driver = { .name = "ti-sci", - .of_match_table = of_match_ptr(ti_sci_of_match), + .of_match_table = ti_sci_of_match, .suppress_bind_attrs = true, + .pm = &ti_sci_pm_ops, }, }; module_platform_driver(ti_sci_driver); diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h index 5846c60220f5..053387d7baa0 100644 --- a/drivers/firmware/ti_sci.h +++ b/drivers/firmware/ti_sci.h @@ -6,7 +6,7 @@ * The system works in a message response protocol * See: https://software-dl.ti.com/tisci/esd/latest/index.html for details * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __TI_SCI_H @@ -19,6 +19,7 @@ #define TI_SCI_MSG_WAKE_REASON 0x0003 #define TI_SCI_MSG_GOODBYE 0x0004 #define TI_SCI_MSG_SYS_RESET 0x0005 +#define TI_SCI_MSG_QUERY_FW_CAPS 0x0022 /* Device requests */ #define TI_SCI_MSG_SET_DEVICE_STATE 0x0200 @@ -35,6 +36,13 @@ #define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d #define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e +/* Low Power Mode Requests */ +#define TI_SCI_MSG_PREPARE_SLEEP 0x0300 +#define TI_SCI_MSG_LPM_WAKE_REASON 0x0306 +#define TI_SCI_MSG_SET_IO_ISOLATION 0x0307 +#define TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT 0x0309 +#define TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT 0x030A + /* Resource Management Requests */ #define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500 @@ -133,6 +141,27 @@ struct ti_sci_msg_req_reboot { } __packed; /** + * struct ti_sci_msg_resp_query_fw_caps - Response for query firmware caps + * @hdr: Generic header + * @fw_caps: Each bit in fw_caps indicating one FW/SOC capability + * MSG_FLAG_CAPS_GENERIC: Generic capability (LPM not supported) + * MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM + * MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM + * + * Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS + * providing currently available SOC/firmware capabilities. SoC that don't + * support low power modes return only MSG_FLAG_CAPS_GENERIC capability. + */ +struct ti_sci_msg_resp_query_fw_caps { + struct ti_sci_msg_hdr hdr; +#define MSG_FLAG_CAPS_GENERIC TI_SCI_MSG_FLAG(0) +#define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4) +#define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5) +#define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1) + u64 fw_caps; +} __packed; + +/** * struct ti_sci_msg_req_set_device_state - Set the desired state of the device * @hdr: Generic header * @id: Indicates which device to modify @@ -545,6 +574,118 @@ struct ti_sci_msg_resp_get_clock_freq { u64 freq_hz; } __packed; +/** + * struct tisci_msg_req_prepare_sleep - Request for TISCI_MSG_PREPARE_SLEEP. + * + * @hdr TISCI header to provide ACK/NAK flags to the host. + * @mode Low power mode to enter. + * @ctx_lo Low 32-bits of physical pointer to address to use for context save. + * @ctx_hi High 32-bits of physical pointer to address to use for context save. + * @debug_flags Flags that can be set to halt the sequence during suspend or + * resume to allow JTAG connection and debug. + * + * This message is used as the first step of entering a low power mode. It + * allows configurable information, including which state to enter to be + * easily shared from the application, as this is a non-secure message and + * therefore can be sent by anyone. 
+ */ +struct ti_sci_msg_req_prepare_sleep { + struct ti_sci_msg_hdr hdr; + +#define TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED 0xfd + u8 mode; + u32 ctx_lo; + u32 ctx_hi; + u32 debug_flags; +} __packed; + +/** + * struct tisci_msg_set_io_isolation_req - Request for TI_SCI_MSG_SET_IO_ISOLATION. + * + * @hdr: Generic header + * @state: The deseared state of the IO isolation. + * + * This message is used to enable/disable IO isolation for low power modes. + * Response is generic ACK / NACK message. + */ +struct ti_sci_msg_req_set_io_isolation { + struct ti_sci_msg_hdr hdr; + u8 state; +} __packed; + +/** + * struct ti_sci_msg_resp_lpm_wake_reason - Response for TI_SCI_MSG_LPM_WAKE_REASON. + * + * @hdr: Generic header. + * @wake_source: The wake up source that woke soc from LPM. + * @wake_timestamp: Timestamp at which soc woke. + * @wake_pin: The pin that has triggered wake up. + * @mode: The last entered low power mode. + * @rsvd: Reserved for future use. + * + * Response to a generic message with message type TI_SCI_MSG_LPM_WAKE_REASON, + * used to query the wake up source, pin and entered low power mode. + */ +struct ti_sci_msg_resp_lpm_wake_reason { + struct ti_sci_msg_hdr hdr; + u32 wake_source; + u64 wake_timestamp; + u8 wake_pin; + u8 mode; + u32 rsvd[2]; +} __packed; + +/** + * struct ti_sci_msg_req_lpm_set_device_constraint - Request for + * TISCI_MSG_LPM_SET_DEVICE_CONSTRAINT. + * + * @hdr: TISCI header to provide ACK/NAK flags to the host. + * @id: Device ID of device whose constraint has to be modified. + * @state: The desired state of device constraint: set or clear. + * @rsvd: Reserved for future use. + * + * This message is used by host to set constraint on the device. This can be + * sent anytime after boot before prepare sleep message. Any device can set a + * constraint on the low power mode that the SoC can enter. It allows + * configurable information to be easily shared from the application, as this + * is a non-secure message and therefore can be sent by anyone. By setting a + * constraint, the device ensures that it will not be powered off or reset in + * the selected mode. Note: Access Restriction: Exclusivity flag of Device will + * be honored. If some other host already has constraint on this device ID, + * NACK will be returned. + */ +struct ti_sci_msg_req_lpm_set_device_constraint { + struct ti_sci_msg_hdr hdr; + u32 id; + u8 state; + u32 rsvd[2]; +} __packed; + +/** + * struct ti_sci_msg_req_lpm_set_latency_constraint - Request for + * TISCI_MSG_LPM_SET_LATENCY_CONSTRAINT. + * + * @hdr: TISCI header to provide ACK/NAK flags to the host. + * @wkup_latency: The maximum acceptable latency to wake up from low power mode + * in milliseconds. The deeper the state, the higher the latency. + * @state: The desired state of wakeup latency constraint: set or clear. + * @rsvd: Reserved for future use. + * + * This message is used by host to set wakeup latency from low power mode. This can + * be sent anytime after boot before prepare sleep message, and can be sent after + * current low power mode is exited. Any device can set a constraint on the low power + * mode that the SoC can enter. It allows configurable information to be easily shared + * from the application, as this is a non-secure message and therefore can be sent by + * anyone. By setting a wakeup latency constraint, the host ensures that the resume time + * from selected low power mode will be less than the constraint value. 
+ */ +struct ti_sci_msg_req_lpm_set_latency_constraint { + struct ti_sci_msg_hdr hdr; + u16 latency; + u8 state; + u32 rsvd; +} __packed; + #define TI_SCI_IRQ_SECONDARY_HOST_INVALID 0xff /** diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c index f3bc0d427825..47fe6261f5a3 100644 --- a/drivers/firmware/turris-mox-rwtm.c +++ b/drivers/firmware/turris-mox-rwtm.c @@ -61,6 +61,27 @@ enum mbox_cmd { MBOX_CMD_OTP_WRITE = 8, }; +/** + * struct mox_rwtm - driver private data structure + * @mbox_client: rWTM mailbox client + * @mbox: rWTM mailbox channel + * @hwrng: RNG driver structure + * @reply: last mailbox reply, filled in receive callback + * @buf: DMA buffer + * @buf_phys: physical address of the DMA buffer + * @busy: mutex to protect mailbox command execution + * @cmd_done: command done completion + * @has_board_info: whether board information is present + * @serial_number: serial number of the device + * @board_version: board version / revision of the device + * @ram_size: RAM size of the device + * @mac_address1: first MAC address of the device + * @mac_address2: second MAC address of the device + * @has_pubkey: whether board ECDSA public key is present + * @pubkey: board ECDSA public key + * @last_sig: last ECDSA signature generated with board ECDSA private key + * @last_sig_done: whether the last ECDSA signing is complete + */ struct mox_rwtm { struct mbox_client mbox_client; struct mbox_chan *mbox; @@ -74,13 +95,11 @@ struct mox_rwtm { struct mutex busy; struct completion cmd_done; - /* board information */ bool has_board_info; u64 serial_number; int board_version, ram_size; u8 mac_address1[ETH_ALEN], mac_address2[ETH_ALEN]; - /* public key burned in eFuse */ bool has_pubkey; u8 pubkey[135]; diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c index 8528850af889..22853ae0efdf 100644 --- a/drivers/firmware/xilinx/zynqmp-debug.c +++ b/drivers/firmware/xilinx/zynqmp-debug.c @@ -31,13 +31,51 @@ static char debugfs_buf[PAGE_SIZE]; #define PM_API(id) {id, #id, strlen(#id)} static struct pm_api_info pm_api_list[] = { + PM_API(PM_FORCE_POWERDOWN), + PM_API(PM_REQUEST_WAKEUP), + PM_API(PM_SYSTEM_SHUTDOWN), + PM_API(PM_REQUEST_NODE), + PM_API(PM_RELEASE_NODE), + PM_API(PM_SET_REQUIREMENT), PM_API(PM_GET_API_VERSION), + PM_API(PM_REGISTER_NOTIFIER), + PM_API(PM_RESET_ASSERT), + PM_API(PM_RESET_GET_STATUS), + PM_API(PM_GET_CHIPID), + PM_API(PM_PINCTRL_SET_FUNCTION), + PM_API(PM_PINCTRL_CONFIG_PARAM_GET), + PM_API(PM_PINCTRL_CONFIG_PARAM_SET), + PM_API(PM_IOCTL), + PM_API(PM_CLOCK_ENABLE), + PM_API(PM_CLOCK_DISABLE), + PM_API(PM_CLOCK_GETSTATE), + PM_API(PM_CLOCK_SETDIVIDER), + PM_API(PM_CLOCK_GETDIVIDER), + PM_API(PM_CLOCK_SETPARENT), + PM_API(PM_CLOCK_GETPARENT), PM_API(PM_QUERY_DATA), }; static struct dentry *firmware_debugfs_root; /** + * zynqmp_pm_ioctl - PM IOCTL for device control and configs + * @node: Node ID of the device + * @ioctl: ID of the requested IOCTL + * @arg1: Argument 1 of requested IOCTL call + * @arg2: Argument 2 of requested IOCTL call + * @arg3: Argument 3 of requested IOCTL call + * @out: Returned output value + * + * Return: Returns status, either success or error+reason + */ +static int zynqmp_pm_ioctl(const u32 node, const u32 ioctl, const u32 arg1, + const u32 arg2, const u32 arg3, u32 *out) +{ + return zynqmp_pm_invoke_fn(PM_IOCTL, out, 5, node, ioctl, arg1, arg2, arg3); +} + +/** * zynqmp_pm_argument_value() - Extract argument value from a PM-API request * @arg: Entered PM-API argument in 
string format * @@ -95,6 +133,128 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret) sprintf(debugfs_buf, "PM-API Version = %d.%d\n", pm_api_version >> 16, pm_api_version & 0xffff); break; + case PM_FORCE_POWERDOWN: + ret = zynqmp_pm_force_pwrdwn(pm_api_arg[0], + pm_api_arg[1] ? pm_api_arg[1] : + ZYNQMP_PM_REQUEST_ACK_NO); + break; + case PM_REQUEST_WAKEUP: + ret = zynqmp_pm_request_wake(pm_api_arg[0], + pm_api_arg[1], pm_api_arg[2], + pm_api_arg[3] ? pm_api_arg[3] : + ZYNQMP_PM_REQUEST_ACK_NO); + break; + case PM_SYSTEM_SHUTDOWN: + ret = zynqmp_pm_system_shutdown(pm_api_arg[0], pm_api_arg[1]); + break; + case PM_REQUEST_NODE: + ret = zynqmp_pm_request_node(pm_api_arg[0], + pm_api_arg[1] ? pm_api_arg[1] : + ZYNQMP_PM_CAPABILITY_ACCESS, + pm_api_arg[2] ? pm_api_arg[2] : 0, + pm_api_arg[3] ? pm_api_arg[3] : + ZYNQMP_PM_REQUEST_ACK_BLOCKING); + break; + case PM_RELEASE_NODE: + ret = zynqmp_pm_release_node(pm_api_arg[0]); + break; + case PM_SET_REQUIREMENT: + ret = zynqmp_pm_set_requirement(pm_api_arg[0], + pm_api_arg[1] ? pm_api_arg[1] : + ZYNQMP_PM_CAPABILITY_CONTEXT, + pm_api_arg[2] ? + pm_api_arg[2] : 0, + pm_api_arg[3] ? pm_api_arg[3] : + ZYNQMP_PM_REQUEST_ACK_BLOCKING); + break; + case PM_REGISTER_NOTIFIER: + ret = zynqmp_pm_register_notifier(pm_api_arg[0], + pm_api_arg[1] ? + pm_api_arg[1] : 0, + pm_api_arg[2] ? + pm_api_arg[2] : 0, + pm_api_arg[3] ? + pm_api_arg[3] : 0); + break; + case PM_RESET_ASSERT: + ret = zynqmp_pm_reset_assert(pm_api_arg[0], pm_api_arg[1]); + break; + case PM_RESET_GET_STATUS: + ret = zynqmp_pm_reset_get_status(pm_api_arg[0], &pm_api_ret[0]); + if (!ret) + sprintf(debugfs_buf, "Reset status: %u\n", + pm_api_ret[0]); + break; + case PM_GET_CHIPID: + ret = zynqmp_pm_get_chipid(&pm_api_ret[0], &pm_api_ret[1]); + if (!ret) + sprintf(debugfs_buf, "Idcode: %#x, Version:%#x\n", + pm_api_ret[0], pm_api_ret[1]); + break; + case PM_PINCTRL_SET_FUNCTION: + ret = zynqmp_pm_pinctrl_set_function(pm_api_arg[0], + pm_api_arg[1]); + break; + case PM_PINCTRL_CONFIG_PARAM_GET: + ret = zynqmp_pm_pinctrl_get_config(pm_api_arg[0], pm_api_arg[1], + &pm_api_ret[0]); + if (!ret) + sprintf(debugfs_buf, + "Pin: %llu, Param: %llu, Value: %u\n", + pm_api_arg[0], pm_api_arg[1], + pm_api_ret[0]); + break; + case PM_PINCTRL_CONFIG_PARAM_SET: + ret = zynqmp_pm_pinctrl_set_config(pm_api_arg[0], + pm_api_arg[1], + pm_api_arg[2]); + break; + case PM_IOCTL: + ret = zynqmp_pm_ioctl(pm_api_arg[0], pm_api_arg[1], + pm_api_arg[2], pm_api_arg[3], + pm_api_arg[4], &pm_api_ret[0]); + if (!ret && (pm_api_arg[1] == IOCTL_GET_RPU_OPER_MODE || + pm_api_arg[1] == IOCTL_GET_PLL_FRAC_MODE || + pm_api_arg[1] == IOCTL_GET_PLL_FRAC_DATA || + pm_api_arg[1] == IOCTL_READ_GGS || + pm_api_arg[1] == IOCTL_READ_PGGS || + pm_api_arg[1] == IOCTL_READ_REG)) + sprintf(debugfs_buf, "IOCTL return value: %u\n", + pm_api_ret[1]); + if (!ret && pm_api_arg[1] == IOCTL_GET_QOS) + sprintf(debugfs_buf, "Default QoS: %u\nCurrent QoS: %u\n", + pm_api_ret[1], pm_api_ret[2]); + break; + case PM_CLOCK_ENABLE: + ret = zynqmp_pm_clock_enable(pm_api_arg[0]); + break; + case PM_CLOCK_DISABLE: + ret = zynqmp_pm_clock_disable(pm_api_arg[0]); + break; + case PM_CLOCK_GETSTATE: + ret = zynqmp_pm_clock_getstate(pm_api_arg[0], &pm_api_ret[0]); + if (!ret) + sprintf(debugfs_buf, "Clock state: %u\n", + pm_api_ret[0]); + break; + case PM_CLOCK_SETDIVIDER: + ret = zynqmp_pm_clock_setdivider(pm_api_arg[0], pm_api_arg[1]); + break; + case PM_CLOCK_GETDIVIDER: + ret = zynqmp_pm_clock_getdivider(pm_api_arg[0], &pm_api_ret[0]); + 
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index add8acf66a9c..cdb565c41119 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -3,7 +3,7 @@
  * Xilinx Zynq MPSoC Firmware layer
  *
  * Copyright (C) 2014-2022 Xilinx, Inc.
- * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
+ * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc.
  *
  * Michal Simek <michal.simek@amd.com>
  * Davorin Mista <davorin.mista@aggios.com>
@@ -46,6 +46,7 @@ static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
 static u32 ioctl_features[FEATURE_PAYLOAD_SIZE];
 static u32 query_features[FEATURE_PAYLOAD_SIZE];
 
+static u32 sip_svc_version;
 static struct platform_device *em_dev;
 
 /**
@@ -151,6 +152,9 @@ static noinline int do_fw_call_smc(u32 *ret_payload, u32 num_args, ...)
 		ret_payload[1] = upper_32_bits(res.a0);
 		ret_payload[2] = lower_32_bits(res.a1);
 		ret_payload[3] = upper_32_bits(res.a1);
+		ret_payload[4] = lower_32_bits(res.a2);
+		ret_payload[5] = upper_32_bits(res.a2);
+		ret_payload[6] = lower_32_bits(res.a3);
 	}
 
 	return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
@@ -191,6 +195,9 @@ static noinline int do_fw_call_hvc(u32 *ret_payload, u32 num_args, ...)
 		ret_payload[1] = upper_32_bits(res.a0);
 		ret_payload[2] = lower_32_bits(res.a1);
 		ret_payload[3] = upper_32_bits(res.a1);
+		ret_payload[4] = lower_32_bits(res.a2);
+		ret_payload[5] = upper_32_bits(res.a2);
+		ret_payload[6] = lower_32_bits(res.a3);
 	}
 
 	return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
@@ -218,11 +225,14 @@ static int __do_feature_check_call(const u32 api_id, u32 *ret_payload)
 	 * Feature check of TF-A APIs is done in the TF-A layer and it expects for
 	 * MODULE_ID_MASK bits of SMC's arg[0] to be the same as PM_MODULE_ID.
 	 */
-	if (module_id == TF_A_MODULE_ID)
+	if (module_id == TF_A_MODULE_ID) {
 		module_id = PM_MODULE_ID;
+		smc_arg[1] = api_id;
+	} else {
+		smc_arg[1] = (api_id & API_ID_MASK);
+	}
 
 	smc_arg[0] = PM_SIP_SVC | FIELD_PREP(MODULE_ID_MASK, module_id) | feature_check_api_id;
-	smc_arg[1] = (api_id & API_ID_MASK);
 
 	ret = do_fw_call(ret_payload, 2, smc_arg[0], smc_arg[1]);
 	if (ret)
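The do_fw_call_smc()/do_fw_call_hvc() hunks above widen the return path from four to seven 32-bit payload words: the SMCCC result registers a0..a3 are 64-bit, and pass-through replies now use the low and high halves of a2 plus the low half of a3 as well. A minimal userspace model of that unpacking, where the struct and the lo32()/hi32() helpers stand in for the kernel's arm_smccc_res, lower_32_bits() and upper_32_bits():

#include <stdint.h>
#include <stdio.h>

struct smccc_res { uint64_t a0, a1, a2, a3; };

static inline uint32_t lo32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

static void unpack_payload(const struct smccc_res *res, uint32_t p[7])
{
	p[0] = lo32(res->a0);	/* PM return status */
	p[1] = hi32(res->a0);
	p[2] = lo32(res->a1);
	p[3] = hi32(res->a1);
	p[4] = lo32(res->a2);	/* newly unpacked in this change */
	p[5] = hi32(res->a2);	/* newly unpacked in this change */
	p[6] = lo32(res->a3);	/* newly unpacked in this change */
}

int main(void)
{
	struct smccc_res res = { 0x1122334400000000ULL, 1, 2, 3 };
	uint32_t p[7];

	unpack_payload(&res, p);
	printf("status=%u p[1]=0x%x p[4]=%u\n", p[0], p[1], p[4]);
	return 0;
}

Seven words is exactly what a PM_QUERY_DATA reply needs once the pass-through path routes full payloads back through the SMC registers.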
@@ -332,6 +342,70 @@ int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
 EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported);
 
 /**
+ * zynqmp_pm_invoke_fw_fn() - Invoke the system-level platform management layer
+ *			      caller function depending on the configuration
+ * @pm_api_id:		Requested PM-API call
+ * @ret_payload:	Returned value array
+ * @num_args:		Number of arguments to requested PM-API call
+ *
+ * Invoke platform management function for SMC or HVC call, depending on
+ * configuration.
+ * Following SMC Calling Convention (SMCCC) for SMC64:
+ * Pm Function Identifier,
+ * PM_SIP_SVC + PASS_THROUGH_FW_CMD_ID =
+ *	((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT)
+ *	((SMC_64) << FUNCID_CC_SHIFT)
+ *	((SIP_START) << FUNCID_OEN_SHIFT)
+ *	(PASS_THROUGH_FW_CMD_ID))
+ *
+ * PM_SIP_SVC - Registered ZynqMP SIP Service Call.
+ * PASS_THROUGH_FW_CMD_ID - Fixed SiP SVC call ID for FW specific calls.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_invoke_fw_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...)
+{
+	/*
+	 * Added SIP service call Function Identifier
+	 * Make sure to stay in x0 register
+	 */
+	u64 smc_arg[SMC_ARG_CNT_64];
+	int ret, i;
+	va_list arg_list;
+	u32 args[SMC_ARG_CNT_32] = {0};
+	u32 module_id;
+
+	if (num_args > SMC_ARG_CNT_32)
+		return -EINVAL;
+
+	va_start(arg_list, num_args);
+
+	/* Check if feature is supported or not */
+	ret = zynqmp_pm_feature(pm_api_id);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < num_args; i++)
+		args[i] = va_arg(arg_list, u32);
+
+	va_end(arg_list);
+
+	module_id = FIELD_GET(PLM_MODULE_ID_MASK, pm_api_id);
+
+	if (module_id == 0)
+		module_id = XPM_MODULE_ID;
+
+	smc_arg[0] = PM_SIP_SVC | PASS_THROUGH_FW_CMD_ID;
+	smc_arg[1] = ((u64)args[0] << 32U) | FIELD_PREP(PLM_MODULE_ID_MASK, module_id) |
+		     (pm_api_id & API_ID_MASK);
+	for (i = 1; i < (SMC_ARG_CNT_64 - 1); i++)
+		smc_arg[i + 1] = ((u64)args[(i * 2)] << 32U) | args[(i * 2) - 1];
+
+	return do_fw_call(ret_payload, 8, smc_arg[0], smc_arg[1], smc_arg[2], smc_arg[3],
+			  smc_arg[4], smc_arg[5], smc_arg[6], smc_arg[7]);
+}
+
+/**
  * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
  *			   caller function depending on the configuration
  * @pm_api_id:		Requested PM-API call
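The packing in zynqmp_pm_invoke_fw_fn() fits up to 13 32-bit arguments into 8 64-bit SMC registers: x0 carries the pass-through function ID, the low word of x1 carries the API and module IDs while its high word carries args[0], and each later register carries two more arguments. A worked model of that math, with placeholder constant values (the real definitions live in the kernel headers) and a simplified module-ID shift standing in for FIELD_PREP():

#include <stdint.h>
#include <stdio.h>

#define SMC_ARG_CNT_64	8
#define SMC_ARG_CNT_32	13
/* Placeholder IDs for illustration only */
#define PM_SIP_SVC		0xC2000000U
#define PASS_THROUGH_FW_CMD_ID	0x12U
#define API_ID_MASK		0xFFU

static void pack_args(uint32_t api_id, uint32_t module_id,
		      const uint32_t args[SMC_ARG_CNT_32],
		      uint64_t smc_arg[SMC_ARG_CNT_64])
{
	int i;

	smc_arg[0] = PM_SIP_SVC | PASS_THROUGH_FW_CMD_ID;
	/* API/module ID share x1's low word with args[0] above them */
	smc_arg[1] = ((uint64_t)args[0] << 32) | (module_id << 8) |
		     (api_id & API_ID_MASK);
	/* args[1]/args[2] -> x2, args[3]/args[4] -> x3, ... */
	for (i = 1; i < SMC_ARG_CNT_64 - 1; i++)
		smc_arg[i + 1] = ((uint64_t)args[i * 2] << 32) |
				 args[(i * 2) - 1];
}

int main(void)
{
	uint32_t args[SMC_ARG_CNT_32] = { 1, 2, 3, 4, 5 };
	uint64_t smc[SMC_ARG_CNT_64];

	pack_args(0x23, 2, args, smc);
	printf("x1=0x%llx x2=0x%llx\n",
	       (unsigned long long)smc[1], (unsigned long long)smc[2]);
	return 0;
}

Because the ID fields occupy x1's low word, only 13 (not 14) 32-bit arguments fit in the seven data registers, which is why num_args is capped at SMC_ARG_CNT_32.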
@@ -489,6 +563,35 @@ int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
 EXPORT_SYMBOL_GPL(zynqmp_pm_get_family_info);
 
 /**
+ * zynqmp_pm_get_sip_svc_version() - Get SiP service call version
+ * @version:	Returned version value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_sip_svc_version(u32 *version)
+{
+	struct arm_smccc_res res;
+	u64 args[SMC_ARG_CNT_64] = {0};
+
+	if (!version)
+		return -EINVAL;
+
+	/* Check if SiP SVC version already verified */
+	if (sip_svc_version > 0) {
+		*version = sip_svc_version;
+		return 0;
+	}
+
+	args[0] = GET_SIP_SVC_VERSION;
+
+	arm_smccc_smc(args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], &res);
+
+	*version = ((lower_32_bits(res.a0) << 16U) | lower_32_bits(res.a1));
+
+	return zynqmp_pm_ret_code(XST_PM_SUCCESS);
+}
+
+/**
  * zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
  * @version:	Returned version value
  *
@@ -552,10 +655,34 @@ static int get_set_conduit_method(struct device_node *np)
  */
 int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
 {
-	int ret;
+	int ret, i = 0;
+	u32 ret_payload[PAYLOAD_ARG_CNT] = {0};
+
+	if (sip_svc_version >= SIP_SVC_PASSTHROUGH_VERSION) {
+		ret = zynqmp_pm_invoke_fw_fn(PM_QUERY_DATA, ret_payload, 4,
+					     qdata.qid, qdata.arg1,
+					     qdata.arg2, qdata.arg3);
+		/* To support backward compatibility */
+		if (!ret && !ret_payload[0]) {
+			/*
+			 * TF-A passes return status on 0th index but
+			 * api to get clock name reads data from 0th
+			 * index so pass data at 0th index instead of
+			 * return status
+			 */
+			if (qdata.qid == PM_QID_CLOCK_GET_NAME ||
+			    qdata.qid == PM_QID_PINCTRL_GET_FUNCTION_NAME)
+				i = 1;
+
+			for (; i < PAYLOAD_ARG_CNT; i++, out++)
+				*out = ret_payload[i];
+
+			return ret;
+		}
+	}
 
-	ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, out, 4, qdata.qid, qdata.arg1, qdata.arg2,
-				  qdata.arg3);
+	ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, out, 4, qdata.qid,
+				  qdata.arg1, qdata.arg2, qdata.arg3);
 
 	/*
 	 * For clock name query, all bytes in SMC response are clock name
@@ -920,7 +1047,7 @@ int zynqmp_pm_set_boot_health_status(u32 value)
  *
  * Return: Returns status, either success or error+reason
  */
-int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+int zynqmp_pm_reset_assert(const u32 reset,
 			   const enum zynqmp_pm_reset_action assert_flag)
 {
 	return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, NULL, 2, reset, assert_flag);
@@ -934,7 +1061,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert);
  *
  * Return: Returns status, either success or error+reason
  */
-int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status)
+int zynqmp_pm_reset_get_status(const u32 reset, u32 *status)
 {
 	u32 ret_payload[PAYLOAD_ARG_CNT];
 	int ret;
@@ -1118,8 +1245,11 @@ int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
 	if (pm_family_code == ZYNQMP_FAMILY_CODE &&
 	    param == PM_PINCTRL_CONFIG_TRI_STATE) {
 		ret = zynqmp_pm_feature(PM_PINCTRL_CONFIG_PARAM_SET);
-		if (ret < PM_PINCTRL_PARAM_SET_VERSION)
+		if (ret < PM_PINCTRL_PARAM_SET_VERSION) {
+			pr_warn("The requested pinctrl feature is not supported in the current firmware.\n"
+				"Expected firmware version is 2023.1 and above for this feature to work.\r\n");
 			return -EOPNOTSUPP;
+		}
 	}
 
 	return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, NULL, 3, pin, param, value);
@@ -1887,6 +2017,11 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	/* Get SiP SVC version number */
+	ret = zynqmp_pm_get_sip_svc_version(&sip_svc_version);
+	if (ret)
+		return ret;
+
 	ret = do_feature_check_call(PM_FEATURE_CHECK);
 	if (ret >= 0 && ((ret & FIRMWARE_VERSION_MASK) >= PM_API_VERSION_1))
 		feature_check_enabled = true;
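The gate added to zynqmp_pm_query_data() rests on how zynqmp_pm_get_sip_svc_version() encodes the version: the major and minor halves reported by the firmware are packed as (major << 16) | minor, so a single integer comparison against a minimum version decides whether the pass-through path may be used. A small sketch of that check; SIP_SVC_PASSTHROUGH_VERSION's real value lives in the kernel headers, the number below is a stand-in:

#include <stdint.h>
#include <stdio.h>

#define SIP_SVC_VERSION(major, minor)	(((major) << 16) | (minor))
#define SIP_SVC_PASSTHROUGH_VERSION	SIP_SVC_VERSION(0, 2)	/* stand-in */

static int passthrough_supported(uint32_t major, uint32_t minor)
{
	uint32_t version = SIP_SVC_VERSION(major, minor);

	return version >= SIP_SVC_PASSTHROUGH_VERSION;
}

int main(void)
{
	printf("v0.1: %d\n", passthrough_supported(0, 1));	/* 0: legacy path */
	printf("v0.2: %d\n", passthrough_supported(0, 2));	/* 1: pass-through */
	printf("v1.0: %d\n", passthrough_supported(1, 0));	/* 1: pass-through */
	return 0;
}

Caching the result in the static sip_svc_version, as the probe hunk above does, means the SMC query runs once at probe time and every later PM call can test the cached value directly.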