Diffstat (limited to 'drivers'): 381 files changed, 20753 insertions, 16859 deletions
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index 4e16c71c24b7..9561e3d2d428 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -72,6 +72,17 @@ menuconfig DMABUF_HEAPS allows userspace to allocate dma-bufs that can be shared between drivers. +menuconfig DMABUF_SYSFS_STATS + bool "DMA-BUF sysfs statistics" + select DMA_SHARED_BUFFER + help + Choose this option to enable DMA-BUF sysfs statistics + in location /sys/kernel/dmabuf/buffers. + + /sys/kernel/dmabuf/buffers/<inode_number> will contain + statistics for the DMA-BUF with the unique inode number + <inode_number>. + source "drivers/dma-buf/heaps/Kconfig" endmenu diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 995e05f609ff..40d81f23cacf 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_DMABUF_HEAPS) += heaps/ obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.o +obj-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o dmabuf_selftests-y := \ selftest.o \ diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c new file mode 100644 index 000000000000..053baadcada9 --- /dev/null +++ b/drivers/dma-buf/dma-buf-sysfs-stats.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * DMA-BUF sysfs statistics. + * + * Copyright (C) 2021 Google LLC. + */ + +#include <linux/dma-buf.h> +#include <linux/dma-resv.h> +#include <linux/kobject.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/sysfs.h> + +#include "dma-buf-sysfs-stats.h" + +#define to_dma_buf_entry_from_kobj(x) container_of(x, struct dma_buf_sysfs_entry, kobj) + +/** + * DOC: overview + * + * ``/sys/kernel/debug/dma_buf/bufinfo`` provides an overview of every DMA-BUF + * in the system. However, since debugfs is not safe to be mounted in + * production, procfs and sysfs can be used to gather DMA-BUF statistics on + * production systems. + * + * The ``/proc/<pid>/fdinfo/<fd>`` files in procfs can be used to gather + * information about DMA-BUF fds. Detailed documentation about the interface + * is present in Documentation/filesystems/proc.rst. + * + * Unfortunately, the existing procfs interfaces can only provide information + * about the DMA-BUFs for which processes hold fds or have the buffers mmapped + * into their address space. This necessitated the creation of the DMA-BUF sysfs + * statistics interface to provide per-buffer information on production systems. + * + * The interface at ``/sys/kernel/dma-buf/buffers`` exposes information about + * every DMA-BUF when ``CONFIG_DMABUF_SYSFS_STATS`` is enabled. + * + * The following stats are exposed by the interface: + * + * * ``/sys/kernel/dmabuf/buffers/<inode_number>/exporter_name`` + * * ``/sys/kernel/dmabuf/buffers/<inode_number>/size`` + * + * The information in the interface can also be used to derive per-exporter + * statistics. The data from the interface can be gathered on error conditions + * or other important events to provide a snapshot of DMA-BUF usage. + * It can also be collected periodically by telemetry to monitor various metrics. + * + * Detailed documentation about the interface is present in + * Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers. 
+ */ + +struct dma_buf_stats_attribute { + struct attribute attr; + ssize_t (*show)(struct dma_buf *dmabuf, + struct dma_buf_stats_attribute *attr, char *buf); +}; +#define to_dma_buf_stats_attr(x) container_of(x, struct dma_buf_stats_attribute, attr) + +static ssize_t dma_buf_stats_attribute_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct dma_buf_stats_attribute *attribute; + struct dma_buf_sysfs_entry *sysfs_entry; + struct dma_buf *dmabuf; + + attribute = to_dma_buf_stats_attr(attr); + sysfs_entry = to_dma_buf_entry_from_kobj(kobj); + dmabuf = sysfs_entry->dmabuf; + + if (!dmabuf || !attribute->show) + return -EIO; + + return attribute->show(dmabuf, attribute, buf); +} + +static const struct sysfs_ops dma_buf_stats_sysfs_ops = { + .show = dma_buf_stats_attribute_show, +}; + +static ssize_t exporter_name_show(struct dma_buf *dmabuf, + struct dma_buf_stats_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", dmabuf->exp_name); +} + +static ssize_t size_show(struct dma_buf *dmabuf, + struct dma_buf_stats_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%zu\n", dmabuf->size); +} + +static struct dma_buf_stats_attribute exporter_name_attribute = + __ATTR_RO(exporter_name); +static struct dma_buf_stats_attribute size_attribute = __ATTR_RO(size); + +static struct attribute *dma_buf_stats_default_attrs[] = { + &exporter_name_attribute.attr, + &size_attribute.attr, + NULL, +}; +ATTRIBUTE_GROUPS(dma_buf_stats_default); + +static void dma_buf_sysfs_release(struct kobject *kobj) +{ + struct dma_buf_sysfs_entry *sysfs_entry; + + sysfs_entry = to_dma_buf_entry_from_kobj(kobj); + kfree(sysfs_entry); +} + +static struct kobj_type dma_buf_ktype = { + .sysfs_ops = &dma_buf_stats_sysfs_ops, + .release = dma_buf_sysfs_release, + .default_groups = dma_buf_stats_default_groups, +}; + +void dma_buf_stats_teardown(struct dma_buf *dmabuf) +{ + struct dma_buf_sysfs_entry *sysfs_entry; + + sysfs_entry = dmabuf->sysfs_entry; + if (!sysfs_entry) + return; + + kobject_del(&sysfs_entry->kobj); + kobject_put(&sysfs_entry->kobj); +} + + +/* Statistics files do not need to send uevents. 
*/ +static int dmabuf_sysfs_uevent_filter(struct kset *kset, struct kobject *kobj) +{ + return 0; +} + +static const struct kset_uevent_ops dmabuf_sysfs_no_uevent_ops = { + .filter = dmabuf_sysfs_uevent_filter, +}; + +static struct kset *dma_buf_stats_kset; +static struct kset *dma_buf_per_buffer_stats_kset; +int dma_buf_init_sysfs_statistics(void) +{ + dma_buf_stats_kset = kset_create_and_add("dmabuf", + &dmabuf_sysfs_no_uevent_ops, + kernel_kobj); + if (!dma_buf_stats_kset) + return -ENOMEM; + + dma_buf_per_buffer_stats_kset = kset_create_and_add("buffers", + &dmabuf_sysfs_no_uevent_ops, + &dma_buf_stats_kset->kobj); + if (!dma_buf_per_buffer_stats_kset) { + kset_unregister(dma_buf_stats_kset); + return -ENOMEM; + } + + return 0; +} + +void dma_buf_uninit_sysfs_statistics(void) +{ + kset_unregister(dma_buf_per_buffer_stats_kset); + kset_unregister(dma_buf_stats_kset); +} + +int dma_buf_stats_setup(struct dma_buf *dmabuf) +{ + struct dma_buf_sysfs_entry *sysfs_entry; + int ret; + + if (!dmabuf || !dmabuf->file) + return -EINVAL; + + if (!dmabuf->exp_name) { + pr_err("exporter name must not be empty if stats needed\n"); + return -EINVAL; + } + + sysfs_entry = kzalloc(sizeof(struct dma_buf_sysfs_entry), GFP_KERNEL); + if (!sysfs_entry) + return -ENOMEM; + + sysfs_entry->kobj.kset = dma_buf_per_buffer_stats_kset; + sysfs_entry->dmabuf = dmabuf; + + dmabuf->sysfs_entry = sysfs_entry; + + /* create the directory for buffer stats */ + ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL, + "%lu", file_inode(dmabuf->file)->i_ino); + if (ret) + goto err_sysfs_dmabuf; + + return 0; + +err_sysfs_dmabuf: + kobject_put(&sysfs_entry->kobj); + dmabuf->sysfs_entry = NULL; + return ret; +} diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.h b/drivers/dma-buf/dma-buf-sysfs-stats.h new file mode 100644 index 000000000000..a49c6e2650cc --- /dev/null +++ b/drivers/dma-buf/dma-buf-sysfs-stats.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * DMA-BUF sysfs statistics. + * + * Copyright (C) 2021 Google LLC. 
+ */ + +#ifndef _DMA_BUF_SYSFS_STATS_H +#define _DMA_BUF_SYSFS_STATS_H + +#ifdef CONFIG_DMABUF_SYSFS_STATS + +int dma_buf_init_sysfs_statistics(void); +void dma_buf_uninit_sysfs_statistics(void); + +int dma_buf_stats_setup(struct dma_buf *dmabuf); + +void dma_buf_stats_teardown(struct dma_buf *dmabuf); +#else + +static inline int dma_buf_init_sysfs_statistics(void) +{ + return 0; +} + +static inline void dma_buf_uninit_sysfs_statistics(void) {} + +static inline int dma_buf_stats_setup(struct dma_buf *dmabuf) +{ + return 0; +} + +static inline void dma_buf_stats_teardown(struct dma_buf *dmabuf) {} +#endif +#endif // _DMA_BUF_SYSFS_STATS_H diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 511fe0d217a0..63d32261b63f 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -29,6 +29,8 @@ #include <uapi/linux/dma-buf.h> #include <uapi/linux/magic.h> +#include "dma-buf-sysfs-stats.h" + static inline int is_dma_buf_file(struct file *); struct dma_buf_list { @@ -74,6 +76,7 @@ static void dma_buf_release(struct dentry *dentry) */ BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active); + dma_buf_stats_teardown(dmabuf); dmabuf->ops->release(dmabuf); if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) @@ -580,6 +583,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) file->f_mode |= FMODE_LSEEK; dmabuf->file = file; + ret = dma_buf_stats_setup(dmabuf); + if (ret) + goto err_sysfs; + mutex_init(&dmabuf->lock); INIT_LIST_HEAD(&dmabuf->attachments); @@ -589,6 +596,14 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) return dmabuf; +err_sysfs: + /* + * Set file->f_path.dentry->d_fsdata to NULL so that when + * dma_buf_release() gets invoked by dentry_ops, it exits + * early before calling the release() dma_buf op. + */ + file->f_path.dentry->d_fsdata = NULL; + fput(file); err_dmabuf: kfree(dmabuf); err_module: @@ -926,6 +941,9 @@ EXPORT_SYMBOL_GPL(dma_buf_unpin); * the underlying backing storage is pinned for as long as a mapping exists, * therefore users/importers should not hold onto a mapping for undue amounts of * time. + * + * Important: Dynamic importers must wait for the exclusive fence of the struct + * dma_resv attached to the DMA-BUF first. 
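As a rough sketch of the "wait for the exclusive fence" rule added to the dma_buf_map_attachment() documentation further down in this patch (the helper name is made up and not part of the patch; the caller is assumed to be a dynamic importer that already holds the dma_resv lock, and real importers would usually hand the fence to their job scheduler rather than block on the CPU):

#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/err.h>

/* Hypothetical dynamic-importer helper: map, then wait for the exclusive fence. */
static struct sg_table *importer_map_and_wait(struct dma_buf_attachment *attach)
{
	struct dma_fence *excl;
	struct sg_table *sgt;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return sgt;

	/* Honour the documented rule before touching the backing storage. */
	excl = dma_resv_excl_fence(attach->dmabuf->resv);
	if (excl)
		dma_fence_wait(excl, false);

	return sgt;
}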
*/ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, enum dma_data_direction direction) @@ -992,7 +1010,6 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, } } #endif /* CONFIG_DMA_API_DEBUG */ - return sg_table; } EXPORT_SYMBOL_GPL(dma_buf_map_attachment); @@ -1469,6 +1486,12 @@ static inline void dma_buf_uninit_debugfs(void) static int __init dma_buf_init(void) { + int ret; + + ret = dma_buf_init_sysfs_statistics(); + if (ret) + return ret; + dma_buf_mnt = kern_mount(&dma_buf_fs_type); if (IS_ERR(dma_buf_mnt)) return PTR_ERR(dma_buf_mnt); @@ -1484,5 +1507,6 @@ static void __exit dma_buf_deinit(void) { dma_buf_uninit_debugfs(); kern_unmount(dma_buf_mnt); + dma_buf_uninit_sysfs_statistics(); } __exitcall(dma_buf_deinit); diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c index 7d129e68ac70..1b4cb3e5cec9 100644 --- a/drivers/dma-buf/dma-fence-chain.c +++ b/drivers/dma-buf/dma-fence-chain.c @@ -137,6 +137,7 @@ static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb) struct dma_fence_chain *chain; chain = container_of(cb, typeof(*chain), cb); + init_irq_work(&chain->work, dma_fence_chain_irq_work); irq_work_queue(&chain->work); dma_fence_put(f); } @@ -239,7 +240,6 @@ void dma_fence_chain_init(struct dma_fence_chain *chain, rcu_assign_pointer(chain->prev, prev); chain->fence = fence; chain->prev_seqno = 0; - init_irq_work(&chain->work, dma_fence_chain_irq_work); /* Try to reuse the context of the previous chain node. */ if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) { diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index f26c71747d43..e744fd87c63c 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -615,25 +615,21 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) */ bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all) { - unsigned int seq, shared_count; + struct dma_fence *fence; + unsigned int seq; int ret; rcu_read_lock(); retry: ret = true; - shared_count = 0; seq = read_seqcount_begin(&obj->seq); if (test_all) { struct dma_resv_list *fobj = dma_resv_shared_list(obj); - unsigned int i; - - if (fobj) - shared_count = fobj->shared_count; + unsigned int i, shared_count; + shared_count = fobj ? 
fobj->shared_count : 0; for (i = 0; i < shared_count; ++i) { - struct dma_fence *fence; - fence = rcu_dereference(fobj->shared[i]); ret = dma_resv_test_signaled_single(fence); if (ret < 0) @@ -641,24 +637,19 @@ retry: else if (!ret) break; } - - if (read_seqcount_retry(&obj->seq, seq)) - goto retry; } - if (!shared_count) { - struct dma_fence *fence_excl = dma_resv_excl_fence(obj); - - if (fence_excl) { - ret = dma_resv_test_signaled_single(fence_excl); - if (ret < 0) - goto retry; + fence = dma_resv_excl_fence(obj); + if (ret && fence) { + ret = dma_resv_test_signaled_single(fence); + if (ret < 0) + goto retry; - if (read_seqcount_retry(&obj->seq, seq)) - goto retry; - } } + if (read_seqcount_retry(&obj->seq, seq)) + goto retry; + rcu_read_unlock(); return ret; } diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c index 9525f7f56119..8ce1ea59d31b 100644 --- a/drivers/dma-buf/st-dma-fence-chain.c +++ b/drivers/dma-buf/st-dma-fence-chain.c @@ -58,28 +58,20 @@ static struct dma_fence *mock_fence(void) return &f->base; } -static inline struct mock_chain { - struct dma_fence_chain base; -} *to_mock_chain(struct dma_fence *f) { - return container_of(f, struct mock_chain, base.base); -} - static struct dma_fence *mock_chain(struct dma_fence *prev, struct dma_fence *fence, u64 seqno) { - struct mock_chain *f; + struct dma_fence_chain *f; - f = kmalloc(sizeof(*f), GFP_KERNEL); + f = dma_fence_chain_alloc(); if (!f) return NULL; - dma_fence_chain_init(&f->base, - dma_fence_get(prev), - dma_fence_get(fence), + dma_fence_chain_init(f, dma_fence_get(prev), dma_fence_get(fence), seqno); - return &f->base.base; + return &f->base; } static int sanitycheck(void *arg) diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index db732f71e59a..8df761a10251 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -11,9 +11,15 @@ #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/udmabuf.h> +#include <linux/hugetlb.h> -static const u32 list_limit = 1024; /* udmabuf_create_list->count limit */ -static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */ +static int list_limit = 1024; +module_param(list_limit, int, 0644); +MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024."); + +static int size_limit_mb = 64; +module_param(size_limit_mb, int, 0644); +MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. 
Default is 64."); struct udmabuf { pgoff_t pagecount; @@ -160,10 +166,13 @@ static long udmabuf_create(struct miscdevice *device, { DEFINE_DMA_BUF_EXPORT_INFO(exp_info); struct file *memfd = NULL; + struct address_space *mapping = NULL; struct udmabuf *ubuf; struct dma_buf *buf; pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit; - struct page *page; + struct page *page, *hpage = NULL; + pgoff_t subpgoff, maxsubpgs; + struct hstate *hpstate; int seals, ret = -EINVAL; u32 i, flags; @@ -194,7 +203,8 @@ static long udmabuf_create(struct miscdevice *device, memfd = fget(list[i].memfd); if (!memfd) goto err; - if (!shmem_mapping(file_inode(memfd)->i_mapping)) + mapping = file_inode(memfd)->i_mapping; + if (!shmem_mapping(mapping) && !is_file_hugepages(memfd)) goto err; seals = memfd_fcntl(memfd, F_GET_SEALS, 0); if (seals == -EINVAL) @@ -205,17 +215,48 @@ static long udmabuf_create(struct miscdevice *device, goto err; pgoff = list[i].offset >> PAGE_SHIFT; pgcnt = list[i].size >> PAGE_SHIFT; + if (is_file_hugepages(memfd)) { + hpstate = hstate_file(memfd); + pgoff = list[i].offset >> huge_page_shift(hpstate); + subpgoff = (list[i].offset & + ~huge_page_mask(hpstate)) >> PAGE_SHIFT; + maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT; + } for (pgidx = 0; pgidx < pgcnt; pgidx++) { - page = shmem_read_mapping_page( - file_inode(memfd)->i_mapping, pgoff + pgidx); - if (IS_ERR(page)) { - ret = PTR_ERR(page); - goto err; + if (is_file_hugepages(memfd)) { + if (!hpage) { + hpage = find_get_page_flags(mapping, pgoff, + FGP_ACCESSED); + if (IS_ERR(hpage)) { + ret = PTR_ERR(hpage); + goto err; + } + } + page = hpage + subpgoff; + get_page(page); + subpgoff++; + if (subpgoff == maxsubpgs) { + put_page(hpage); + hpage = NULL; + subpgoff = 0; + pgoff++; + } + } else { + page = shmem_read_mapping_page(mapping, + pgoff + pgidx); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + goto err; + } } ubuf->pages[pgbuf++] = page; } fput(memfd); memfd = NULL; + if (hpage) { + put_page(hpage); + hpage = NULL; + } } exp_info.ops = &udmabuf_ops; diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 1db738d5b301..5d3fd803d2bb 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -251,6 +251,38 @@ config QCOM_SCM_DOWNLOAD_MODE_DEFAULT Say Y here to enable "download mode" by default. +config SYSFB + bool + default y + depends on X86 || EFI + +config SYSFB_SIMPLEFB + bool "Mark VGA/VBE/EFI FB as generic system framebuffer" + depends on SYSFB + help + Firmwares often provide initial graphics framebuffers so the BIOS, + bootloader or kernel can show basic video-output during boot for + user-guidance and debugging. Historically, x86 used the VESA BIOS + Extensions and EFI-framebuffers for this, which are mostly limited + to x86 BIOS or EFI systems. + This option, if enabled, marks VGA/VBE/EFI framebuffers as generic + framebuffers so the new generic system-framebuffer drivers can be + used instead. If the framebuffer is not compatible with the generic + modes, it is advertised as fallback platform framebuffer so legacy + drivers like efifb, vesafb and uvesafb can pick it up. + If this option is not selected, all system framebuffers are always + marked as fallback platform framebuffers as usual. + + Note: Legacy fbdev drivers, including vesafb, efifb, uvesafb, will + not be able to pick up generic system framebuffers if this option + is selected. You are highly encouraged to enable simplefb as + replacement if you select this option. simplefb can correctly deal + with generic system framebuffers. 
But you should still keep vesafb + and others enabled as fallback if a system framebuffer is + incompatible with simplefb. + + If unsure, say Y. + config TI_SCI_PROTOCOL tristate "TI System Control Interface (TISCI) Message Protocol" depends on TI_MESSAGE_MANAGER diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 546ac8e7f6d0..705fabe88156 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -18,6 +18,8 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o obj-$(CONFIG_QCOM_SCM) += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o +obj-$(CONFIG_SYSFB) += sysfb.o +obj-$(CONFIG_SYSFB_SIMPLEFB) += sysfb_simplefb.o obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index 467e94259679..c02ff25dd477 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -36,6 +36,8 @@ obj-$(CONFIG_LOAD_UEFI_KEYS) += mokvar-table.o fake_map-y += fake_mem.o fake_map-$(CONFIG_X86) += x86_fake_mem.o +obj-$(CONFIG_SYSFB) += sysfb_efi.o + arm-obj-$(CONFIG_EFI) := efi-init.o arm-runtime.o obj-$(CONFIG_ARM) += $(arm-obj-y) obj-$(CONFIG_ARM64) += $(arm-obj-y) diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c index a552a08a1741..b19ce1a83f91 100644 --- a/drivers/firmware/efi/efi-init.c +++ b/drivers/firmware/efi/efi-init.c @@ -275,93 +275,3 @@ void __init efi_init(void) } #endif } - -static bool efifb_overlaps_pci_range(const struct of_pci_range *range) -{ - u64 fb_base = screen_info.lfb_base; - - if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) - fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32; - - return fb_base >= range->cpu_addr && - fb_base < (range->cpu_addr + range->size); -} - -static struct device_node *find_pci_overlap_node(void) -{ - struct device_node *np; - - for_each_node_by_type(np, "pci") { - struct of_pci_range_parser parser; - struct of_pci_range range; - int err; - - err = of_pci_range_parser_init(&parser, np); - if (err) { - pr_warn("of_pci_range_parser_init() failed: %d\n", err); - continue; - } - - for_each_of_pci_range(&parser, &range) - if (efifb_overlaps_pci_range(&range)) - return np; - } - return NULL; -} - -/* - * If the efifb framebuffer is backed by a PCI graphics controller, we have - * to ensure that this relation is expressed using a device link when - * running in DT mode, or the probe order may be reversed, resulting in a - * resource reservation conflict on the memory window that the efifb - * framebuffer steals from the PCIe host bridge. - */ -static int efifb_add_links(struct fwnode_handle *fwnode) -{ - struct device_node *sup_np; - - sup_np = find_pci_overlap_node(); - - /* - * If there's no PCI graphics controller backing the efifb, we are - * done here. 
- */ - if (!sup_np) - return 0; - - fwnode_link_add(fwnode, of_fwnode_handle(sup_np)); - of_node_put(sup_np); - - return 0; -} - -static const struct fwnode_operations efifb_fwnode_ops = { - .add_links = efifb_add_links, -}; - -static struct fwnode_handle efifb_fwnode; - -static int __init register_gop_device(void) -{ - struct platform_device *pd; - int err; - - if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) - return 0; - - pd = platform_device_alloc("efi-framebuffer", 0); - if (!pd) - return -ENOMEM; - - if (IS_ENABLED(CONFIG_PCI)) { - fwnode_init(&efifb_fwnode, &efifb_fwnode_ops); - pd->dev.fwnode = &efifb_fwnode; - } - - err = platform_device_add_data(pd, &screen_info, sizeof(screen_info)); - if (err) - return err; - - return platform_device_add(pd); -} -subsys_initcall(register_gop_device); diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c new file mode 100644 index 000000000000..4c7c9dd7733f --- /dev/null +++ b/drivers/firmware/efi/sysfb_efi.c @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Generic System Framebuffers + * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com> + * + * EFI Quirks Copyright (c) 2006 Edgar Hucek <gimli@dark-green.com> + */ + +/* + * EFI Quirks + * Several EFI systems do not correctly advertise their boot framebuffers. + * Hence, we use this static table of known broken machines and fix up the + * information so framebuffer drivers can load correctly. + */ + +#include <linux/dmi.h> +#include <linux/err.h> +#include <linux/efi.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/of_address.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/screen_info.h> +#include <linux/sysfb.h> +#include <video/vga.h> + +#include <asm/efi.h> + +enum { + OVERRIDE_NONE = 0x0, + OVERRIDE_BASE = 0x1, + OVERRIDE_STRIDE = 0x2, + OVERRIDE_HEIGHT = 0x4, + OVERRIDE_WIDTH = 0x8, +}; + +struct efifb_dmi_info efifb_dmi_list[] = { + [M_I17] = { "i17", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE }, + [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050, OVERRIDE_NONE }, /* guess */ + [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050, OVERRIDE_NONE }, + [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE }, /* guess */ + [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200, OVERRIDE_NONE }, + [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080, OVERRIDE_NONE }, + [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440, OVERRIDE_NONE }, + [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768, OVERRIDE_NONE }, + [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768, OVERRIDE_NONE }, + [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE }, + [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800, OVERRIDE_NONE }, + [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE }, + [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE }, + [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE }, + [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800, OVERRIDE_NONE }, + /* 11" Macbook Air 3,1 passes the wrong stride */ + [M_MBA_3] = { "mba3", 0, 2048 * 4, 0, 0, OVERRIDE_STRIDE }, + [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE }, + [M_MBP_2] = { "mbp2", 0, 0, 0, 0, OVERRIDE_NONE }, /* placeholder */ + [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE }, + [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 
4, 1440, 900, OVERRIDE_NONE }, + [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200, OVERRIDE_NONE }, + [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900, OVERRIDE_NONE }, + [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE }, + [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900, OVERRIDE_NONE }, + [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200, OVERRIDE_NONE }, + [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050, OVERRIDE_NONE }, + [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800, OVERRIDE_NONE }, + [M_MBP_8_2] = { "mbp82", 0x90010000, 1472 * 4, 1440, 900, OVERRIDE_NONE }, + [M_UNKNOWN] = { NULL, 0, 0, 0, 0, OVERRIDE_NONE } +}; + +void efifb_setup_from_dmi(struct screen_info *si, const char *opt) +{ + int i; + + for (i = 0; i < M_UNKNOWN; i++) { + if (efifb_dmi_list[i].base != 0 && + !strcmp(opt, efifb_dmi_list[i].optname)) { + si->lfb_base = efifb_dmi_list[i].base; + si->lfb_linelength = efifb_dmi_list[i].stride; + si->lfb_width = efifb_dmi_list[i].width; + si->lfb_height = efifb_dmi_list[i].height; + } + } +} + +#define choose_value(dmivalue, fwvalue, field, flags) ({ \ + typeof(fwvalue) _ret_ = fwvalue; \ + if ((flags) & (field)) \ + _ret_ = dmivalue; \ + else if ((fwvalue) == 0) \ + _ret_ = dmivalue; \ + _ret_; \ + }) + +static int __init efifb_set_system(const struct dmi_system_id *id) +{ + struct efifb_dmi_info *info = id->driver_data; + + if (info->base == 0 && info->height == 0 && info->width == 0 && + info->stride == 0) + return 0; + + /* Trust the bootloader over the DMI tables */ + if (screen_info.lfb_base == 0) { +#if defined(CONFIG_PCI) + struct pci_dev *dev = NULL; + int found_bar = 0; +#endif + if (info->base) { + screen_info.lfb_base = choose_value(info->base, + screen_info.lfb_base, OVERRIDE_BASE, + info->flags); + +#if defined(CONFIG_PCI) + /* make sure that the address in the table is actually + * on a VGA device's PCI BAR */ + + for_each_pci_dev(dev) { + int i; + if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) + continue; + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + resource_size_t start, end; + unsigned long flags; + + flags = pci_resource_flags(dev, i); + if (!(flags & IORESOURCE_MEM)) + continue; + + if (flags & IORESOURCE_UNSET) + continue; + + if (pci_resource_len(dev, i) == 0) + continue; + + start = pci_resource_start(dev, i); + end = pci_resource_end(dev, i); + if (screen_info.lfb_base >= start && + screen_info.lfb_base < end) { + found_bar = 1; + break; + } + } + } + if (!found_bar) + screen_info.lfb_base = 0; +#endif + } + } + if (screen_info.lfb_base) { + screen_info.lfb_linelength = choose_value(info->stride, + screen_info.lfb_linelength, OVERRIDE_STRIDE, + info->flags); + screen_info.lfb_width = choose_value(info->width, + screen_info.lfb_width, OVERRIDE_WIDTH, + info->flags); + screen_info.lfb_height = choose_value(info->height, + screen_info.lfb_height, OVERRIDE_HEIGHT, + info->flags); + if (screen_info.orig_video_isVGA == 0) + screen_info.orig_video_isVGA = VIDEO_TYPE_EFI; + } else { + screen_info.lfb_linelength = 0; + screen_info.lfb_width = 0; + screen_info.lfb_height = 0; + screen_info.orig_video_isVGA = 0; + return 0; + } + + printk(KERN_INFO "efifb: dmi detected %s - framebuffer at 0x%08x " + "(%dx%d, stride %d)\n", id->ident, + screen_info.lfb_base, screen_info.lfb_width, + screen_info.lfb_height, screen_info.lfb_linelength); + + return 1; +} + +#define EFIFB_DMI_SYSTEM_ID(vendor, name, enumid) \ + { \ + efifb_set_system, \ + name, \ + { \ + DMI_MATCH(DMI_BIOS_VENDOR, vendor), 
\ + DMI_MATCH(DMI_PRODUCT_NAME, name) \ + }, \ + &efifb_dmi_list[enumid] \ + } + +static const struct dmi_system_id efifb_dmi_system_table[] __initconst = { + EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac4,1", M_I17), + /* At least one of these two will be right; maybe both? */ + EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac5,1", M_I20), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac5,1", M_I20), + /* At least one of these two will be right; maybe both? */ + EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1), + EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1), + EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB), + /* At least one of these two will be right; maybe both? */ + EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook2,1", M_MB), + /* At least one of these two will be right; maybe both? */ + EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir3,1", M_MBA_3), + EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP), + EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2), + EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2), + EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro8,2", M_MBP_8_2), + {}, +}; + +/* + * Some devices have a portrait LCD but advertise a landscape resolution (and + * pitch). We simply swap width and height for these devices so that we can + * correctly deal with some of them coming with multiple resolutions. + */ +static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = { + { + /* + * Lenovo MIIX310-10ICR, only some batches have the troublesome + * 800x1280 portrait screen. Luckily the portrait version has + * its own BIOS version, so we match on that. 
+ */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"), + DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"), + }, + }, + { + /* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, + "Lenovo MIIX 320-10ICR"), + }, + }, + { + /* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, + "Lenovo ideapad D330-10IGM"), + }, + }, + {}, +}; + +static bool efifb_overlaps_pci_range(const struct of_pci_range *range) +{ + u64 fb_base = screen_info.lfb_base; + + if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) + fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32; + + return fb_base >= range->cpu_addr && + fb_base < (range->cpu_addr + range->size); +} + +static struct device_node *find_pci_overlap_node(void) +{ + struct device_node *np; + + for_each_node_by_type(np, "pci") { + struct of_pci_range_parser parser; + struct of_pci_range range; + int err; + + err = of_pci_range_parser_init(&parser, np); + if (err) { + pr_warn("of_pci_range_parser_init() failed: %d\n", err); + continue; + } + + for_each_of_pci_range(&parser, &range) + if (efifb_overlaps_pci_range(&range)) + return np; + } + return NULL; +} + +/* + * If the efifb framebuffer is backed by a PCI graphics controller, we have + * to ensure that this relation is expressed using a device link when + * running in DT mode, or the probe order may be reversed, resulting in a + * resource reservation conflict on the memory window that the efifb + * framebuffer steals from the PCIe host bridge. + */ +static int efifb_add_links(struct fwnode_handle *fwnode) +{ + struct device_node *sup_np; + + sup_np = find_pci_overlap_node(); + + /* + * If there's no PCI graphics controller backing the efifb, we are + * done here. + */ + if (!sup_np) + return 0; + + fwnode_link_add(fwnode, of_fwnode_handle(sup_np)); + of_node_put(sup_np); + + return 0; +} + +static const struct fwnode_operations efifb_fwnode_ops = { + .add_links = efifb_add_links, +}; + +#ifdef CONFIG_EFI +static struct fwnode_handle efifb_fwnode; + +__init void sysfb_apply_efi_quirks(struct platform_device *pd) +{ + if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || + !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS)) + dmi_check_system(efifb_dmi_system_table); + + if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && + dmi_check_system(efifb_dmi_swap_width_height)) { + u16 temp = screen_info.lfb_width; + + screen_info.lfb_width = screen_info.lfb_height; + screen_info.lfb_height = temp; + screen_info.lfb_linelength = 4 * screen_info.lfb_width; + } + + if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && IS_ENABLED(CONFIG_PCI)) { + fwnode_init(&efifb_fwnode, &efifb_fwnode_ops); + pd->dev.fwnode = &efifb_fwnode; + } +} +#endif diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c new file mode 100644 index 000000000000..2bfbb05f7d89 --- /dev/null +++ b/drivers/firmware/sysfb.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Generic System Framebuffers + * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com> + */ + +/* + * Simple-Framebuffer support + * Create a platform-device for any available boot framebuffer. 
The + * simple-framebuffer platform device is already available on DT systems, so + * this module parses the global "screen_info" object and creates a suitable + * platform device compatible with the "simple-framebuffer" DT object. If + * the framebuffer is incompatible, we instead create a legacy + * "vesa-framebuffer", "efi-framebuffer" or "platform-framebuffer" device and + * pass the screen_info as platform_data. This allows legacy drivers + * to pick these devices up without messing with simple-framebuffer drivers. + * The global "screen_info" is still valid at all times. + * + * If CONFIG_SYSFB_SIMPLEFB is not selected, never register "simple-framebuffer" + * platform devices, but only use legacy framebuffer devices for + * backwards compatibility. + * + * TODO: We set the dev_id field of all platform-devices to 0. This allows + * other OF/DT parsers to create such devices, too. However, they must + * start at offset 1 for this to work. + */ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/platform_data/simplefb.h> +#include <linux/platform_device.h> +#include <linux/screen_info.h> +#include <linux/sysfb.h> + +static __init int sysfb_init(void) +{ + struct screen_info *si = &screen_info; + struct simplefb_platform_data mode; + struct platform_device *pd; + const char *name; + bool compatible; + int ret; + + /* try to create a simple-framebuffer device */ + compatible = sysfb_parse_mode(si, &mode); + if (compatible) { + ret = sysfb_create_simplefb(si, &mode); + if (!ret) + return 0; + } + + /* if the FB is incompatible, create a legacy framebuffer device */ + if (si->orig_video_isVGA == VIDEO_TYPE_EFI) + name = "efi-framebuffer"; + else if (si->orig_video_isVGA == VIDEO_TYPE_VLFB) + name = "vesa-framebuffer"; + else + name = "platform-framebuffer"; + + pd = platform_device_alloc(name, 0); + if (!pd) + return -ENOMEM; + + sysfb_apply_efi_quirks(pd); + + ret = platform_device_add_data(pd, si, sizeof(*si)); + if (ret) + goto err; + + ret = platform_device_add(pd); + if (ret) + goto err; + + return 0; +err: + platform_device_put(pd); + return ret; +} + +/* must execute after PCI subsystem for EFI quirks */ +device_initcall(sysfb_init); diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c new file mode 100644 index 000000000000..b86761904949 --- /dev/null +++ b/drivers/firmware/sysfb_simplefb.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Generic System Framebuffers + * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com> + */ + +/* + * simple-framebuffer probing + * Try to convert "screen_info" into a "simple-framebuffer" compatible mode. + * If the mode is incompatible, we return "false" and let the caller create + * legacy nodes instead. 
+ */ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/platform_data/simplefb.h> +#include <linux/platform_device.h> +#include <linux/screen_info.h> +#include <linux/sysfb.h> + +static const char simplefb_resname[] = "BOOTFB"; +static const struct simplefb_format formats[] = SIMPLEFB_FORMATS; + +/* try parsing screen_info into a simple-framebuffer mode struct */ +__init bool sysfb_parse_mode(const struct screen_info *si, + struct simplefb_platform_data *mode) +{ + const struct simplefb_format *f; + __u8 type; + unsigned int i; + + type = si->orig_video_isVGA; + if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI) + return false; + + for (i = 0; i < ARRAY_SIZE(formats); ++i) { + f = &formats[i]; + if (si->lfb_depth == f->bits_per_pixel && + si->red_size == f->red.length && + si->red_pos == f->red.offset && + si->green_size == f->green.length && + si->green_pos == f->green.offset && + si->blue_size == f->blue.length && + si->blue_pos == f->blue.offset && + si->rsvd_size == f->transp.length && + si->rsvd_pos == f->transp.offset) { + mode->format = f->name; + mode->width = si->lfb_width; + mode->height = si->lfb_height; + mode->stride = si->lfb_linelength; + return true; + } + } + + return false; +} + +__init int sysfb_create_simplefb(const struct screen_info *si, + const struct simplefb_platform_data *mode) +{ + struct platform_device *pd; + struct resource res; + u64 base, size; + u32 length; + int ret; + + /* + * If the 64BIT_BASE capability is set, ext_lfb_base will contain the + * upper half of the base address. Assemble the address, then make sure + * it is valid and we can actually access it. + */ + base = si->lfb_base; + if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE) + base |= (u64)si->ext_lfb_base << 32; + if (!base || (u64)(resource_size_t)base != base) { + printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n"); + return -EINVAL; + } + + /* + * Don't use lfb_size as IORESOURCE size, since it may contain the + * entire VMEM, and thus require huge mappings. Use just the part we + * need, that is, the part where the framebuffer is located. But verify + * that it does not exceed the advertised VMEM. + * Note that in case of VBE, the lfb_size is shifted by 16 bits for + * historical reasons. 
+ */ + size = si->lfb_size; + if (si->orig_video_isVGA == VIDEO_TYPE_VLFB) + size <<= 16; + length = mode->height * mode->stride; + if (length > size) { + printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); + return -EINVAL; + } + length = PAGE_ALIGN(length); + + /* setup IORESOURCE_MEM as framebuffer memory */ + memset(&res, 0, sizeof(res)); + res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; + res.name = simplefb_resname; + res.start = base; + res.end = res.start + length - 1; + if (res.end <= res.start) + return -EINVAL; + + pd = platform_device_alloc("simple-framebuffer", 0); + if (!pd) + return -ENOMEM; + + sysfb_apply_efi_quirks(pd); + + ret = platform_device_add_resources(pd, &res, 1); + if (ret) + return ret; + + ret = platform_device_add_data(pd, mode, sizeof(*mode)); + if (ret) + return ret; + + return platform_device_add(pd); +} diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 7ff89690a976..0d372354c2d0 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -35,6 +35,11 @@ config DRM_MIPI_DSI bool depends on DRM +config DRM_DP_AUX_BUS + tristate + depends on DRM + depends on OF + config DRM_DP_AUX_CHARDEV bool "DRM DP AUX Interface" depends on DRM @@ -317,8 +322,6 @@ source "drivers/gpu/drm/tilcdc/Kconfig" source "drivers/gpu/drm/qxl/Kconfig" -source "drivers/gpu/drm/bochs/Kconfig" - source "drivers/gpu/drm/virtio/Kconfig" source "drivers/gpu/drm/msm/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index a118692a6df7..ad1112154898 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -33,6 +33,8 @@ drm-$(CONFIG_PCI) += drm_pci.o drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o +obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o + drm_vram_helper-y := drm_gem_vram_helper.o obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o @@ -96,7 +98,6 @@ obj-y += omapdrm/ obj-$(CONFIG_DRM_SUN4I) += sun4i/ obj-y += tilcdc/ obj-$(CONFIG_DRM_QXL) += qxl/ -obj-$(CONFIG_DRM_BOCHS) += bochs/ obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio/ obj-$(CONFIG_DRM_MSM) += msm/ obj-$(CONFIG_DRM_TEGRA) += tegra/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index a130e766cbdb..c905a4cfc173 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -34,6 +34,7 @@ struct amdgpu_fpriv; struct amdgpu_bo_list_entry { struct ttm_validate_buffer tv; struct amdgpu_bo_va *bo_va; + struct dma_fence_chain *chain; uint32_t priority; struct page **user_pages; bool user_invalidated; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 30fa1f61e0e5..a152363b0254 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -572,6 +572,20 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, goto out; } + amdgpu_bo_list_for_each_entry(e, p->bo_list) { + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); + + e->bo_va = amdgpu_vm_bo_find(vm, bo); + + if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) { + e->chain = dma_fence_chain_alloc(); + if (!e->chain) { + r = -ENOMEM; + goto error_validate; + } + } + } + amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold, &p->bytes_moved_vis_threshold); p->bytes_moved = 0; @@ -599,15 +613,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, gws = p->bo_list->gws_obj; oa = p->bo_list->oa_obj; - 
amdgpu_bo_list_for_each_entry(e, p->bo_list) { - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); - - /* Make sure we use the exclusive slot for shared BOs */ - if (bo->prime_shared_count) - e->tv.num_shared = 0; - e->bo_va = amdgpu_vm_bo_find(vm, bo); - } - if (gds) { p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT; p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT; @@ -629,8 +634,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } error_validate: - if (r) + if (r) { + amdgpu_bo_list_for_each_entry(e, p->bo_list) { + dma_fence_chain_free(e->chain); + e->chain = NULL; + } ttm_eu_backoff_reservation(&p->ticket, &p->validated); + } out: return r; } @@ -670,9 +680,17 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, { unsigned i; - if (error && backoff) + if (error && backoff) { + struct amdgpu_bo_list_entry *e; + + amdgpu_bo_list_for_each_entry(e, parser->bo_list) { + dma_fence_chain_free(e->chain); + e->chain = NULL; + } + ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); + } for (i = 0; i < parser->num_post_deps; i++) { drm_syncobj_put(parser->post_deps[i].syncobj); @@ -1109,7 +1127,7 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p dep->chain = NULL; if (syncobj_deps[i].point) { - dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL); + dep->chain = dma_fence_chain_alloc(); if (!dep->chain) return -ENOMEM; } @@ -1117,7 +1135,7 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p dep->syncobj = drm_syncobj_find(p->filp, syncobj_deps[i].handle); if (!dep->syncobj) { - kfree(dep->chain); + dma_fence_chain_free(dep->chain); return -EINVAL; } dep->point = syncobj_deps[i].point; @@ -1245,6 +1263,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); + amdgpu_bo_list_for_each_entry(e, p->bo_list) { + struct dma_resv *resv = e->tv.bo->base.resv; + struct dma_fence_chain *chain = e->chain; + + if (!chain) + continue; + + /* + * Work around dma_resv shortcommings by wrapping up the + * submission in a dma_fence_chain and add it as exclusive + * fence, but first add the submission as shared fence to make + * sure that shared fences never signal before the exclusive + * one. + */ + dma_fence_chain_init(chain, dma_resv_excl_fence(resv), + dma_fence_get(p->fence), 1); + + dma_resv_add_shared_fence(resv, p->fence); + rcu_assign_pointer(resv->fence_excl, &chain->base); + e->chain = NULL; + } + ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); mutex_unlock(&p->adev->notifier_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d303e88e3c23..8398daa0c06a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1266,15 +1266,16 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev) /** * amdgpu_device_vga_set_decode - enable/disable vga decode * - * @cookie: amdgpu_device pointer + * @pdev: PCI device pointer * @state: enable/disable vga decode * * Enable/disable vga decode (all asics). * Returns VGA resource flags. 
*/ -static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state) +static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev, + bool state) { - struct amdgpu_device *adev = cookie; + struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev)); amdgpu_asic_set_vga_state(adev, state); if (state) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | @@ -3715,7 +3716,7 @@ fence_driver_init: /* this will fail for cards that aren't VGA class devices, just * ignore it */ if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) - vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode); + vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); if (amdgpu_device_supports_px(ddev)) { px = true; @@ -3838,7 +3839,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) vga_switcheroo_fini_domain_pm_ops(adev->dev); } if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) - vga_client_register(adev->pdev, NULL, NULL, NULL); + vga_client_unregister(adev->pdev); if (IS_ENABLED(CONFIG_PERF_EVENTS)) amdgpu_pmu_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index a9475b207510..ae6ab93c868b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -42,48 +42,6 @@ #include <linux/pci-p2pdma.h> #include <linux/pm_runtime.h> -static int -__dma_resv_make_exclusive(struct dma_resv *obj) -{ - struct dma_fence **fences; - unsigned int count; - int r; - - if (!dma_resv_shared_list(obj)) /* no shared fences to convert */ - return 0; - - r = dma_resv_get_fences(obj, NULL, &count, &fences); - if (r) - return r; - - if (count == 0) { - /* Now that was unexpected. */ - } else if (count == 1) { - dma_resv_add_excl_fence(obj, fences[0]); - dma_fence_put(fences[0]); - kfree(fences); - } else { - struct dma_fence_array *array; - - array = dma_fence_array_create(count, fences, - dma_fence_context_alloc(1), 0, - false); - if (!array) - goto err_fences_put; - - dma_resv_add_excl_fence(obj, &array->base); - dma_fence_put(&array->base); - } - - return 0; - -err_fences_put: - while (count--) - dma_fence_put(fences[count]); - kfree(fences); - return -ENOMEM; -} - /** * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation * @@ -110,24 +68,6 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, if (r < 0) goto out; - r = amdgpu_bo_reserve(bo, false); - if (unlikely(r != 0)) - goto out; - - /* - * We only create shared fences for internal use, but importers - * of the dmabuf rely on exclusive fences for implicitly - * tracking write hazards. As any of the current fences may - * correspond to a write, we need to convert all existing - * fences on the reservation object into a single exclusive - * fence. 
- */ - r = __dma_resv_make_exclusive(bo->tbo.base.resv); - if (r) - goto out; - - bo->prime_shared_count++; - amdgpu_bo_unreserve(bo); return 0; out: @@ -150,9 +90,6 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf, struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count) - bo->prime_shared_count--; - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); } @@ -418,8 +355,6 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf) bo = gem_to_amdgpu_bo(gobj); bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; - if (dma_buf->ops != &amdgpu_dmabuf_ops) - bo->prime_shared_count = 1; dma_resv_unlock(resv); return gobj; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 361b86b71b56..2bd13fc2541a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1285,7 +1285,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, #endif /* Get rid of things like offb */ - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 72d9b92b1754..d4547d195173 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -490,7 +490,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, num_hw_submission, amdgpu_job_hang_limit, - timeout, sched_score, ring->name); + timeout, NULL, sched_score, ring->name); if (r) { DRM_ERROR("Failed to create scheduler on ring %s.\n", ring->name); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 854fc497844b..611fd10c3410 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -838,7 +838,8 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, break; } case AMDGPU_GEM_OP_SET_PLACEMENT: - if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) { + if (robj->tbo.base.import_attach && + args->value & AMDGPU_GEM_DOMAIN_VRAM) { r = -EINVAL; amdgpu_bo_unreserve(robj); break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index ec96e0b26b11..543000304a1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -132,14 +132,11 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, struct amdgpu_gtt_node *node; int r; - spin_lock(&mgr->lock); - if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT && - atomic64_read(&mgr->available) < num_pages) { - spin_unlock(&mgr->lock); + if (!(place->flags & TTM_PL_FLAG_TEMPORARY) && + atomic64_add_return(num_pages, &mgr->used) > man->size) { + atomic64_sub(num_pages, &mgr->used); return -ENOSPC; } - atomic64_sub(num_pages, &mgr->available); - spin_unlock(&mgr->lock); node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL); if (!node) { @@ -175,7 +172,8 @@ err_free: kfree(node); err_out: - atomic64_add(num_pages, &mgr->available); + if (!(place->flags & TTM_PL_FLAG_TEMPORARY)) + atomic64_sub(num_pages, &mgr->used); return r; } @@ 
-198,7 +196,9 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, if (drm_mm_node_allocated(&node->base.mm_nodes[0])) drm_mm_remove_node(&node->base.mm_nodes[0]); spin_unlock(&mgr->lock); - atomic64_add(res->num_pages, &mgr->available); + + if (!(res->placement & TTM_PL_FLAG_TEMPORARY)) + atomic64_sub(res->num_pages, &mgr->used); kfree(node); } @@ -213,9 +213,8 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); - s64 result = man->size - atomic64_read(&mgr->available); - return (result > 0 ? result : 0) * PAGE_SIZE; + return atomic64_read(&mgr->used) * PAGE_SIZE; } /** @@ -265,9 +264,8 @@ static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man, drm_mm_print(&mgr->mm, printer); spin_unlock(&mgr->lock); - drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n", - man->size, (u64)atomic64_read(&mgr->available), - amdgpu_gtt_mgr_usage(man) >> 20); + drm_printf(printer, "man size:%llu pages, gtt used:%llu pages\n", + man->size, atomic64_read(&mgr->used)); } static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = { @@ -299,7 +297,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; drm_mm_init(&mgr->mm, start, size); spin_lock_init(&mgr->lock); - atomic64_set(&mgr->available, gtt_size >> PAGE_SHIFT); + atomic64_set(&mgr->used, 0); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager); ttm_resource_manager_set_used(man, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 83af307e97cd..0d01cfaca77e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -617,7 +617,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type) { - if (!adev_to_drm(adev)->irq_enabled) + if (!adev->irq.installed) return -ENOENT; if (type >= src->num_types) @@ -647,7 +647,7 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type) { - if (!adev_to_drm(adev)->irq_enabled) + if (!adev->irq.installed) return -ENOENT; if (type >= src->num_types) @@ -678,7 +678,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type) { - if (!adev_to_drm(adev)->irq_enabled) + if (!adev->irq.installed) return false; if (type >= src->num_types) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 795fa7445abe..261283f5b424 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -196,7 +196,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) c++; } - BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS); + BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS); placement->num_placement = c; placement->placement = places; @@ -913,7 +913,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; /* A shared bo cannot be migrated to VRAM */ - if (bo->prime_shared_count || bo->tbo.base.import_attach) { + if (bo->tbo.base.import_attach) { if (domain & AMDGPU_GEM_DOMAIN_GTT) domain = AMDGPU_GEM_DOMAIN_GTT; else diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 38c834d0f930..e72f329e7f18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -100,7 +100,6 @@ struct amdgpu_bo { struct ttm_buffer_object tbo; struct ttm_bo_kmap_obj kmap; u64 flags; - unsigned prime_shared_count; /* per VM structure for page tables and with virtual addresses */ struct amdgpu_vm_bo_base *vm_bo; /* Constant after initialization */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 1b2ceccaf5b0..862eb3c1c4c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -28,6 +28,8 @@ * Christian König <christian.koenig@amd.com> */ +#include <linux/dma-fence-chain.h> + #include "amdgpu.h" #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" @@ -186,6 +188,55 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence) return amdgpu_sync_fence(sync, fence); } +/* Determine based on the owner and mode if we should sync to a fence or not */ +static bool amdgpu_sync_test_fence(struct amdgpu_device *adev, + enum amdgpu_sync_mode mode, + void *owner, struct dma_fence *f) +{ + void *fence_owner = amdgpu_sync_get_owner(f); + + /* Always sync to moves, no matter what */ + if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) + return true; + + /* We only want to trigger KFD eviction fences on + * evict or move jobs. Skip KFD fences otherwise. + */ + if (fence_owner == AMDGPU_FENCE_OWNER_KFD && + owner != AMDGPU_FENCE_OWNER_UNDEFINED) + return false; + + /* Never sync to VM updates either. */ + if (fence_owner == AMDGPU_FENCE_OWNER_VM && + owner != AMDGPU_FENCE_OWNER_UNDEFINED) + return false; + + /* Ignore fences depending on the sync mode */ + switch (mode) { + case AMDGPU_SYNC_ALWAYS: + return true; + + case AMDGPU_SYNC_NE_OWNER: + if (amdgpu_sync_same_dev(adev, f) && + fence_owner == owner) + return false; + break; + + case AMDGPU_SYNC_EQ_OWNER: + if (amdgpu_sync_same_dev(adev, f) && + fence_owner != owner) + return false; + break; + + case AMDGPU_SYNC_EXPLICIT: + return false; + } + + WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD, + "Adding eviction fence to sync obj"); + return true; +} + /** * amdgpu_sync_resv - sync to a reservation object * @@ -211,67 +262,34 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, /* always sync to the exclusive fence */ f = dma_resv_excl_fence(resv); - r = amdgpu_sync_fence(sync, f); + dma_fence_chain_for_each(f, f) { + struct dma_fence_chain *chain = to_dma_fence_chain(f); + + if (amdgpu_sync_test_fence(adev, mode, owner, chain ? + chain->fence : f)) { + r = amdgpu_sync_fence(sync, f); + dma_fence_put(f); + if (r) + return r; + break; + } + } flist = dma_resv_shared_list(resv); - if (!flist || r) - return r; + if (!flist) + return 0; for (i = 0; i < flist->shared_count; ++i) { - void *fence_owner; - f = rcu_dereference_protected(flist->shared[i], dma_resv_held(resv)); - fence_owner = amdgpu_sync_get_owner(f); - - /* Always sync to moves, no matter what */ - if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) { + if (amdgpu_sync_test_fence(adev, mode, owner, f)) { r = amdgpu_sync_fence(sync, f); if (r) - break; - } - - /* We only want to trigger KFD eviction fences on - * evict or move jobs. Skip KFD fences otherwise. 
- */ - if (fence_owner == AMDGPU_FENCE_OWNER_KFD && - owner != AMDGPU_FENCE_OWNER_UNDEFINED) - continue; - - /* Never sync to VM updates either. */ - if (fence_owner == AMDGPU_FENCE_OWNER_VM && - owner != AMDGPU_FENCE_OWNER_UNDEFINED) - continue; - - /* Ignore fences depending on the sync mode */ - switch (mode) { - case AMDGPU_SYNC_ALWAYS: - break; - - case AMDGPU_SYNC_NE_OWNER: - if (amdgpu_sync_same_dev(adev, f) && - fence_owner == owner) - continue; - break; - - case AMDGPU_SYNC_EQ_OWNER: - if (amdgpu_sync_same_dev(adev, f) && - fence_owner != owner) - continue; - break; - - case AMDGPU_SYNC_EXPLICIT: - continue; + return r; } - - WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD, - "Adding eviction fence to sync obj"); - r = amdgpu_sync_fence(sync, f); - if (r) - break; } - return r; + return 0; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 3a55f08e00e1..acd95d3a4434 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -149,14 +149,16 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, * BOs to be evicted from VRAM */ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT); + AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU); abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; abo->placements[0].lpfn = 0; abo->placement.busy_placement = &abo->placements[1]; abo->placement.num_busy_placement = 1; } else { /* Move to GTT memory */ - amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); + amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU); } break; case TTM_PL_TT: @@ -521,7 +523,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, hop->fpfn = 0; hop->lpfn = 0; hop->mem_type = TTM_PL_TT; - hop->flags = 0; + hop->flags = TTM_PL_FLAG_TEMPORARY; return -EMULTIHOP; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index e69f3e8e06e5..3205fd520060 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -52,7 +52,7 @@ struct amdgpu_gtt_mgr { struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; - atomic64_t available; + atomic64_t used; }; struct amdgpu_preempt_mgr { diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index ff45f23f3d56..93b7f09b96ca 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -13,7 +13,6 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> -#include <drm/drm_irq.h> #include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -301,8 +300,6 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev) if (err) goto free_component_binding; - drm->irq_enabled = true; - drm_kms_helper_poll_init(drm); err = drm_dev_register(drm, 0); @@ -313,7 +310,6 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev) free_interrupts: drm_kms_helper_poll_fini(drm); - drm->irq_enabled = false; free_component_binding: component_unbind_all(mdev->dev, drm); cleanup_mode_config: @@ -331,7 +327,6 @@ void komeda_kms_detach(struct komeda_kms_dev *kms) drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); drm_atomic_helper_shutdown(drm); - drm->irq_enabled = false; component_unbind_all(mdev->dev, drm); 
drm_mode_config_cleanup(drm); komeda_kms_cleanup_private_objs(kms); diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index de59f3302516..78d15b04b105 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -847,8 +847,6 @@ static int malidp_bind(struct device *dev) if (ret < 0) goto irq_init_fail; - drm->irq_enabled = true; - ret = drm_vblank_init(drm, drm->mode_config.num_crtc); if (ret < 0) { DRM_ERROR("failed to initialise vblank\n"); @@ -874,7 +872,6 @@ register_fail: vblank_fail: malidp_se_irq_fini(hwdev); malidp_de_irq_fini(hwdev); - drm->irq_enabled = false; irq_init_fail: drm_atomic_helper_shutdown(drm); component_unbind_all(dev, drm); @@ -909,7 +906,6 @@ static void malidp_unbind(struct device *dev) drm_atomic_helper_shutdown(drm); malidp_se_irq_fini(hwdev); malidp_de_irq_fini(hwdev); - drm->irq_enabled = false; component_unbind_all(dev, drm); of_node_put(malidp->crtc.port); malidp->crtc.port = NULL; diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index dab0a1f0983b..8e3e98f13db4 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -95,7 +95,7 @@ static int armada_drm_bind(struct device *dev) } /* Remove early framebuffers */ - ret = drm_aperture_remove_framebuffers(false, "armada-drm-fb"); + ret = drm_aperture_remove_framebuffers(false, &armada_drm_driver); if (ret) { dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n", __func__, ret); @@ -130,8 +130,6 @@ static int armada_drm_bind(struct device *dev) if (ret) goto err_comp; - priv->drm.irq_enabled = true; - drm_mode_config_reset(&priv->drm); ret = armada_fbdev_init(&priv->drm); diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index d3e3e5fdc390..424250535fed 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -247,8 +247,6 @@ static void armada_drm_overlay_plane_atomic_disable(struct drm_plane *plane, } static const struct drm_plane_helper_funcs armada_overlay_plane_helper_funcs = { - .prepare_fb = armada_drm_plane_prepare_fb, - .cleanup_fb = armada_drm_plane_cleanup_fb, .atomic_check = armada_drm_plane_atomic_check, .atomic_update = armada_drm_overlay_plane_atomic_update, .atomic_disable = armada_drm_overlay_plane_atomic_disable, diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c index 40209e49f34a..959d7f0a5108 100644 --- a/drivers/gpu/drm/armada/armada_plane.c +++ b/drivers/gpu/drm/armada/armada_plane.c @@ -78,33 +78,6 @@ void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3], } } -int armada_drm_plane_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *state) -{ - DRM_DEBUG_KMS("[PLANE:%d:%s] [FB:%d]\n", - plane->base.id, plane->name, - state->fb ? state->fb->base.id : 0); - - /* - * Take a reference on the new framebuffer - we want to - * hold on to it while the hardware is displaying it. - */ - if (state->fb) - drm_framebuffer_get(state->fb); - return 0; -} - -void armada_drm_plane_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - DRM_DEBUG_KMS("[PLANE:%d:%s] [FB:%d]\n", - plane->base.id, plane->name, - old_state->fb ? 
old_state->fb->base.id : 0); - - if (old_state->fb) - drm_framebuffer_put(old_state->fb); -} - int armada_drm_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) { @@ -282,8 +255,6 @@ static void armada_drm_primary_plane_atomic_disable(struct drm_plane *plane, } static const struct drm_plane_helper_funcs armada_primary_plane_helper_funcs = { - .prepare_fb = armada_drm_plane_prepare_fb, - .cleanup_fb = armada_drm_plane_cleanup_fb, .atomic_check = armada_drm_plane_atomic_check, .atomic_update = armada_drm_primary_plane_atomic_update, .atomic_disable = armada_drm_primary_plane_atomic_disable, diff --git a/drivers/gpu/drm/armada/armada_plane.h b/drivers/gpu/drm/armada/armada_plane.h index 51dab8d8da22..368415c609a6 100644 --- a/drivers/gpu/drm/armada/armada_plane.h +++ b/drivers/gpu/drm/armada/armada_plane.h @@ -21,8 +21,6 @@ struct armada_plane_state { void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3], u16 pitches[3], bool interlaced); -int armada_drm_plane_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *state); void armada_drm_plane_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state); int armada_drm_plane_atomic_check(struct drm_plane *plane, diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c index 098f96d4d50d..827e62c1daba 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c @@ -220,7 +220,6 @@ static const struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = { .enable = aspeed_gfx_pipe_enable, .disable = aspeed_gfx_pipe_disable, .update = aspeed_gfx_pipe_update, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, .enable_vblank = aspeed_gfx_enable_vblank, .disable_vblank = aspeed_gfx_disable_vblank, }; diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 5aa452b4efe6..86d5cd7b6318 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -100,7 +100,7 @@ static int ast_remove_conflicting_framebuffers(struct pci_dev *pdev) primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; #endif - return drm_aperture_remove_conflicting_framebuffers(base, size, primary, "astdrmfb"); + return drm_aperture_remove_conflicting_framebuffers(base, size, primary, &ast_driver); } static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 911f9f414774..39ca338eb80b 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -337,6 +337,11 @@ int ast_mode_config_init(struct ast_private *ast); #define AST_DP501_LINKRATE 0xf014 #define AST_DP501_EDID_DATA 0xf020 +/* Define for Soc scratched reg */ +#define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6) +//#define AST_VRAM_INIT_BY_BMC BIT(7) +//#define AST_VRAM_INIT_READY BIT(6) + int ast_mm_init(struct ast_private *ast); /* ast post */ @@ -346,6 +351,7 @@ bool ast_is_vga_enabled(struct drm_device *dev); void ast_post_gpu(struct drm_device *dev); u32 ast_mindwm(struct ast_private *ast, u32 r); void ast_moutdwm(struct ast_private *ast, u32 r, u32 v); +void ast_patch_ahb_2500(struct ast_private *ast); /* ast dp501 */ void ast_set_dp501_video_output(struct drm_device *dev, u8 mode); bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 2aff2e6cf450..79a361867955 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ 
b/drivers/gpu/drm/ast/ast_main.c @@ -97,6 +97,11 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { + /* Patch AST2500 */ + if (((pdev->revision & 0xF0) == 0x40) + && ((jregd0 & AST_VRAM_INIT_STATUS_MASK) == 0)) + ast_patch_ahb_2500(ast); + /* Double check it's actually working */ data = ast_read32(ast, 0xf004); if ((data != 0xFFFFFFFF) && (data != 0x00)) { diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 36d9575aa27b..f32da620a123 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -612,8 +612,7 @@ ast_primary_plane_helper_atomic_disable(struct drm_plane *plane, } static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = { - .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, - .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb, + DRM_GEM_VRAM_PLANE_HELPER_FUNCS, .atomic_check = ast_primary_plane_helper_atomic_check, .atomic_update = ast_primary_plane_helper_atomic_update, .atomic_disable = ast_primary_plane_helper_atomic_disable, @@ -1293,6 +1292,18 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector, return flags; } +static enum drm_connector_status ast_connector_detect(struct drm_connector + *connector, bool force) +{ + int r; + + r = ast_get_modes(connector); + if (r <= 0) + return connector_status_disconnected; + + return connector_status_connected; +} + static void ast_connector_destroy(struct drm_connector *connector) { struct ast_connector *ast_connector = to_ast_connector(connector); @@ -1307,6 +1318,7 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = { static const struct drm_connector_funcs ast_connector_funcs = { .reset = drm_atomic_helper_connector_reset, + .detect = ast_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = ast_connector_destroy, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -1334,7 +1346,8 @@ static int ast_connector_init(struct drm_device *dev) connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; drm_connector_attach_encoder(connector, encoder); @@ -1403,6 +1416,8 @@ int ast_mode_config_init(struct ast_private *ast) drm_mode_config_reset(dev); + drm_kms_helper_poll_init(dev); + return 0; } diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 0607658dde51..b5d92f652fd8 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -2028,6 +2028,40 @@ static bool ast_dram_init_2500(struct ast_private *ast) return true; } +void ast_patch_ahb_2500(struct ast_private *ast) +{ + u32 data; + + /* Clear bus lock condition */ + ast_moutdwm(ast, 0x1e600000, 0xAEED1A03); + ast_moutdwm(ast, 0x1e600084, 0x00010000); + ast_moutdwm(ast, 0x1e600088, 0x00000000); + ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); + data = ast_mindwm(ast, 0x1e6e2070); + if (data & 0x08000000) { /* check fast reset */ + /* + * If "Fast reset" is enabled for the ARM-ICE debugger, + * then the WDT needs to be enabled; note that + * WDT04 is WDT#1 Reload reg. 
+ * WDT08 is WDT#1 counter restart reg to avoid system deadlock + * WDT0C is WDT#1 control reg + * [6:5]:= 01:Full chip + * [4]:= 1:1MHz clock source + * [1]:= 1:WDT will be cleared and disabled after timeout occurs + * [0]:= 1:WDT enable + */ + ast_moutdwm(ast, 0x1E785004, 0x00000010); + ast_moutdwm(ast, 0x1E785008, 0x00004755); + ast_moutdwm(ast, 0x1E78500c, 0x00000033); + udelay(1000); + } + do { + ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); + data = ast_mindwm(ast, 0x1e6e2000); + } while (data != 1); + ast_moutdwm(ast, 0x1e6e207c, 0x08000000); /* clear fast reset */ +} + void ast_post_chip_2500(struct drm_device *dev) { struct ast_private *ast = to_ast_private(dev); @@ -2035,39 +2069,44 @@ void ast_post_chip_2500(struct drm_device *dev) u8 reg; reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); - if ((reg & 0x80) == 0) {/* vga only */ + if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */ /* Clear bus lock condition */ - ast_moutdwm(ast, 0x1e600000, 0xAEED1A03); - ast_moutdwm(ast, 0x1e600084, 0x00010000); - ast_moutdwm(ast, 0x1e600088, 0x00000000); - ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - ast_write32(ast, 0x12000, 0x1688a8a8); - while (ast_read32(ast, 0x12000) != 0x1) - ; - - ast_write32(ast, 0x10000, 0xfc600309); - while (ast_read32(ast, 0x10000) != 0x1) - ; + ast_patch_ahb_2500(ast); + + /* Disable watchdog */ + ast_moutdwm(ast, 0x1E78502C, 0x00000000); + ast_moutdwm(ast, 0x1E78504C, 0x00000000); + + /* + * Reset USB port to patch USB unknown device issue + * SCU90 is Multi-function Pin Control #5 + * [29]:= 1:Enable USB2.0 Host port#1 (that is the mutually shared USB2.0 Hub + * port). + * SCU94 is Multi-function Pin Control #6 + * [14:13]:= 1x:USB2.0 Host2 controller + * SCU70 is Hardware Strap reg + * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by + * [18]: 0(24)/1(48) MHz) + * SCU7C is Write clear reg to SCU70 + * [23]:= write 1 and then SCU70[23] will be cleared to 0b. + */ + ast_moutdwm(ast, 0x1E6E2090, 0x20000000); + ast_moutdwm(ast, 0x1E6E2094, 0x00004000); + if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) { + ast_moutdwm(ast, 0x1E6E207C, 0x00800000); + mdelay(100); + ast_moutdwm(ast, 0x1E6E2070, 0x00800000); + } + /* Modify eSPI reset pin */ + temp = ast_mindwm(ast, 0x1E6E2070); + if (temp & 0x02000000) + ast_moutdwm(ast, 0x1E6E207C, 0x00004000); /* Slow down CPU/AHB CLK in VGA only mode */ temp = ast_read32(ast, 0x12008); temp |= 0x73; ast_write32(ast, 0x12008, temp); - /* Reset USB port to patch USB unknown device issue */ - ast_moutdwm(ast, 0x1e6e2090, 0x20000000); - temp = ast_mindwm(ast, 0x1e6e2094); - temp |= 0x00004000; - ast_moutdwm(ast, 0x1e6e2094, temp); - temp = ast_mindwm(ast, 0x1e6e2070); - if (temp & 0x00800000) { - ast_moutdwm(ast, 0x1e6e207c, 0x00800000); - mdelay(100); - ast_moutdwm(ast, 0x1e6e2070, 0x00800000); - } - if (!ast_dram_init_2500(ast)) drm_err(dev, "DRAM init failed !\n"); diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig deleted file mode 100644 index 7bcdf294fed8..000000000000 --- a/drivers/gpu/drm/bochs/Kconfig +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -config DRM_BOCHS - tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" - depends on DRM && PCI && MMU - select DRM_KMS_HELPER - select DRM_VRAM_HELPER - select DRM_TTM - select DRM_TTM_HELPER - help - Choose this option for qemu. - If M is selected the module will be called bochs-drm. 
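Note on the ast_post.c hunk above: the unlock loop added in ast_patch_ahb_2500() writes the protection key to the SDRAM controller and spins until the read-back returns 1, with no upper bound. The sketch below is illustrative only and is not part of this patch; it shows a bounded variant using the ast_mindwm()/ast_moutdwm() accessors declared in ast_drv.h, but the helper name, retry count, delay, and boolean return are assumptions.

/* Illustrative sketch only: a bounded version of the SDRAM-controller unlock poll. */
static bool ast_try_unlock_sdram_2500(struct ast_private *ast)
{
	unsigned int retries = 1000;	/* assumed limit, not from the patch */

	do {
		/* Write the protection key, then read back; 1 means unlocked. */
		ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
		if (ast_mindwm(ast, 0x1e6e2000) == 1)
			return true;
		udelay(10);	/* udelay() is already used elsewhere in ast_post.c */
	} while (--retries);

	return false;	/* caller decides how to handle a controller that never unlocks */
}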
diff --git a/drivers/gpu/drm/bochs/Makefile b/drivers/gpu/drm/bochs/Makefile deleted file mode 100644 index 55473371300f..000000000000 --- a/drivers/gpu/drm/bochs/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_hw.o - -obj-$(CONFIG_DRM_BOCHS) += bochs-drm.o diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h deleted file mode 100644 index e9645c612aff..000000000000 --- a/drivers/gpu/drm/bochs/bochs.h +++ /dev/null @@ -1,98 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#include <linux/io.h> -#include <linux/console.h> - -#include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> -#include <drm/drm_encoder.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_gem.h> -#include <drm/drm_gem_vram_helper.h> -#include <drm/drm_simple_kms_helper.h> - -/* ---------------------------------------------------------------------- */ - -#define VBE_DISPI_IOPORT_INDEX 0x01CE -#define VBE_DISPI_IOPORT_DATA 0x01CF - -#define VBE_DISPI_INDEX_ID 0x0 -#define VBE_DISPI_INDEX_XRES 0x1 -#define VBE_DISPI_INDEX_YRES 0x2 -#define VBE_DISPI_INDEX_BPP 0x3 -#define VBE_DISPI_INDEX_ENABLE 0x4 -#define VBE_DISPI_INDEX_BANK 0x5 -#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6 -#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7 -#define VBE_DISPI_INDEX_X_OFFSET 0x8 -#define VBE_DISPI_INDEX_Y_OFFSET 0x9 -#define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa - -#define VBE_DISPI_ID0 0xB0C0 -#define VBE_DISPI_ID1 0xB0C1 -#define VBE_DISPI_ID2 0xB0C2 -#define VBE_DISPI_ID3 0xB0C3 -#define VBE_DISPI_ID4 0xB0C4 -#define VBE_DISPI_ID5 0xB0C5 - -#define VBE_DISPI_DISABLED 0x00 -#define VBE_DISPI_ENABLED 0x01 -#define VBE_DISPI_GETCAPS 0x02 -#define VBE_DISPI_8BIT_DAC 0x20 -#define VBE_DISPI_LFB_ENABLED 0x40 -#define VBE_DISPI_NOCLEARMEM 0x80 - -/* ---------------------------------------------------------------------- */ - -enum bochs_types { - BOCHS_QEMU_STDVGA, - BOCHS_UNKNOWN, -}; - -struct bochs_device { - /* hw */ - void __iomem *mmio; - int ioports; - void __iomem *fb_map; - unsigned long fb_base; - unsigned long fb_size; - unsigned long qext_size; - - /* mode */ - u16 xres; - u16 yres; - u16 yres_virtual; - u32 stride; - u32 bpp; - struct edid *edid; - - /* drm */ - struct drm_device *dev; - struct drm_simple_display_pipe pipe; - struct drm_connector connector; -}; - -/* ---------------------------------------------------------------------- */ - -/* bochs_hw.c */ -int bochs_hw_init(struct drm_device *dev); -void bochs_hw_fini(struct drm_device *dev); - -void bochs_hw_blank(struct bochs_device *bochs, bool blank); -void bochs_hw_setmode(struct bochs_device *bochs, - struct drm_display_mode *mode); -void bochs_hw_setformat(struct bochs_device *bochs, - const struct drm_format_info *format); -void bochs_hw_setbase(struct bochs_device *bochs, - int x, int y, int stride, u64 addr); -int bochs_hw_load_edid(struct bochs_device *bochs); - -/* bochs_mm.c */ -int bochs_mm_init(struct bochs_device *bochs); -void bochs_mm_fini(struct bochs_device *bochs); - -/* bochs_kms.c */ -int bochs_kms_init(struct bochs_device *bochs); - -/* bochs_fbdev.c */ -extern const struct drm_mode_config_funcs bochs_mode_funcs; diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c deleted file mode 100644 index c828cadbabff..000000000000 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ /dev/null @@ -1,205 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - */ - -#include <linux/module.h> -#include <linux/pci.h> - -#include 
<drm/drm_drv.h> -#include <drm/drm_aperture.h> -#include <drm/drm_atomic_helper.h> -#include <drm/drm_managed.h> - -#include "bochs.h" - -static int bochs_modeset = -1; -module_param_named(modeset, bochs_modeset, int, 0444); -MODULE_PARM_DESC(modeset, "enable/disable kernel modesetting"); - -/* ---------------------------------------------------------------------- */ -/* drm interface */ - -static void bochs_unload(struct drm_device *dev) -{ - struct bochs_device *bochs = dev->dev_private; - - bochs_mm_fini(bochs); -} - -static int bochs_load(struct drm_device *dev) -{ - struct bochs_device *bochs; - int ret; - - bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL); - if (bochs == NULL) - return -ENOMEM; - dev->dev_private = bochs; - bochs->dev = dev; - - ret = bochs_hw_init(dev); - if (ret) - goto err; - - ret = bochs_mm_init(bochs); - if (ret) - goto err; - - ret = bochs_kms_init(bochs); - if (ret) - goto err; - - return 0; - -err: - bochs_unload(dev); - return ret; -} - -DEFINE_DRM_GEM_FOPS(bochs_fops); - -static const struct drm_driver bochs_driver = { - .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, - .fops = &bochs_fops, - .name = "bochs-drm", - .desc = "bochs dispi vga interface (qemu stdvga)", - .date = "20130925", - .major = 1, - .minor = 0, - DRM_GEM_VRAM_DRIVER, - .release = bochs_unload, -}; - -/* ---------------------------------------------------------------------- */ -/* pm interface */ - -#ifdef CONFIG_PM_SLEEP -static int bochs_pm_suspend(struct device *dev) -{ - struct drm_device *drm_dev = dev_get_drvdata(dev); - - return drm_mode_config_helper_suspend(drm_dev); -} - -static int bochs_pm_resume(struct device *dev) -{ - struct drm_device *drm_dev = dev_get_drvdata(dev); - - return drm_mode_config_helper_resume(drm_dev); -} -#endif - -static const struct dev_pm_ops bochs_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(bochs_pm_suspend, - bochs_pm_resume) -}; - -/* ---------------------------------------------------------------------- */ -/* pci interface */ - -static int bochs_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct drm_device *dev; - unsigned long fbsize; - int ret; - - fbsize = pci_resource_len(pdev, 0); - if (fbsize < 4 * 1024 * 1024) { - DRM_ERROR("less than 4 MB video memory, ignoring device\n"); - return -ENOMEM; - } - - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "bochsdrmfb"); - if (ret) - return ret; - - dev = drm_dev_alloc(&bochs_driver, &pdev->dev); - if (IS_ERR(dev)) - return PTR_ERR(dev); - - ret = pci_enable_device(pdev); - if (ret) - goto err_free_dev; - - pci_set_drvdata(pdev, dev); - - ret = bochs_load(dev); - if (ret) - goto err_free_dev; - - ret = drm_dev_register(dev, 0); - if (ret) - goto err_unload; - - drm_fbdev_generic_setup(dev, 32); - return ret; - -err_unload: - bochs_unload(dev); -err_free_dev: - drm_dev_put(dev); - return ret; -} - -static void bochs_pci_remove(struct pci_dev *pdev) -{ - struct drm_device *dev = pci_get_drvdata(pdev); - - drm_dev_unplug(dev); - drm_atomic_helper_shutdown(dev); - bochs_hw_fini(dev); - drm_dev_put(dev); -} - -static const struct pci_device_id bochs_pci_tbl[] = { - { - .vendor = 0x1234, - .device = 0x1111, - .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET, - .subdevice = PCI_SUBDEVICE_ID_QEMU, - .driver_data = BOCHS_QEMU_STDVGA, - }, - { - .vendor = 0x1234, - .device = 0x1111, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = BOCHS_UNKNOWN, - }, - { /* end of list */ } -}; - -static struct pci_driver bochs_pci_driver = { - .name = 
"bochs-drm", - .id_table = bochs_pci_tbl, - .probe = bochs_pci_probe, - .remove = bochs_pci_remove, - .driver.pm = &bochs_pm_ops, -}; - -/* ---------------------------------------------------------------------- */ -/* module init/exit */ - -static int __init bochs_init(void) -{ - if (vgacon_text_force() && bochs_modeset == -1) - return -EINVAL; - - if (bochs_modeset == 0) - return -EINVAL; - - return pci_register_driver(&bochs_pci_driver); -} - -static void __exit bochs_exit(void) -{ - pci_unregister_driver(&bochs_pci_driver); -} - -module_init(bochs_init); -module_exit(bochs_exit); - -MODULE_DEVICE_TABLE(pci, bochs_pci_tbl); -MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>"); -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c deleted file mode 100644 index 7d3426d8cc69..000000000000 --- a/drivers/gpu/drm/bochs/bochs_hw.c +++ /dev/null @@ -1,323 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - */ - -#include <linux/pci.h> - -#include <drm/drm_drv.h> -#include <drm/drm_fourcc.h> - -#include <video/vga.h> -#include "bochs.h" - -/* ---------------------------------------------------------------------- */ - -static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val) -{ - if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df)) - return; - - if (bochs->mmio) { - int offset = ioport - 0x3c0 + 0x400; - writeb(val, bochs->mmio + offset); - } else { - outb(val, ioport); - } -} - -static u8 bochs_vga_readb(struct bochs_device *bochs, u16 ioport) -{ - if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df)) - return 0xff; - - if (bochs->mmio) { - int offset = ioport - 0x3c0 + 0x400; - return readb(bochs->mmio + offset); - } else { - return inb(ioport); - } -} - -static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg) -{ - u16 ret = 0; - - if (bochs->mmio) { - int offset = 0x500 + (reg << 1); - ret = readw(bochs->mmio + offset); - } else { - outw(reg, VBE_DISPI_IOPORT_INDEX); - ret = inw(VBE_DISPI_IOPORT_DATA); - } - return ret; -} - -static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val) -{ - if (bochs->mmio) { - int offset = 0x500 + (reg << 1); - writew(val, bochs->mmio + offset); - } else { - outw(reg, VBE_DISPI_IOPORT_INDEX); - outw(val, VBE_DISPI_IOPORT_DATA); - } -} - -static void bochs_hw_set_big_endian(struct bochs_device *bochs) -{ - if (bochs->qext_size < 8) - return; - - writel(0xbebebebe, bochs->mmio + 0x604); -} - -static void bochs_hw_set_little_endian(struct bochs_device *bochs) -{ - if (bochs->qext_size < 8) - return; - - writel(0x1e1e1e1e, bochs->mmio + 0x604); -} - -#ifdef __BIG_ENDIAN -#define bochs_hw_set_native_endian(_b) bochs_hw_set_big_endian(_b) -#else -#define bochs_hw_set_native_endian(_b) bochs_hw_set_little_endian(_b) -#endif - -static int bochs_get_edid_block(void *data, u8 *buf, - unsigned int block, size_t len) -{ - struct bochs_device *bochs = data; - size_t i, start = block * EDID_LENGTH; - - if (start + len > 0x400 /* vga register offset */) - return -1; - - for (i = 0; i < len; i++) { - buf[i] = readb(bochs->mmio + start + i); - } - return 0; -} - -int bochs_hw_load_edid(struct bochs_device *bochs) -{ - u8 header[8]; - - if (!bochs->mmio) - return -1; - - /* check header to detect whenever edid support is enabled in qemu */ - bochs_get_edid_block(bochs, header, 0, ARRAY_SIZE(header)); - if (drm_edid_header_is_valid(header) != 8) - return -1; - - kfree(bochs->edid); - bochs->edid = drm_do_get_edid(&bochs->connector, - bochs_get_edid_block, bochs); - if (bochs->edid == NULL) 
- return -1; - - return 0; -} - -int bochs_hw_init(struct drm_device *dev) -{ - struct bochs_device *bochs = dev->dev_private; - struct pci_dev *pdev = to_pci_dev(dev->dev); - unsigned long addr, size, mem, ioaddr, iosize; - u16 id; - - if (pdev->resource[2].flags & IORESOURCE_MEM) { - /* mmio bar with vga and bochs registers present */ - if (pci_request_region(pdev, 2, "bochs-drm") != 0) { - DRM_ERROR("Cannot request mmio region\n"); - return -EBUSY; - } - ioaddr = pci_resource_start(pdev, 2); - iosize = pci_resource_len(pdev, 2); - bochs->mmio = ioremap(ioaddr, iosize); - if (bochs->mmio == NULL) { - DRM_ERROR("Cannot map mmio region\n"); - return -ENOMEM; - } - } else { - ioaddr = VBE_DISPI_IOPORT_INDEX; - iosize = 2; - if (!request_region(ioaddr, iosize, "bochs-drm")) { - DRM_ERROR("Cannot request ioports\n"); - return -EBUSY; - } - bochs->ioports = 1; - } - - id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID); - mem = bochs_dispi_read(bochs, VBE_DISPI_INDEX_VIDEO_MEMORY_64K) - * 64 * 1024; - if ((id & 0xfff0) != VBE_DISPI_ID0) { - DRM_ERROR("ID mismatch\n"); - return -ENODEV; - } - - if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0) - return -ENODEV; - addr = pci_resource_start(pdev, 0); - size = pci_resource_len(pdev, 0); - if (addr == 0) - return -ENODEV; - if (size != mem) { - DRM_ERROR("Size mismatch: pci=%ld, bochs=%ld\n", - size, mem); - size = min(size, mem); - } - - if (pci_request_region(pdev, 0, "bochs-drm") != 0) - DRM_WARN("Cannot request framebuffer, boot fb still active?\n"); - - bochs->fb_map = ioremap(addr, size); - if (bochs->fb_map == NULL) { - DRM_ERROR("Cannot map framebuffer\n"); - return -ENOMEM; - } - bochs->fb_base = addr; - bochs->fb_size = size; - - DRM_INFO("Found bochs VGA, ID 0x%x.\n", id); - DRM_INFO("Framebuffer size %ld kB @ 0x%lx, %s @ 0x%lx.\n", - size / 1024, addr, - bochs->ioports ? "ioports" : "mmio", - ioaddr); - - if (bochs->mmio && pdev->revision >= 2) { - bochs->qext_size = readl(bochs->mmio + 0x600); - if (bochs->qext_size < 4 || bochs->qext_size > iosize) { - bochs->qext_size = 0; - goto noext; - } - DRM_DEBUG("Found qemu ext regs, size %ld\n", - bochs->qext_size); - bochs_hw_set_native_endian(bochs); - } - -noext: - return 0; -} - -void bochs_hw_fini(struct drm_device *dev) -{ - struct bochs_device *bochs = dev->dev_private; - - /* TODO: shot down existing vram mappings */ - - if (bochs->mmio) - iounmap(bochs->mmio); - if (bochs->ioports) - release_region(VBE_DISPI_IOPORT_INDEX, 2); - if (bochs->fb_map) - iounmap(bochs->fb_map); - pci_release_regions(to_pci_dev(dev->dev)); - kfree(bochs->edid); -} - -void bochs_hw_blank(struct bochs_device *bochs, bool blank) -{ - DRM_DEBUG_DRIVER("hw_blank %d\n", blank); - /* discard ar_flip_flop */ - (void)bochs_vga_readb(bochs, VGA_IS1_RC); - /* blank or unblank; we need only update index and set 0x20 */ - bochs_vga_writeb(bochs, VGA_ATT_W, blank ? 
0 : 0x20); -} - -void bochs_hw_setmode(struct bochs_device *bochs, - struct drm_display_mode *mode) -{ - int idx; - - if (!drm_dev_enter(bochs->dev, &idx)) - return; - - bochs->xres = mode->hdisplay; - bochs->yres = mode->vdisplay; - bochs->bpp = 32; - bochs->stride = mode->hdisplay * (bochs->bpp / 8); - bochs->yres_virtual = bochs->fb_size / bochs->stride; - - DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n", - bochs->xres, bochs->yres, bochs->bpp, - bochs->yres_virtual); - - bochs_hw_blank(bochs, false); - - bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0); - bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp); - bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres); - bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES, bochs->yres); - bochs_dispi_write(bochs, VBE_DISPI_INDEX_BANK, 0); - bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, bochs->xres); - bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_HEIGHT, - bochs->yres_virtual); - bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, 0); - bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, 0); - - bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, - VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED); - - drm_dev_exit(idx); -} - -void bochs_hw_setformat(struct bochs_device *bochs, - const struct drm_format_info *format) -{ - int idx; - - if (!drm_dev_enter(bochs->dev, &idx)) - return; - - DRM_DEBUG_DRIVER("format %c%c%c%c\n", - (format->format >> 0) & 0xff, - (format->format >> 8) & 0xff, - (format->format >> 16) & 0xff, - (format->format >> 24) & 0xff); - - switch (format->format) { - case DRM_FORMAT_XRGB8888: - bochs_hw_set_little_endian(bochs); - break; - case DRM_FORMAT_BGRX8888: - bochs_hw_set_big_endian(bochs); - break; - default: - /* should not happen */ - DRM_ERROR("%s: Huh? Got framebuffer format 0x%x", - __func__, format->format); - break; - } - - drm_dev_exit(idx); -} - -void bochs_hw_setbase(struct bochs_device *bochs, - int x, int y, int stride, u64 addr) -{ - unsigned long offset; - unsigned int vx, vy, vwidth, idx; - - if (!drm_dev_enter(bochs->dev, &idx)) - return; - - bochs->stride = stride; - offset = (unsigned long)addr + - y * bochs->stride + - x * (bochs->bpp / 8); - vy = offset / bochs->stride; - vx = (offset % bochs->stride) * 8 / bochs->bpp; - vwidth = stride * 8 / bochs->bpp; - - DRM_DEBUG_DRIVER("x %d, y %d, addr %llx -> offset %lx, vx %d, vy %d\n", - x, y, addr, offset, vx, vy); - bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, vwidth); - bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx); - bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy); - - drm_dev_exit(idx); -} diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c deleted file mode 100644 index 99410e77d51a..000000000000 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ /dev/null @@ -1,178 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - */ - -#include <linux/moduleparam.h> - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> -#include <drm/drm_probe_helper.h> - -#include "bochs.h" - -static int defx = 1024; -static int defy = 768; - -module_param(defx, int, 0444); -module_param(defy, int, 0444); -MODULE_PARM_DESC(defx, "default x resolution"); -MODULE_PARM_DESC(defy, "default y resolution"); - -/* ---------------------------------------------------------------------- */ - -static const uint32_t bochs_formats[] = { - DRM_FORMAT_XRGB8888, - DRM_FORMAT_BGRX8888, -}; - -static void bochs_plane_update(struct bochs_device *bochs, - struct drm_plane_state *state) -{ - struct 
drm_gem_vram_object *gbo; - s64 gpu_addr; - - if (!state->fb || !bochs->stride) - return; - - gbo = drm_gem_vram_of_gem(state->fb->obj[0]); - gpu_addr = drm_gem_vram_offset(gbo); - if (WARN_ON_ONCE(gpu_addr < 0)) - return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */ - - bochs_hw_setbase(bochs, - state->crtc_x, - state->crtc_y, - state->fb->pitches[0], - state->fb->offsets[0] + gpu_addr); - bochs_hw_setformat(bochs, state->fb->format); -} - -static void bochs_pipe_enable(struct drm_simple_display_pipe *pipe, - struct drm_crtc_state *crtc_state, - struct drm_plane_state *plane_state) -{ - struct bochs_device *bochs = pipe->crtc.dev->dev_private; - - bochs_hw_setmode(bochs, &crtc_state->mode); - bochs_plane_update(bochs, plane_state); -} - -static void bochs_pipe_disable(struct drm_simple_display_pipe *pipe) -{ - struct bochs_device *bochs = pipe->crtc.dev->dev_private; - - bochs_hw_blank(bochs, true); -} - -static void bochs_pipe_update(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_state) -{ - struct bochs_device *bochs = pipe->crtc.dev->dev_private; - - bochs_plane_update(bochs, pipe->plane.state); -} - -static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = { - .enable = bochs_pipe_enable, - .disable = bochs_pipe_disable, - .update = bochs_pipe_update, - .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb, - .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb, -}; - -static int bochs_connector_get_modes(struct drm_connector *connector) -{ - struct bochs_device *bochs = - container_of(connector, struct bochs_device, connector); - int count = 0; - - if (bochs->edid) - count = drm_add_edid_modes(connector, bochs->edid); - - if (!count) { - count = drm_add_modes_noedid(connector, 8192, 8192); - drm_set_preferred_mode(connector, defx, defy); - } - return count; -} - -static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = { - .get_modes = bochs_connector_get_modes, -}; - -static const struct drm_connector_funcs bochs_connector_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static void bochs_connector_init(struct drm_device *dev) -{ - struct bochs_device *bochs = dev->dev_private; - struct drm_connector *connector = &bochs->connector; - - drm_connector_init(dev, connector, &bochs_connector_connector_funcs, - DRM_MODE_CONNECTOR_VIRTUAL); - drm_connector_helper_add(connector, - &bochs_connector_connector_helper_funcs); - - bochs_hw_load_edid(bochs); - if (bochs->edid) { - DRM_INFO("Found EDID data blob.\n"); - drm_connector_attach_edid_property(connector); - drm_connector_update_edid_property(connector, bochs->edid); - } -} - -static struct drm_framebuffer * -bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file, - const struct drm_mode_fb_cmd2 *mode_cmd) -{ - if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888 && - mode_cmd->pixel_format != DRM_FORMAT_BGRX8888) - return ERR_PTR(-EINVAL); - - return drm_gem_fb_create(dev, file, mode_cmd); -} - -const struct drm_mode_config_funcs bochs_mode_funcs = { - .fb_create = bochs_gem_fb_create, - .mode_valid = drm_vram_helper_mode_valid, - .atomic_check = drm_atomic_helper_check, - .atomic_commit = drm_atomic_helper_commit, -}; - -int bochs_kms_init(struct bochs_device *bochs) -{ - int ret; - - ret = 
drmm_mode_config_init(bochs->dev); - if (ret) - return ret; - - bochs->dev->mode_config.max_width = 8192; - bochs->dev->mode_config.max_height = 8192; - - bochs->dev->mode_config.fb_base = bochs->fb_base; - bochs->dev->mode_config.preferred_depth = 24; - bochs->dev->mode_config.prefer_shadow = 0; - bochs->dev->mode_config.prefer_shadow_fbdev = 1; - bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; - - bochs->dev->mode_config.funcs = &bochs_mode_funcs; - - bochs_connector_init(bochs->dev); - drm_simple_display_pipe_init(bochs->dev, - &bochs->pipe, - &bochs_pipe_funcs, - bochs_formats, - ARRAY_SIZE(bochs_formats), - NULL, - &bochs->connector); - - drm_mode_config_reset(bochs->dev); - - return 0; -} diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c deleted file mode 100644 index 1b74f530b07c..000000000000 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ /dev/null @@ -1,24 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - */ - -#include "bochs.h" - -/* ---------------------------------------------------------------------- */ - -int bochs_mm_init(struct bochs_device *bochs) -{ - struct drm_vram_mm *vmm; - - vmm = drm_vram_helper_alloc_mm(bochs->dev, bochs->fb_base, - bochs->fb_size); - return PTR_ERR_OR_ZERO(vmm); -} - -void bochs_mm_fini(struct bochs_device *bochs) -{ - if (!bochs->dev->vram_mm) - return; - - drm_vram_helper_release_mm(bochs->dev); -} diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 85b673613687..431b6e12a81f 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -303,6 +303,7 @@ config DRM_TI_SN65DSI86 select DRM_PANEL select DRM_MIPI_DSI select AUXILIARY_BUS + select DRM_DP_AUX_BUS help Texas Instruments SN65DSI86 DSI to eDP Bridge driver diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c index aa19d5a40e31..59d718bde8c4 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7533.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c @@ -165,7 +165,7 @@ int adv7533_attach_dsi(struct adv7511 *adv) dsi->lanes = adv->num_dsi_lanes; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | - MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE; + MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE; ret = mipi_dsi_attach(dsi); if (ret < 0) { diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 550814ca2139..b7d2e4449cfa 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1583,7 +1583,6 @@ static int analogix_dp_create_bridge(struct drm_device *drm_dev, struct analogix_dp_device *dp) { struct drm_bridge *bridge; - int ret; bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL); if (!bridge) { @@ -1596,13 +1595,7 @@ static int analogix_dp_create_bridge(struct drm_device *drm_dev, bridge->driver_private = dp; bridge->funcs = &analogix_dp_bridge_funcs; - ret = drm_bridge_attach(dp->encoder, bridge, NULL, 0); - if (ret) { - DRM_ERROR("failed to attach drm bridge\n"); - return -EINVAL; - } - - return 0; + return drm_bridge_attach(dp->encoder, bridge, NULL, 0); } static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp) diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 7519b7a0f29d..920824d8df2f 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ 
b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -1307,7 +1307,7 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx) dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | - MIPI_DSI_MODE_EOT_PACKET | + MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE; if (mipi_dsi_attach(dsi) < 0) { @@ -1359,11 +1359,8 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge, err = drm_bridge_attach(bridge->encoder, ctx->pdata.panel_bridge, &ctx->bridge, flags); - if (err) { - DRM_DEV_ERROR(dev, - "Fail to attach panel bridge: %d\n", err); + if (err) return err; - } } ctx->bridge_attached = 1; @@ -1730,7 +1727,6 @@ static int __maybe_unused anx7625_suspend(struct device *dev) if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) { anx7625_runtime_pm_suspend(dev); disable_irq(ctx->pdata.intp_irq); - flush_workqueue(ctx->workqueue); } return 0; @@ -1790,7 +1786,8 @@ static int anx7625_i2c_probe(struct i2c_client *client, platform->pdata.intp_irq = client->irq; if (platform->pdata.intp_irq) { INIT_WORK(&platform->work, anx7625_work_func); - platform->workqueue = create_workqueue("anx7625_work"); + platform->workqueue = alloc_workqueue("anx7625_work", + WQ_FREEZABLE | WQ_MEM_RECLAIM, 1); if (!platform->workqueue) { DRM_DEV_ERROR(dev, "fail to create work queue\n"); ret = -ENOMEM; @@ -1874,6 +1871,7 @@ static const struct of_device_id anx_match_table[] = { {.compatible = "analogix,anx7625",}, {}, }; +MODULE_DEVICE_TABLE(of, anx_match_table); static struct i2c_driver anx7625_driver = { .driver = { diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c index b31281f76117..e6e331071a00 100644 --- a/drivers/gpu/drm/bridge/cdns-dsi.c +++ b/drivers/gpu/drm/bridge/cdns-dsi.c @@ -829,7 +829,7 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge) tmp = DIV_ROUND_UP(dsi_cfg.htotal, nlanes) - DIV_ROUND_UP(dsi_cfg.hsa, nlanes); - if (!(output->dev->mode_flags & MIPI_DSI_MODE_EOT_PACKET)) + if (!(output->dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)) tmp -= DIV_ROUND_UP(DSI_EOT_PKT_SIZE, nlanes); tx_byte_period = DIV_ROUND_DOWN_ULL((u64)NSEC_PER_SEC * 8, @@ -902,7 +902,7 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge) tmp = readl(dsi->regs + MCTL_MAIN_DATA_CTL); tmp &= ~(IF_VID_SELECT_MASK | HOST_EOT_GEN | IF_VID_MODE); - if (!(output->dev->mode_flags & MIPI_DSI_MODE_EOT_PACKET)) + if (!(output->dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)) tmp |= HOST_EOT_GEN; if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO) diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 76c720b535fb..1b0c7eaf6c84 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -487,7 +487,7 @@ static int lt8912_attach_dsi(struct lt8912 *lt) dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM | - MIPI_DSI_MODE_EOT_PACKET; + MIPI_DSI_MODE_NO_EOT_PACKET; ret = mipi_dsi_attach(dsi); if (ret < 0) { diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index 873995f0a741..ed8ac5059cd2 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -48,12 +48,6 @@ enum transfer_direction { #define NWL_DSI_ENDPOINT_LCDIF 0 #define NWL_DSI_ENDPOINT_DCSS 1 -struct nwl_dsi_plat_clk_config { - const char *id; - struct clk *clk; - bool present; -}; - struct nwl_dsi_transfer { const struct mipi_dsi_msg *msg; struct mipi_dsi_packet packet; @@ -196,7 +190,7 @@ 
static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps) u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp, - dsi->lanes * 8 * NSEC_PER_SEC); + dsi->lanes * 8ULL * NSEC_PER_SEC); } /* diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index e7c7c9b9c646..f08d0fded61f 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -143,6 +143,7 @@ struct dw_hdmi_phy_data { struct dw_hdmi { struct drm_connector connector; struct drm_bridge bridge; + struct drm_bridge *next_bridge; unsigned int version; @@ -2775,7 +2776,8 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge, struct dw_hdmi *hdmi = bridge->driver_private; if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) - return 0; + return drm_bridge_attach(bridge->encoder, hdmi->next_bridge, + bridge, flags); return dw_hdmi_connector_create(hdmi); } @@ -3160,6 +3162,52 @@ static void dw_hdmi_init_hw(struct dw_hdmi *hdmi) /* ----------------------------------------------------------------------------- * Probe/remove API, used from platforms based on the DRM bridge API. */ + +static int dw_hdmi_parse_dt(struct dw_hdmi *hdmi) +{ + struct device_node *endpoint; + struct device_node *remote; + + if (!hdmi->plat_data->output_port) + return 0; + + endpoint = of_graph_get_endpoint_by_regs(hdmi->dev->of_node, + hdmi->plat_data->output_port, + -1); + if (!endpoint) { + /* + * On platforms whose bindings don't make the output port + * mandatory (such as Rockchip) the plat_data->output_port + * field isn't set, so it's safe to make this a fatal error. + */ + dev_err(hdmi->dev, "Missing endpoint in port@%u\n", + hdmi->plat_data->output_port); + return -ENODEV; + } + + remote = of_graph_get_remote_port_parent(endpoint); + of_node_put(endpoint); + if (!remote) { + dev_err(hdmi->dev, "Endpoint in port@%u unconnected\n", + hdmi->plat_data->output_port); + return -ENODEV; + } + + if (!of_device_is_available(remote)) { + dev_err(hdmi->dev, "port@%u remote device is disabled\n", + hdmi->plat_data->output_port); + of_node_put(remote); + return -ENODEV; + } + + hdmi->next_bridge = of_drm_find_bridge(remote); + of_node_put(remote); + if (!hdmi->next_bridge) + return -EPROBE_DEFER; + + return 0; +} + struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, const struct dw_hdmi_plat_data *plat_data) { @@ -3196,6 +3244,10 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, mutex_init(&hdmi->cec_notifier_mutex); spin_lock_init(&hdmi->audio_lock); + ret = dw_hdmi_parse_dt(hdmi); + if (ret < 0) + return ERR_PTR(ret); + ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0); if (ddc_node) { hdmi->ddc = of_get_i2c_adapter_by_node(ddc_node); @@ -3474,7 +3526,6 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev, ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, 0); if (ret) { dw_hdmi_remove(hdmi); - DRM_ERROR("Failed to initialize bridge with drm\n"); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index 6b268f9445b3..e44e18a0112a 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -246,6 +246,7 @@ struct dw_mipi_dsi { struct clk *pclk; + bool device_found; unsigned int lane_mbps; /* per lane */ u32 channel; u32 lanes; @@ -309,13 +310,37 @@ static inline u32 dsi_read(struct dw_mipi_dsi *dsi, u32 reg) return readl(dsi->base + reg); } +static int 
dw_mipi_dsi_panel_or_bridge(struct dw_mipi_dsi *dsi, + struct device_node *node) +{ + struct drm_bridge *bridge; + struct drm_panel *panel; + int ret; + + ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge); + if (ret) + return ret; + + if (panel) { + bridge = drm_panel_bridge_add_typed(panel, + DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(bridge)) + return PTR_ERR(bridge); + } + + dsi->panel_bridge = bridge; + + if (!dsi->panel_bridge) + return -EPROBE_DEFER; + + return 0; +} + static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *device) { struct dw_mipi_dsi *dsi = host_to_dsi(host); const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data; - struct drm_bridge *bridge; - struct drm_panel *panel; int ret; if (device->lanes > dsi->plat_data->max_data_lanes) { @@ -329,22 +354,14 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host, dsi->format = device->format; dsi->mode_flags = device->mode_flags; - ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0, - &panel, &bridge); - if (ret) - return ret; + if (!dsi->device_found) { + ret = dw_mipi_dsi_panel_or_bridge(dsi, host->dev->of_node); + if (ret) + return ret; - if (panel) { - bridge = drm_panel_bridge_add_typed(panel, - DRM_MODE_CONNECTOR_DSI); - if (IS_ERR(bridge)) - return PTR_ERR(bridge); + dsi->device_found = true; } - dsi->panel_bridge = bridge; - - drm_bridge_add(&dsi->bridge); - if (pdata->host_ops && pdata->host_ops->attach) { ret = pdata->host_ops->attach(pdata->priv_data, device); if (ret < 0) @@ -999,6 +1016,16 @@ static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge, /* Set the encoder type as caller does not know it */ bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI; + if (!dsi->device_found) { + int ret; + + ret = dw_mipi_dsi_panel_or_bridge(dsi, dsi->dev->of_node); + if (ret) + return ret; + + dsi->device_found = true; + } + /* Attach the panel-bridge to the dsi bridge */ return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge, flags); @@ -1181,6 +1208,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev, #ifdef CONFIG_OF dsi->bridge.of_node = pdev->dev.of_node; #endif + drm_bridge_add(&dsi->bridge); return dsi; } @@ -1229,15 +1257,7 @@ EXPORT_SYMBOL_GPL(dw_mipi_dsi_remove); */ int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder) { - int ret; - - ret = drm_bridge_attach(encoder, &dsi->bridge, NULL, 0); - if (ret) { - DRM_ERROR("Failed to initialize bridge with drm\n"); - return ret; - } - - return ret; + return drm_bridge_attach(encoder, &dsi->bridge, NULL, 0); } EXPORT_SYMBOL_GPL(dw_mipi_dsi_bind); diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c index 8ed8302d6bbb..a3db532bbdd1 100644 --- a/drivers/gpu/drm/bridge/tc358768.c +++ b/drivers/gpu/drm/bridge/tc358768.c @@ -291,7 +291,7 @@ static int tc358768_calc_pll(struct tc358768_priv *priv, const struct drm_display_mode *mode, bool verify_only) { - const u32 frs_limits[] = { + static const u32 frs_limits[] = { 1000000000, 500000000, 250000000, @@ -825,7 +825,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) val |= TC358768_DSI_CONTROL_HSCKMD; - if (dsi_dev->mode_flags & MIPI_DSI_MODE_EOT_PACKET) + if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET) val |= TC358768_DSI_CONTROL_EOTDIS; tc358768_write(priv, TC358768_DSI_CONFW, val); diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index 750f2172ef08..a32f70bc68ea 
100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -137,7 +137,6 @@ enum sn65dsi83_model { struct sn65dsi83 { struct drm_bridge bridge; - struct drm_display_mode mode; struct device *dev; struct regmap *regmap; struct device_node *host_node; @@ -147,8 +146,6 @@ struct sn65dsi83 { int dsi_lanes; bool lvds_dual_link; bool lvds_dual_link_even_odd_swap; - bool lvds_format_24bpp; - bool lvds_format_jeida; }; static const struct regmap_range sn65dsi83_readable_ranges[] = { @@ -291,7 +288,8 @@ err_dsi_attach: return ret; } -static void sn65dsi83_pre_enable(struct drm_bridge *bridge) +static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); @@ -306,7 +304,8 @@ static void sn65dsi83_pre_enable(struct drm_bridge *bridge) usleep_range(1000, 1100); } -static u8 sn65dsi83_get_lvds_range(struct sn65dsi83 *ctx) +static u8 sn65dsi83_get_lvds_range(struct sn65dsi83 *ctx, + const struct drm_display_mode *mode) { /* * The encoding of the LVDS_CLK_RANGE is as follows: @@ -322,7 +321,7 @@ static u8 sn65dsi83_get_lvds_range(struct sn65dsi83 *ctx) * the clock to 25..154 MHz, the range calculation can be simplified * as follows: */ - int mode_clock = ctx->mode.clock; + int mode_clock = mode->clock; if (ctx->lvds_dual_link) mode_clock /= 2; @@ -330,7 +329,8 @@ static u8 sn65dsi83_get_lvds_range(struct sn65dsi83 *ctx) return (mode_clock - 12500) / 25000; } -static u8 sn65dsi83_get_dsi_range(struct sn65dsi83 *ctx) +static u8 sn65dsi83_get_dsi_range(struct sn65dsi83 *ctx, + const struct drm_display_mode *mode) { /* * The encoding of the CHA_DSI_CLK_RANGE is as follows: @@ -346,7 +346,7 @@ static u8 sn65dsi83_get_dsi_range(struct sn65dsi83 *ctx) * DSI_CLK = mode clock * bpp / dsi_data_lanes / 2 * the 2 is there because the bus is DDR. */ - return DIV_ROUND_UP(clamp((unsigned int)ctx->mode.clock * + return DIV_ROUND_UP(clamp((unsigned int)mode->clock * mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) / ctx->dsi_lanes / 2, 40000U, 500000U), 5000U); } @@ -364,23 +364,73 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx) return dsi_div - 1; } -static void sn65dsi83_enable(struct drm_bridge *bridge) +static void sn65dsi83_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); + struct drm_atomic_state *state = old_bridge_state->base.state; + const struct drm_bridge_state *bridge_state; + const struct drm_crtc_state *crtc_state; + const struct drm_display_mode *mode; + struct drm_connector *connector; + struct drm_crtc *crtc; + bool lvds_format_24bpp; + bool lvds_format_jeida; unsigned int pval; + __le16 le16val; u16 val; int ret; + /* Get the LVDS format from the bridge state. */ + bridge_state = drm_atomic_get_new_bridge_state(state, bridge); + + switch (bridge_state->output_bus_cfg.format) { + case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: + lvds_format_24bpp = false; + lvds_format_jeida = true; + break; + case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: + lvds_format_24bpp = true; + lvds_format_jeida = true; + break; + case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: + lvds_format_24bpp = true; + lvds_format_jeida = false; + break; + default: + /* + * Some bridges still don't set the correct + * LVDS bus pixel format, use SPWG24 default + * format until those are fixed. 
+ */ + lvds_format_24bpp = true; + lvds_format_jeida = false; + dev_warn(ctx->dev, + "Unsupported LVDS bus format 0x%04x, please check output bridge driver. Falling back to SPWG24.\n", + bridge_state->output_bus_cfg.format); + break; + } + + /* + * Retrieve the CRTC adjusted mode. This requires a little dance to go + * from the bridge to the encoder, to the connector and to the CRTC. + */ + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + crtc = drm_atomic_get_new_connector_state(state, connector)->crtc; + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + mode = &crtc_state->adjusted_mode; + /* Clear reset, disable PLL */ regmap_write(ctx->regmap, REG_RC_RESET, 0x00); regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00); /* Reference clock derived from DSI link clock. */ regmap_write(ctx->regmap, REG_RC_LVDS_PLL, - REG_RC_LVDS_PLL_LVDS_CLK_RANGE(sn65dsi83_get_lvds_range(ctx)) | + REG_RC_LVDS_PLL_LVDS_CLK_RANGE(sn65dsi83_get_lvds_range(ctx, mode)) | REG_RC_LVDS_PLL_HS_CLK_SRC_DPHY); regmap_write(ctx->regmap, REG_DSI_CLK, - REG_DSI_CLK_CHA_DSI_CLK_RANGE(sn65dsi83_get_dsi_range(ctx))); + REG_DSI_CLK_CHA_DSI_CLK_RANGE(sn65dsi83_get_dsi_range(ctx, mode))); regmap_write(ctx->regmap, REG_RC_DSI_CLK, REG_RC_DSI_CLK_DSI_CLK_DIVIDER(sn65dsi83_get_dsi_div(ctx))); @@ -394,20 +444,20 @@ static void sn65dsi83_enable(struct drm_bridge *bridge) regmap_write(ctx->regmap, REG_DSI_EQ, 0x00); /* Set up sync signal polarity. */ - val = (ctx->mode.flags & DRM_MODE_FLAG_NHSYNC ? + val = (mode->flags & DRM_MODE_FLAG_NHSYNC ? REG_LVDS_FMT_HS_NEG_POLARITY : 0) | - (ctx->mode.flags & DRM_MODE_FLAG_NVSYNC ? + (mode->flags & DRM_MODE_FLAG_NVSYNC ? REG_LVDS_FMT_VS_NEG_POLARITY : 0); /* Set up bits-per-pixel, 18bpp or 24bpp. */ - if (ctx->lvds_format_24bpp) { + if (lvds_format_24bpp) { val |= REG_LVDS_FMT_CHA_24BPP_MODE; if (ctx->lvds_dual_link) val |= REG_LVDS_FMT_CHB_24BPP_MODE; } /* Set up LVDS format, JEIDA/Format 1 or SPWG/Format 2 */ - if (ctx->lvds_format_jeida) { + if (lvds_format_jeida) { val |= REG_LVDS_FMT_CHA_24BPP_FORMAT1; if (ctx->lvds_dual_link) val |= REG_LVDS_FMT_CHB_24BPP_FORMAT1; @@ -426,29 +476,29 @@ static void sn65dsi83_enable(struct drm_bridge *bridge) REG_LVDS_LANE_CHB_LVDS_TERM); regmap_write(ctx->regmap, REG_LVDS_CM, 0x00); - val = cpu_to_le16(ctx->mode.hdisplay); + le16val = cpu_to_le16(mode->hdisplay); regmap_bulk_write(ctx->regmap, REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW, - &val, 2); - val = cpu_to_le16(ctx->mode.vdisplay); + &le16val, 2); + le16val = cpu_to_le16(mode->vdisplay); regmap_bulk_write(ctx->regmap, REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW, - &val, 2); + &le16val, 2); /* 32 + 1 pixel clock to ensure proper operation */ - val = cpu_to_le16(32 + 1); - regmap_bulk_write(ctx->regmap, REG_VID_CHA_SYNC_DELAY_LOW, &val, 2); - val = cpu_to_le16(ctx->mode.hsync_end - ctx->mode.hsync_start); + le16val = cpu_to_le16(32 + 1); + regmap_bulk_write(ctx->regmap, REG_VID_CHA_SYNC_DELAY_LOW, &le16val, 2); + le16val = cpu_to_le16(mode->hsync_end - mode->hsync_start); regmap_bulk_write(ctx->regmap, REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW, - &val, 2); - val = cpu_to_le16(ctx->mode.vsync_end - ctx->mode.vsync_start); + &le16val, 2); + le16val = cpu_to_le16(mode->vsync_end - mode->vsync_start); regmap_bulk_write(ctx->regmap, REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW, - &val, 2); + &le16val, 2); regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_BACK_PORCH, - ctx->mode.htotal - ctx->mode.hsync_end); + mode->htotal - mode->hsync_end); regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_BACK_PORCH, 
- ctx->mode.vtotal - ctx->mode.vsync_end); + mode->vtotal - mode->vsync_end); regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_FRONT_PORCH, - ctx->mode.hsync_start - ctx->mode.hdisplay); + mode->hsync_start - mode->hdisplay); regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_FRONT_PORCH, - ctx->mode.vsync_start - ctx->mode.vdisplay); + mode->vsync_start - mode->vdisplay); regmap_write(ctx->regmap, REG_VID_CHA_TEST_PATTERN, 0x00); /* Enable PLL */ @@ -472,7 +522,8 @@ static void sn65dsi83_enable(struct drm_bridge *bridge) regmap_write(ctx->regmap, REG_IRQ_STAT, pval); } -static void sn65dsi83_disable(struct drm_bridge *bridge) +static void sn65dsi83_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); @@ -481,7 +532,8 @@ static void sn65dsi83_disable(struct drm_bridge *bridge) regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00); } -static void sn65dsi83_post_disable(struct drm_bridge *bridge) +static void sn65dsi83_atomic_post_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); @@ -503,70 +555,44 @@ sn65dsi83_mode_valid(struct drm_bridge *bridge, return MODE_OK; } -static void sn65dsi83_mode_set(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - const struct drm_display_mode *adj) -{ - struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); +#define MAX_INPUT_SEL_FORMATS 1 - ctx->mode = *adj; -} - -static bool sn65dsi83_mode_fixup(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - struct drm_display_mode *adj) +static u32 * +sn65dsi83_atomic_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) { - struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); - u32 input_bus_format = MEDIA_BUS_FMT_RGB888_1X24; - struct drm_encoder *encoder = bridge->encoder; - struct drm_device *ddev = encoder->dev; - struct drm_connector *connector; + u32 *input_fmts; - /* The DSI format is always RGB888_1X24 */ - list_for_each_entry(connector, &ddev->mode_config.connector_list, head) { - switch (connector->display_info.bus_formats[0]) { - case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: - ctx->lvds_format_24bpp = false; - ctx->lvds_format_jeida = true; - break; - case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: - ctx->lvds_format_24bpp = true; - ctx->lvds_format_jeida = true; - break; - case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: - ctx->lvds_format_24bpp = true; - ctx->lvds_format_jeida = false; - break; - default: - /* - * Some bridges still don't set the correct - * LVDS bus pixel format, use SPWG24 default - * format until those are fixed. - */ - ctx->lvds_format_24bpp = true; - ctx->lvds_format_jeida = false; - dev_warn(ctx->dev, - "Unsupported LVDS bus format 0x%04x, please check output bridge driver. 
Falling back to SPWG24.\n", - connector->display_info.bus_formats[0]); - break; - } + *num_input_fmts = 0; - drm_display_info_set_bus_formats(&connector->display_info, - &input_bus_format, 1); - } + input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts), + GFP_KERNEL); + if (!input_fmts) + return NULL; + + /* This is the DSI-end bus format */ + input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; + *num_input_fmts = 1; - return true; + return input_fmts; } static const struct drm_bridge_funcs sn65dsi83_funcs = { - .attach = sn65dsi83_attach, - .pre_enable = sn65dsi83_pre_enable, - .enable = sn65dsi83_enable, - .disable = sn65dsi83_disable, - .post_disable = sn65dsi83_post_disable, - .mode_valid = sn65dsi83_mode_valid, - .mode_set = sn65dsi83_mode_set, - .mode_fixup = sn65dsi83_mode_fixup, + .attach = sn65dsi83_attach, + .atomic_pre_enable = sn65dsi83_atomic_pre_enable, + .atomic_enable = sn65dsi83_atomic_enable, + .atomic_disable = sn65dsi83_atomic_disable, + .atomic_post_disable = sn65dsi83_atomic_post_disable, + .mode_valid = sn65dsi83_mode_valid, + + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_get_input_bus_fmts = sn65dsi83_atomic_get_input_bus_fmts, }; static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model) diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 45a2969afb2b..9bf889302bcc 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -23,6 +23,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_dp_aux_bus.h> #include <drm/drm_dp_helper.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> @@ -116,6 +117,7 @@ * struct ti_sn65dsi86 - Platform data for ti-sn65dsi86 driver. * @bridge_aux: AUX-bus sub device for MIPI-to-eDP bridge functionality. * @gpio_aux: AUX-bus sub device for GPIO controller functionality. + * @aux_aux: AUX-bus sub device for eDP AUX channel functionality. * * @dev: Pointer to the top level (i2c) device. * @regmap: Regmap for accessing i2c. @@ -124,9 +126,8 @@ * @connector: Our connector. * @host_node: Remote DSI node. * @dsi: Our MIPI DSI source. - * @edid: Detected EDID of eDP panel. * @refclk: Our reference clock. - * @panel: Our panel. + * @next_bridge: The bridge on the eDP side. * @enable_gpio: The GPIO we toggle to enable the bridge. * @supplies: Data for bulk enabling/disabling our regulators. * @dp_lanes: Count of dp_lanes we're using. 
@@ -148,17 +149,17 @@ struct ti_sn65dsi86 { struct auxiliary_device bridge_aux; struct auxiliary_device gpio_aux; + struct auxiliary_device aux_aux; struct device *dev; struct regmap *regmap; struct drm_dp_aux aux; struct drm_bridge bridge; struct drm_connector connector; - struct edid *edid; struct device_node *host_node; struct mipi_dsi_device *dsi; struct clk *refclk; - struct drm_panel *panel; + struct drm_bridge *next_bridge; struct gpio_desc *enable_gpio; struct regulator_bulk_data supplies[SN_REGULATOR_SUPPLY_NUM]; int dp_lanes; @@ -393,35 +394,222 @@ static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata) debugfs_create_file("status", 0600, debugfs, pdata, &status_fops); } -/* Connector funcs */ -static struct ti_sn65dsi86 * -connector_to_ti_sn65dsi86(struct drm_connector *connector) +/* ----------------------------------------------------------------------------- + * Auxiliary Devices (*not* AUX) + */ + +static void ti_sn65dsi86_uninit_aux(void *data) { - return container_of(connector, struct ti_sn65dsi86, connector); + auxiliary_device_uninit(data); } -static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector) +static void ti_sn65dsi86_delete_aux(void *data) { - struct ti_sn65dsi86 *pdata = connector_to_ti_sn65dsi86(connector); - struct edid *edid = pdata->edid; - int num, ret; + auxiliary_device_delete(data); +} - if (!edid) { - pm_runtime_get_sync(pdata->dev); - edid = pdata->edid = drm_get_edid(connector, &pdata->aux.ddc); - pm_runtime_put_autosuspend(pdata->dev); +/* + * AUX bus docs say that a non-NULL release is mandatory, but it makes no + * sense for the model used here where all of the aux devices are allocated + * in the single shared structure. We'll use this noop as a workaround. + */ +static void ti_sn65dsi86_noop(struct device *dev) {} + +static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata, + struct auxiliary_device *aux, + const char *name) +{ + struct device *dev = pdata->dev; + int ret; + + aux->name = name; + aux->dev.parent = dev; + aux->dev.release = ti_sn65dsi86_noop; + device_set_of_node_from_dev(&aux->dev, dev); + ret = auxiliary_device_init(aux); + if (ret) + return ret; + ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux); + if (ret) + return ret; + + ret = auxiliary_device_add(aux); + if (ret) + return ret; + ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux); + + return ret; +} + +/* ----------------------------------------------------------------------------- + * AUX Adapter + */ + +static struct ti_sn65dsi86 *aux_to_ti_sn65dsi86(struct drm_dp_aux *aux) +{ + return container_of(aux, struct ti_sn65dsi86, aux); +} + +static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) +{ + struct ti_sn65dsi86 *pdata = aux_to_ti_sn65dsi86(aux); + u32 request = msg->request & ~(DP_AUX_I2C_MOT | DP_AUX_I2C_WRITE_STATUS_UPDATE); + u32 request_val = AUX_CMD_REQ(msg->request); + u8 *buf = msg->buffer; + unsigned int len = msg->size; + unsigned int val; + int ret; + u8 addr_len[SN_AUX_LENGTH_REG + 1 - SN_AUX_ADDR_19_16_REG]; + + if (len > SN_AUX_MAX_PAYLOAD_BYTES) + return -EINVAL; + + pm_runtime_get_sync(pdata->dev); + mutex_lock(&pdata->comms_mutex); + + /* + * If someone tries to do a DDC over AUX transaction before pre_enable() + * on a device without a dedicated reference clock then we just can't + * do it. Fail right away. This prevents non-refclk users from reading + * the EDID before enabling the panel but such is life. 
+ */ + if (!pdata->comms_enabled) { + ret = -EIO; + goto exit; } - if (edid && drm_edid_is_valid(edid)) { - ret = drm_connector_update_edid_property(connector, edid); - if (!ret) { - num = drm_add_edid_modes(connector, edid); - if (num) - return num; + switch (request) { + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE: + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ: + regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val); + /* Assume it's good */ + msg->reply = 0; + break; + default: + ret = -EINVAL; + goto exit; + } + + BUILD_BUG_ON(sizeof(addr_len) != sizeof(__be32)); + put_unaligned_be32((msg->address & SN_AUX_ADDR_MASK) << 8 | len, + addr_len); + regmap_bulk_write(pdata->regmap, SN_AUX_ADDR_19_16_REG, addr_len, + ARRAY_SIZE(addr_len)); + + if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE) + regmap_bulk_write(pdata->regmap, SN_AUX_WDATA_REG(0), buf, len); + + /* Clear old status bits before start so we don't get confused */ + regmap_write(pdata->regmap, SN_AUX_CMD_STATUS_REG, + AUX_IRQ_STATUS_NAT_I2C_FAIL | + AUX_IRQ_STATUS_AUX_RPLY_TOUT | + AUX_IRQ_STATUS_AUX_SHORT); + + regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND); + + /* Zero delay loop because i2c transactions are slow already */ + ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val, + !(val & AUX_CMD_SEND), 0, 50 * 1000); + if (ret) + goto exit; + + ret = regmap_read(pdata->regmap, SN_AUX_CMD_STATUS_REG, &val); + if (ret) + goto exit; + + if (val & AUX_IRQ_STATUS_AUX_RPLY_TOUT) { + /* + * The hardware tried the message seven times per the DP spec + * but it hit a timeout. We ignore defers here because they're + * handled in hardware. + */ + ret = -ETIMEDOUT; + goto exit; + } + + if (val & AUX_IRQ_STATUS_AUX_SHORT) { + ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &len); + if (ret) + goto exit; + } else if (val & AUX_IRQ_STATUS_NAT_I2C_FAIL) { + switch (request) { + case DP_AUX_I2C_WRITE: + case DP_AUX_I2C_READ: + msg->reply |= DP_AUX_I2C_REPLY_NACK; + break; + case DP_AUX_NATIVE_READ: + case DP_AUX_NATIVE_WRITE: + msg->reply |= DP_AUX_NATIVE_REPLY_NACK; + break; } + len = 0; + goto exit; } - return drm_panel_get_modes(pdata->panel, connector); + if (request != DP_AUX_NATIVE_WRITE && request != DP_AUX_I2C_WRITE && len != 0) + ret = regmap_bulk_read(pdata->regmap, SN_AUX_RDATA_REG(0), buf, len); + +exit: + mutex_unlock(&pdata->comms_mutex); + pm_runtime_mark_last_busy(pdata->dev); + pm_runtime_put_autosuspend(pdata->dev); + + if (ret) + return ret; + return len; +} + +static int ti_sn_aux_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); + int ret; + + pdata->aux.name = "ti-sn65dsi86-aux"; + pdata->aux.dev = &adev->dev; + pdata->aux.transfer = ti_sn_aux_transfer; + drm_dp_aux_init(&pdata->aux); + + ret = devm_of_dp_aux_populate_ep_devices(&pdata->aux); + if (ret) + return ret; + + /* + * The eDP to MIPI bridge parts don't work until the AUX channel is + * setup so we don't add it in the main driver probe, we add it now. 
+ */ + return ti_sn65dsi86_add_aux_device(pdata, &pdata->bridge_aux, "bridge"); +} + +static const struct auxiliary_device_id ti_sn_aux_id_table[] = { + { .name = "ti_sn65dsi86.aux", }, + {}, +}; + +static struct auxiliary_driver ti_sn_aux_driver = { + .name = "aux", + .probe = ti_sn_aux_probe, + .id_table = ti_sn_aux_id_table, +}; + +/* ----------------------------------------------------------------------------- + * DRM Connector Operations + */ + +static struct ti_sn65dsi86 * +connector_to_ti_sn65dsi86(struct drm_connector *connector) +{ + return container_of(connector, struct ti_sn65dsi86, connector); +} + +static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector) +{ + struct ti_sn65dsi86 *pdata = connector_to_ti_sn65dsi86(connector); + + return drm_bridge_get_modes(pdata->next_bridge, connector); } static enum drm_mode_status @@ -448,23 +636,32 @@ static const struct drm_connector_funcs ti_sn_bridge_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge) +static int ti_sn_bridge_connector_init(struct ti_sn65dsi86 *pdata) { - return container_of(bridge, struct ti_sn65dsi86, bridge); -} + int ret; -static int ti_sn65dsi86_parse_regulators(struct ti_sn65dsi86 *pdata) -{ - unsigned int i; - const char * const ti_sn_bridge_supply_names[] = { - "vcca", "vcc", "vccio", "vpll", - }; + ret = drm_connector_init(pdata->bridge.dev, &pdata->connector, + &ti_sn_bridge_connector_funcs, + DRM_MODE_CONNECTOR_eDP); + if (ret) { + DRM_ERROR("Failed to initialize connector with drm\n"); + return ret; + } - for (i = 0; i < SN_REGULATOR_SUPPLY_NUM; i++) - pdata->supplies[i].supply = ti_sn_bridge_supply_names[i]; + drm_connector_helper_add(&pdata->connector, + &ti_sn_bridge_connector_helper_funcs); + drm_connector_attach_encoder(&pdata->connector, pdata->bridge.encoder); - return devm_regulator_bulk_get(pdata->dev, SN_REGULATOR_SUPPLY_NUM, - pdata->supplies); + return 0; +} + +/*------------------------------------------------------------------------------ + * DRM Bridge + */ + +static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge) +{ + return container_of(bridge, struct ti_sn65dsi86, bridge); } static int ti_sn_bridge_attach(struct drm_bridge *bridge, @@ -491,17 +688,9 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge, return ret; } - ret = drm_connector_init(bridge->dev, &pdata->connector, - &ti_sn_bridge_connector_funcs, - DRM_MODE_CONNECTOR_eDP); - if (ret) { - DRM_ERROR("Failed to initialize connector with drm\n"); + ret = ti_sn_bridge_connector_init(pdata); + if (ret < 0) goto err_conn_init; - } - - drm_connector_helper_add(&pdata->connector, - &ti_sn_bridge_connector_helper_funcs); - drm_connector_attach_encoder(&pdata->connector, bridge->encoder); /* * TODO: ideally finding host resource and dsi dev registration needs @@ -547,8 +736,16 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge, } pdata->dsi = dsi; + /* Attach the next bridge */ + ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge, + &pdata->bridge, flags); + if (ret < 0) + goto err_dsi_detach; + return 0; +err_dsi_detach: + mipi_dsi_detach(dsi); err_dsi_attach: mipi_dsi_device_unregister(dsi); err_dsi_host: @@ -567,8 +764,6 @@ static void ti_sn_bridge_disable(struct drm_bridge *bridge) { struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); - drm_panel_disable(pdata->panel); - /* disable video stream */ regmap_update_bits(pdata->regmap, 
SN_ENH_FRAME_REG, VSTREAM_ENABLE, 0); /* semi auto link training mode OFF */ @@ -633,9 +828,9 @@ static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata) return i; } -static void ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata, - bool rate_valid[]) +static unsigned int ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata) { + unsigned int valid_rates = 0; unsigned int rate_per_200khz; unsigned int rate_mhz; u8 dpcd_val; @@ -675,13 +870,13 @@ static void ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata, j < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut); j++) { if (ti_sn_bridge_dp_rate_lut[j] == rate_mhz) - rate_valid[j] = true; + valid_rates |= BIT(j); } } for (i = 0; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut); i++) { - if (rate_valid[i]) - return; + if (valid_rates & BIT(i)) + return valid_rates; } DRM_DEV_ERROR(pdata->dev, "No matching eDP rates in table; falling back\n"); @@ -703,15 +898,17 @@ static void ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata, (int)dpcd_val); fallthrough; case DP_LINK_BW_5_4: - rate_valid[7] = 1; + valid_rates |= BIT(7); fallthrough; case DP_LINK_BW_2_7: - rate_valid[4] = 1; + valid_rates |= BIT(4); fallthrough; case DP_LINK_BW_1_62: - rate_valid[1] = 1; + valid_rates |= BIT(1); break; } + + return valid_rates; } static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata) @@ -829,8 +1026,8 @@ exit: static void ti_sn_bridge_enable(struct drm_bridge *bridge) { struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); - bool rate_valid[ARRAY_SIZE(ti_sn_bridge_dp_rate_lut)] = { }; const char *last_err_str = "No supported DP rate"; + unsigned int valid_rates; int dp_rate_idx; unsigned int val; int ret = -EINVAL; @@ -869,13 +1066,13 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge) regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK, val); - ti_sn_bridge_read_valid_rates(pdata, rate_valid); + valid_rates = ti_sn_bridge_read_valid_rates(pdata); /* Train until we run out of rates */ for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata); dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut); dp_rate_idx++) { - if (!rate_valid[dp_rate_idx]) + if (!(valid_rates & BIT(dp_rate_idx))) continue; ret = ti_sn_link_training(pdata, dp_rate_idx, &last_err_str); @@ -893,8 +1090,6 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge) /* enable video stream */ regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, VSTREAM_ENABLE); - - drm_panel_enable(pdata->panel); } static void ti_sn_bridge_pre_enable(struct drm_bridge *bridge) @@ -905,16 +1100,12 @@ static void ti_sn_bridge_pre_enable(struct drm_bridge *bridge) if (!pdata->refclk) ti_sn65dsi86_enable_comms(pdata); - - drm_panel_prepare(pdata->panel); } static void ti_sn_bridge_post_disable(struct drm_bridge *bridge) { struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); - drm_panel_unprepare(pdata->panel); - if (!pdata->refclk) ti_sn65dsi86_disable_comms(pdata); @@ -930,137 +1121,135 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = { .post_disable = ti_sn_bridge_post_disable, }; -static struct ti_sn65dsi86 *aux_to_ti_sn65dsi86(struct drm_dp_aux *aux) -{ - return container_of(aux, struct ti_sn65dsi86, aux); -} - -static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, - struct drm_dp_aux_msg *msg) +static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata, + struct device_node *np) { - struct ti_sn65dsi86 *pdata = aux_to_ti_sn65dsi86(aux); - u32 request = msg->request & ~(DP_AUX_I2C_MOT | 
DP_AUX_I2C_WRITE_STATUS_UPDATE); - u32 request_val = AUX_CMD_REQ(msg->request); - u8 *buf = msg->buffer; - unsigned int len = msg->size; - unsigned int val; - int ret; - u8 addr_len[SN_AUX_LENGTH_REG + 1 - SN_AUX_ADDR_19_16_REG]; - - if (len > SN_AUX_MAX_PAYLOAD_BYTES) - return -EINVAL; - - pm_runtime_get_sync(pdata->dev); - mutex_lock(&pdata->comms_mutex); + u32 lane_assignments[SN_MAX_DP_LANES] = { 0, 1, 2, 3 }; + u32 lane_polarities[SN_MAX_DP_LANES] = { }; + struct device_node *endpoint; + u8 ln_assign = 0; + u8 ln_polrs = 0; + int dp_lanes; + int i; /* - * If someone tries to do a DDC over AUX transaction before pre_enable() - * on a device without a dedicated reference clock then we just can't - * do it. Fail right away. This prevents non-refclk users from reading - * the EDID before enabling the panel but such is life. + * Read config from the device tree about lane remapping and lane + * polarities. These are optional and we assume identity map and + * normal polarity if nothing is specified. It's OK to specify just + * data-lanes but not lane-polarities but not vice versa. + * + * Error checking is light (we just make sure we don't crash or + * buffer overrun) and we assume dts is well formed and specifying + * mappings that the hardware supports. */ - if (!pdata->comms_enabled) { - ret = -EIO; - goto exit; + endpoint = of_graph_get_endpoint_by_regs(np, 1, -1); + dp_lanes = of_property_count_u32_elems(endpoint, "data-lanes"); + if (dp_lanes > 0 && dp_lanes <= SN_MAX_DP_LANES) { + of_property_read_u32_array(endpoint, "data-lanes", + lane_assignments, dp_lanes); + of_property_read_u32_array(endpoint, "lane-polarities", + lane_polarities, dp_lanes); + } else { + dp_lanes = SN_MAX_DP_LANES; } + of_node_put(endpoint); - switch (request) { - case DP_AUX_NATIVE_WRITE: - case DP_AUX_I2C_WRITE: - case DP_AUX_NATIVE_READ: - case DP_AUX_I2C_READ: - regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val); - /* Assume it's good */ - msg->reply = 0; - break; - default: - ret = -EINVAL; - goto exit; + /* + * Convert into register format. Loop over all lanes even if + * data-lanes had fewer elements so that we nicely initialize + * the LN_ASSIGN register. 
+ */ + for (i = SN_MAX_DP_LANES - 1; i >= 0; i--) { + ln_assign = ln_assign << LN_ASSIGN_WIDTH | lane_assignments[i]; + ln_polrs = ln_polrs << 1 | lane_polarities[i]; } - BUILD_BUG_ON(sizeof(addr_len) != sizeof(__be32)); - put_unaligned_be32((msg->address & SN_AUX_ADDR_MASK) << 8 | len, - addr_len); - regmap_bulk_write(pdata->regmap, SN_AUX_ADDR_19_16_REG, addr_len, - ARRAY_SIZE(addr_len)); + /* Stash in our struct for when we power on */ + pdata->dp_lanes = dp_lanes; + pdata->ln_assign = ln_assign; + pdata->ln_polrs = ln_polrs; +} - if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE) - regmap_bulk_write(pdata->regmap, SN_AUX_WDATA_REG(0), buf, len); +static int ti_sn_bridge_parse_dsi_host(struct ti_sn65dsi86 *pdata) +{ + struct device_node *np = pdata->dev->of_node; - /* Clear old status bits before start so we don't get confused */ - regmap_write(pdata->regmap, SN_AUX_CMD_STATUS_REG, - AUX_IRQ_STATUS_NAT_I2C_FAIL | - AUX_IRQ_STATUS_AUX_RPLY_TOUT | - AUX_IRQ_STATUS_AUX_SHORT); + pdata->host_node = of_graph_get_remote_node(np, 0, 0); - regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND); + if (!pdata->host_node) { + DRM_ERROR("remote dsi host node not found\n"); + return -ENODEV; + } - /* Zero delay loop because i2c transactions are slow already */ - ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val, - !(val & AUX_CMD_SEND), 0, 50 * 1000); - if (ret) - goto exit; + return 0; +} - ret = regmap_read(pdata->regmap, SN_AUX_CMD_STATUS_REG, &val); - if (ret) - goto exit; +static int ti_sn_bridge_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); + struct device_node *np = pdata->dev->of_node; + struct drm_panel *panel; + int ret; - if (val & AUX_IRQ_STATUS_AUX_RPLY_TOUT) { - /* - * The hardware tried the message seven times per the DP spec - * but it hit a timeout. We ignore defers here because they're - * handled in hardware. 
- */ - ret = -ETIMEDOUT; - goto exit; - } + ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL); + if (ret) + return dev_err_probe(&adev->dev, ret, + "could not find any panel node\n"); - if (val & AUX_IRQ_STATUS_AUX_SHORT) { - ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &len); - if (ret) - goto exit; - } else if (val & AUX_IRQ_STATUS_NAT_I2C_FAIL) { - switch (request) { - case DP_AUX_I2C_WRITE: - case DP_AUX_I2C_READ: - msg->reply |= DP_AUX_I2C_REPLY_NACK; - break; - case DP_AUX_NATIVE_READ: - case DP_AUX_NATIVE_WRITE: - msg->reply |= DP_AUX_NATIVE_REPLY_NACK; - break; - } - len = 0; - goto exit; + pdata->next_bridge = devm_drm_panel_bridge_add(pdata->dev, panel); + if (IS_ERR(pdata->next_bridge)) { + DRM_ERROR("failed to create panel bridge\n"); + return PTR_ERR(pdata->next_bridge); } - if (request != DP_AUX_NATIVE_WRITE && request != DP_AUX_I2C_WRITE && len != 0) - ret = regmap_bulk_read(pdata->regmap, SN_AUX_RDATA_REG(0), buf, len); - -exit: - mutex_unlock(&pdata->comms_mutex); - pm_runtime_mark_last_busy(pdata->dev); - pm_runtime_put_autosuspend(pdata->dev); + ti_sn_bridge_parse_lanes(pdata, np); + ret = ti_sn_bridge_parse_dsi_host(pdata); if (ret) return ret; - return len; + + pdata->bridge.funcs = &ti_sn_bridge_funcs; + pdata->bridge.of_node = np; + + drm_bridge_add(&pdata->bridge); + + return 0; } -static int ti_sn_bridge_parse_dsi_host(struct ti_sn65dsi86 *pdata) +static void ti_sn_bridge_remove(struct auxiliary_device *adev) { - struct device_node *np = pdata->dev->of_node; + struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); - pdata->host_node = of_graph_get_remote_node(np, 0, 0); + if (!pdata) + return; - if (!pdata->host_node) { - DRM_ERROR("remote dsi host node not found\n"); - return -ENODEV; + if (pdata->dsi) { + mipi_dsi_detach(pdata->dsi); + mipi_dsi_device_unregister(pdata->dsi); } - return 0; + drm_bridge_remove(&pdata->bridge); + + of_node_put(pdata->host_node); } +static const struct auxiliary_device_id ti_sn_bridge_id_table[] = { + { .name = "ti_sn65dsi86.bridge", }, + {}, +}; + +static struct auxiliary_driver ti_sn_bridge_driver = { + .name = "bridge", + .probe = ti_sn_bridge_probe, + .remove = ti_sn_bridge_remove, + .id_table = ti_sn_bridge_id_table, +}; + +/* ----------------------------------------------------------------------------- + * GPIO Controller + */ + #if defined(CONFIG_OF_GPIO) static int tn_sn_bridge_of_xlate(struct gpio_chip *chip, @@ -1265,171 +1454,27 @@ static inline void ti_sn_gpio_unregister(void) {} #endif -static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata, - struct device_node *np) -{ - u32 lane_assignments[SN_MAX_DP_LANES] = { 0, 1, 2, 3 }; - u32 lane_polarities[SN_MAX_DP_LANES] = { }; - struct device_node *endpoint; - u8 ln_assign = 0; - u8 ln_polrs = 0; - int dp_lanes; - int i; - - /* - * Read config from the device tree about lane remapping and lane - * polarities. These are optional and we assume identity map and - * normal polarity if nothing is specified. It's OK to specify just - * data-lanes but not lane-polarities but not vice versa. - * - * Error checking is light (we just make sure we don't crash or - * buffer overrun) and we assume dts is well formed and specifying - * mappings that the hardware supports. 
- */ - endpoint = of_graph_get_endpoint_by_regs(np, 1, -1); - dp_lanes = of_property_count_u32_elems(endpoint, "data-lanes"); - if (dp_lanes > 0 && dp_lanes <= SN_MAX_DP_LANES) { - of_property_read_u32_array(endpoint, "data-lanes", - lane_assignments, dp_lanes); - of_property_read_u32_array(endpoint, "lane-polarities", - lane_polarities, dp_lanes); - } else { - dp_lanes = SN_MAX_DP_LANES; - } - of_node_put(endpoint); - - /* - * Convert into register format. Loop over all lanes even if - * data-lanes had fewer elements so that we nicely initialize - * the LN_ASSIGN register. - */ - for (i = SN_MAX_DP_LANES - 1; i >= 0; i--) { - ln_assign = ln_assign << LN_ASSIGN_WIDTH | lane_assignments[i]; - ln_polrs = ln_polrs << 1 | lane_polarities[i]; - } - - /* Stash in our struct for when we power on */ - pdata->dp_lanes = dp_lanes; - pdata->ln_assign = ln_assign; - pdata->ln_polrs = ln_polrs; -} - -static int ti_sn_bridge_probe(struct auxiliary_device *adev, - const struct auxiliary_device_id *id) -{ - struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); - struct device_node *np = pdata->dev->of_node; - int ret; - - ret = drm_of_find_panel_or_bridge(np, 1, 0, &pdata->panel, NULL); - if (ret) { - DRM_ERROR("could not find any panel node\n"); - return ret; - } - - ti_sn_bridge_parse_lanes(pdata, np); - - ret = ti_sn_bridge_parse_dsi_host(pdata); - if (ret) - return ret; - - pdata->aux.name = "ti-sn65dsi86-aux"; - pdata->aux.dev = pdata->dev; - pdata->aux.transfer = ti_sn_aux_transfer; - drm_dp_aux_init(&pdata->aux); - - pdata->bridge.funcs = &ti_sn_bridge_funcs; - pdata->bridge.of_node = np; - - drm_bridge_add(&pdata->bridge); - - return 0; -} - -static void ti_sn_bridge_remove(struct auxiliary_device *adev) -{ - struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); - - if (!pdata) - return; - - if (pdata->dsi) { - mipi_dsi_detach(pdata->dsi); - mipi_dsi_device_unregister(pdata->dsi); - } - - kfree(pdata->edid); - - drm_bridge_remove(&pdata->bridge); - - of_node_put(pdata->host_node); -} - -static const struct auxiliary_device_id ti_sn_bridge_id_table[] = { - { .name = "ti_sn65dsi86.bridge", }, - {}, -}; - -static struct auxiliary_driver ti_sn_bridge_driver = { - .name = "bridge", - .probe = ti_sn_bridge_probe, - .remove = ti_sn_bridge_remove, - .id_table = ti_sn_bridge_id_table, -}; +/* ----------------------------------------------------------------------------- + * Probe & Remove + */ static void ti_sn65dsi86_runtime_disable(void *data) { pm_runtime_disable(data); } -static void ti_sn65dsi86_uninit_aux(void *data) -{ - auxiliary_device_uninit(data); -} - -static void ti_sn65dsi86_delete_aux(void *data) -{ - auxiliary_device_delete(data); -} - -/* - * AUX bus docs say that a non-NULL release is mandatory, but it makes no - * sense for the model used here where all of the aux devices are allocated - * in the single shared structure. We'll use this noop as a workaround. - */ -static void ti_sn65dsi86_noop(struct device *dev) {} - -static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata, - struct auxiliary_device *aux, - const char *name) +static int ti_sn65dsi86_parse_regulators(struct ti_sn65dsi86 *pdata) { - struct device *dev = pdata->dev; - int ret; - - /* - * NOTE: It would be nice to set the "of_node" of our children to be - * the same "of_node"" that the top-level component has. That doesn't - * work, though, since pinctrl will try (and fail) to reserve the - * pins again. 
Until that gets sorted out the children will just need - * to look at the of_node of the main device. - */ - - aux->name = name; - aux->dev.parent = dev; - aux->dev.release = ti_sn65dsi86_noop; - ret = auxiliary_device_init(aux); - if (ret) - return ret; - ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux); - if (ret) - return ret; + unsigned int i; + const char * const ti_sn_bridge_supply_names[] = { + "vcca", "vcc", "vccio", "vpll", + }; - ret = auxiliary_device_add(aux); - if (ret) - return ret; - ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux); + for (i = 0; i < SN_REGULATOR_SUPPLY_NUM; i++) + pdata->supplies[i].supply = ti_sn_bridge_supply_names[i]; - return ret; + return devm_regulator_bulk_get(pdata->dev, SN_REGULATOR_SUPPLY_NUM, + pdata->supplies); } static int ti_sn65dsi86_probe(struct i2c_client *client, @@ -1454,27 +1499,24 @@ static int ti_sn65dsi86_probe(struct i2c_client *client, pdata->regmap = devm_regmap_init_i2c(client, &ti_sn65dsi86_regmap_config); - if (IS_ERR(pdata->regmap)) { - DRM_ERROR("regmap i2c init failed\n"); - return PTR_ERR(pdata->regmap); - } + if (IS_ERR(pdata->regmap)) + return dev_err_probe(dev, PTR_ERR(pdata->regmap), + "regmap i2c init failed\n"); - pdata->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR(pdata->enable_gpio)) { - DRM_ERROR("failed to get enable gpio from DT\n"); - ret = PTR_ERR(pdata->enable_gpio); - return ret; - } + pdata->enable_gpio = devm_gpiod_get_optional(dev, "enable", + GPIOD_OUT_LOW); + if (IS_ERR(pdata->enable_gpio)) + return dev_err_probe(dev, PTR_ERR(pdata->enable_gpio), + "failed to get enable gpio from DT\n"); ret = ti_sn65dsi86_parse_regulators(pdata); - if (ret) { - DRM_ERROR("failed to parse regulators\n"); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "failed to parse regulators\n"); pdata->refclk = devm_clk_get_optional(dev, "refclk"); if (IS_ERR(pdata->refclk)) - return PTR_ERR(pdata->refclk); + return dev_err_probe(dev, PTR_ERR(pdata->refclk), + "failed to get reference clock\n"); pm_runtime_enable(dev); ret = devm_add_action_or_reset(dev, ti_sn65dsi86_runtime_disable, dev); @@ -1490,10 +1532,11 @@ static int ti_sn65dsi86_probe(struct i2c_client *client, * motiviation here is to solve the chicken-and-egg problem of probe * ordering. The bridge wants the panel to be there when it probes. * The panel wants its HPD GPIO (provided by sn65dsi86 on some boards) - * when it probes. There will soon be other devices (DDC I2C bus, PWM) - * that have the same problem. Having sub-devices allows the some sub - * devices to finish probing even if others return -EPROBE_DEFER and - * gets us around the problems. + * when it probes. The panel and maybe backlight might want the DDC + * bus. Soon the PWM provided by the bridge chip will have the same + * problem. Having sub-devices allows the some sub devices to finish + * probing even if others return -EPROBE_DEFER and gets us around the + * problems. */ if (IS_ENABLED(CONFIG_OF_GPIO)) { @@ -1502,7 +1545,13 @@ static int ti_sn65dsi86_probe(struct i2c_client *client, return ret; } - return ti_sn65dsi86_add_aux_device(pdata, &pdata->bridge_aux, "bridge"); + /* + * NOTE: At the end of the AUX channel probe we'll add the aux device + * for the bridge. This is because the bridge can't be used until the + * AUX channel is there and this is a very simple solution to the + * dependency problem. 
+ */ + return ti_sn65dsi86_add_aux_device(pdata, &pdata->aux_aux, "aux"); } static struct i2c_device_id ti_sn65dsi86_id[] = { @@ -1539,12 +1588,18 @@ static int __init ti_sn65dsi86_init(void) if (ret) goto err_main_was_registered; - ret = auxiliary_driver_register(&ti_sn_bridge_driver); + ret = auxiliary_driver_register(&ti_sn_aux_driver); if (ret) goto err_gpio_was_registered; + ret = auxiliary_driver_register(&ti_sn_bridge_driver); + if (ret) + goto err_aux_was_registered; + return 0; +err_aux_was_registered: + auxiliary_driver_unregister(&ti_sn_aux_driver); err_gpio_was_registered: ti_sn_gpio_unregister(); err_main_was_registered: @@ -1557,6 +1612,7 @@ module_init(ti_sn65dsi86_init); static void __exit ti_sn65dsi86_exit(void) { auxiliary_driver_unregister(&ti_sn_bridge_driver); + auxiliary_driver_unregister(&ti_sn_aux_driver); ti_sn_gpio_unregister(); i2c_del_driver(&ti_sn65dsi86_driver); } diff --git a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c index 9335d9d6cf9a..9ac39cf11694 100644 --- a/drivers/gpu/drm/drm_aperture.c +++ b/drivers/gpu/drm/drm_aperture.c @@ -33,6 +33,10 @@ * * .. code-block:: c * + * static const struct drm_driver example_driver = { + * ... + * }; + * * static int remove_conflicting_framebuffers(struct pci_dev *pdev) * { * bool primary = false; @@ -46,7 +50,7 @@ * #endif * * return drm_aperture_remove_conflicting_framebuffers(base, size, primary, - * "example driver"); + * &example_driver); * } * * static int probe(struct pci_dev *pdev) @@ -274,7 +278,7 @@ static void drm_aperture_detach_drivers(resource_size_t base, resource_size_t si * @base: the aperture's base address in physical memory * @size: aperture size in bytes * @primary: also kick vga16fb if present - * @name: requesting driver name + * @req_driver: requesting DRM driver * * This function removes graphics device drivers which use memory range described by * @base and @size. @@ -283,7 +287,7 @@ static void drm_aperture_detach_drivers(resource_size_t base, resource_size_t si * 0 on success, or a negative errno code otherwise */ int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size, - bool primary, const char *name) + bool primary, const struct drm_driver *req_driver) { #if IS_REACHABLE(CONFIG_FB) struct apertures_struct *a; @@ -296,7 +300,7 @@ int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_ a->ranges[0].base = base; a->ranges[0].size = size; - ret = remove_conflicting_framebuffers(a, name, primary); + ret = remove_conflicting_framebuffers(a, req_driver->name, primary); kfree(a); if (ret) @@ -312,7 +316,7 @@ EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers); /** * drm_aperture_remove_conflicting_pci_framebuffers - remove existing framebuffers for PCI devices * @pdev: PCI device - * @name: requesting driver name + * @req_driver: requesting DRM driver * * This function removes graphics device drivers using memory range configured * for any of @pdev's memory bars. 
The function assumes that PCI device with @@ -321,7 +325,8 @@ EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers); * Returns: * 0 on success, or a negative errno code otherwise */ -int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name) +int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, + const struct drm_driver *req_driver) { resource_size_t base, size; int bar, ret = 0; @@ -339,7 +344,7 @@ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const * otherwise the vga fbdev driver falls over. */ #if IS_REACHABLE(CONFIG_FB) - ret = remove_conflicting_pci_framebuffers(pdev, name); + ret = remove_conflicting_pci_framebuffers(pdev, req_driver->name); #endif if (ret == 0) ret = vga_remove_vgacon(pdev); diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index d820423fac32..c85dcfd69158 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -660,7 +660,7 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, return -ENOSPC; } - clips = drm_plane_get_damage_clips(new_plane_state); + clips = __drm_plane_get_damage_clips(new_plane_state); num_clips = drm_plane_get_damage_clips_count(new_plane_state); /* Make sure damage clips are valid and inside the fb. */ diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index bc3487964fb5..f7bf1ea62d58 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -35,6 +35,7 @@ #include <drm/drm_damage_helper.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_print.h> #include <drm/drm_self_refresh_helper.h> @@ -2405,6 +2406,15 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev, ret = funcs->prepare_fb(plane, new_plane_state); if (ret) goto fail; + } else { + WARN_ON_ONCE(funcs->cleanup_fb); + + if (!drm_core_check_feature(dev, DRIVER_GEM)) + continue; + + ret = drm_gem_plane_helper_prepare_fb(plane, new_plane_state); + if (ret) + goto fail; } } diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index b59b26a71ad5..5b1d92f5afea 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -61,6 +61,36 @@ * trusted clients. */ +static bool drm_is_current_master_locked(struct drm_file *fpriv) +{ + /* Either drm_device.master_mutex or drm_file.master_lookup_lock + * should be held here. + */ + return fpriv->is_master && drm_lease_owner(fpriv->master) == fpriv->minor->dev->master; +} + +/** + * drm_is_current_master - checks whether @priv is the current master + * @fpriv: DRM file private + * + * Checks whether @fpriv is current master on its device. This decides whether a + * client is allowed to run DRM_MASTER IOCTLs. + * + * Most of the modern IOCTL which require DRM_MASTER are for kernel modesetting + * - the current master is assumed to own the non-shareable display hardware. 
+ */ +bool drm_is_current_master(struct drm_file *fpriv) +{ + bool ret; + + spin_lock(&fpriv->master_lookup_lock); + ret = drm_is_current_master_locked(fpriv); + spin_unlock(&fpriv->master_lookup_lock); + + return ret; +} +EXPORT_SYMBOL(drm_is_current_master); + int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_auth *auth = data; @@ -135,16 +165,18 @@ static void drm_set_master(struct drm_device *dev, struct drm_file *fpriv, static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) { struct drm_master *old_master; + struct drm_master *new_master; lockdep_assert_held_once(&dev->master_mutex); WARN_ON(fpriv->is_master); old_master = fpriv->master; - fpriv->master = drm_master_create(dev); - if (!fpriv->master) { - fpriv->master = old_master; + new_master = drm_master_create(dev); + if (!new_master) return -ENOMEM; - } + spin_lock(&fpriv->master_lookup_lock); + fpriv->master = new_master; + spin_unlock(&fpriv->master_lookup_lock); fpriv->is_master = 1; fpriv->authenticated = 1; @@ -223,7 +255,7 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, if (ret) goto out_unlock; - if (drm_is_current_master(file_priv)) + if (drm_is_current_master_locked(file_priv)) goto out_unlock; if (dev->master) { @@ -272,7 +304,7 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data, if (ret) goto out_unlock; - if (!drm_is_current_master(file_priv)) { + if (!drm_is_current_master_locked(file_priv)) { ret = -EINVAL; goto out_unlock; } @@ -303,10 +335,13 @@ int drm_master_open(struct drm_file *file_priv) * any master object for render clients */ mutex_lock(&dev->master_mutex); - if (!dev->master) + if (!dev->master) { ret = drm_new_set_master(dev, file_priv); - else + } else { + spin_lock(&file_priv->master_lookup_lock); file_priv->master = drm_master_get(dev->master); + spin_unlock(&file_priv->master_lookup_lock); + } mutex_unlock(&dev->master_mutex); return ret; @@ -322,7 +357,7 @@ void drm_master_release(struct drm_file *file_priv) if (file_priv->magic) idr_remove(&file_priv->master->magic_map, file_priv->magic); - if (!drm_is_current_master(file_priv)) + if (!drm_is_current_master_locked(file_priv)) goto out; drm_legacy_lock_master_cleanup(dev, master); @@ -344,22 +379,6 @@ out: } /** - * drm_is_current_master - checks whether @priv is the current master - * @fpriv: DRM file private - * - * Checks whether @fpriv is current master on its device. This decides whether a - * client is allowed to run DRM_MASTER IOCTLs. - * - * Most of the modern IOCTL which require DRM_MASTER are for kernel modesetting - * - the current master is assumed to own the non-shareable display hardware. - */ -bool drm_is_current_master(struct drm_file *fpriv) -{ - return fpriv->is_master && drm_lease_owner(fpriv->master) == fpriv->minor->dev->master; -} -EXPORT_SYMBOL(drm_is_current_master); - -/** * drm_master_get - reference a master pointer * @master: &struct drm_master * @@ -372,6 +391,31 @@ struct drm_master *drm_master_get(struct drm_master *master) } EXPORT_SYMBOL(drm_master_get); +/** + * drm_file_get_master - reference &drm_file.master of @file_priv + * @file_priv: DRM file private + * + * Increments the reference count of @file_priv's &drm_file.master and returns + * the &drm_file.master. If @file_priv has no &drm_file.master, returns NULL. + * + * Master pointers returned from this function should be unreferenced using + * drm_master_put(). 
+ */ +struct drm_master *drm_file_get_master(struct drm_file *file_priv) +{ + struct drm_master *master = NULL; + + spin_lock(&file_priv->master_lookup_lock); + if (!file_priv->master) + goto unlock; + master = drm_master_get(file_priv->master); + +unlock: + spin_unlock(&file_priv->master_lookup_lock); + return master; +} +EXPORT_SYMBOL(drm_file_get_master); + static void drm_master_destroy(struct kref *kref) { struct drm_master *master = container_of(kref, struct drm_master, refcount); diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 044acd07c153..11ec2846addb 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -28,6 +28,7 @@ #include <drm/drm_atomic_state_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_encoder.h> +#include <drm/drm_print.h> #include "drm_crtc_internal.h" @@ -225,6 +226,15 @@ err_reset_bridge: bridge->dev = NULL; bridge->encoder = NULL; list_del(&bridge->chain_node); + +#ifdef CONFIG_OF + DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n", + bridge->of_node, encoder->name, ret); +#else + DRM_ERROR("failed to attach bridge to encoder %s: %d\n", + encoder->name, ret); +#endif + return ret; } EXPORT_SYMBOL(drm_bridge_attach); diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index da39e7ff6965..2ba257b1ae20 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -2414,6 +2414,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, struct drm_mode_modeinfo u_mode; struct drm_mode_modeinfo __user *mode_ptr; uint32_t __user *encoder_ptr; + bool is_current_master; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; @@ -2444,9 +2445,11 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, out_resp->connector_type = connector->connector_type; out_resp->connector_type_id = connector->connector_type_id; + is_current_master = drm_is_current_master(file_priv); + mutex_lock(&dev->mode_config.mutex); if (out_resp->count_modes == 0) { - if (drm_is_current_master(file_priv)) + if (is_current_master) connector->funcs->fill_modes(connector, dev->mode_config.max_width, dev->mode_config.max_height); diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 1ca51addb589..edb772947cb4 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -262,6 +262,8 @@ int drm_plane_register_all(struct drm_device *dev); void drm_plane_unregister_all(struct drm_device *dev); int drm_plane_check_pixel_format(struct drm_plane *plane, u32 format, u64 modifier); +struct drm_mode_rect * +__drm_plane_get_damage_clips(const struct drm_plane_state *state); /* drm_bridge.c */ void drm_bridge_detach(struct drm_bridge *bridge); diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c index 3a4126dc2520..245959dad7bb 100644 --- a/drivers/gpu/drm/drm_damage_helper.c +++ b/drivers/gpu/drm/drm_damage_helper.c @@ -34,44 +34,6 @@ #include <drm/drm_damage_helper.h> #include <drm/drm_device.h> -/** - * DOC: overview - * - * FB_DAMAGE_CLIPS is an optional plane property which provides a means to - * specify a list of damage rectangles on a plane in framebuffer coordinates of - * the framebuffer attached to the plane. 
In current context damage is the area - * of plane framebuffer that has changed since last plane update (also called - * page-flip), irrespective of whether currently attached framebuffer is same as - * framebuffer attached during last plane update or not. - * - * FB_DAMAGE_CLIPS is a hint to kernel which could be helpful for some drivers - * to optimize internally especially for virtual devices where each framebuffer - * change needs to be transmitted over network, usb, etc. - * - * Since FB_DAMAGE_CLIPS is a hint so it is an optional property. User-space can - * ignore damage clips property and in that case driver will do a full plane - * update. In case damage clips are provided then it is guaranteed that the area - * inside damage clips will be updated to plane. For efficiency driver can do - * full update or can update more than specified in damage clips. Since driver - * is free to read more, user-space must always render the entire visible - * framebuffer. Otherwise there can be corruptions. Also, if a user-space - * provides damage clips which doesn't encompass the actual damage to - * framebuffer (since last plane update) can result in incorrect rendering. - * - * FB_DAMAGE_CLIPS is a blob property with the layout of blob data is simply an - * array of &drm_mode_rect. Unlike plane &drm_plane_state.src coordinates, - * damage clips are not in 16.16 fixed point. Similar to plane src in - * framebuffer, damage clips cannot be negative. In damage clip, x1/y1 are - * inclusive and x2/y2 are exclusive. While kernel does not error for overlapped - * damage clips, it is strongly discouraged. - * - * Drivers that are interested in damage interface for plane should enable - * FB_DAMAGE_CLIPS property by calling drm_plane_enable_fb_damage_clips(). - * Drivers implementing damage can use drm_atomic_helper_damage_iter_init() and - * drm_atomic_helper_damage_iter_next() helper iterator function to get damage - * rectangles clipped to &drm_plane_state.src. - */ - static void convert_clip_rect_to_rect(const struct drm_clip_rect *src, struct drm_mode_rect *dest, uint32_t num_clips, uint32_t src_inc) @@ -88,22 +50,6 @@ static void convert_clip_rect_to_rect(const struct drm_clip_rect *src, } /** - * drm_plane_enable_fb_damage_clips - Enables plane fb damage clips property. - * @plane: Plane on which to enable damage clips property. - * - * This function lets driver to enable the damage clips property on a plane. - */ -void drm_plane_enable_fb_damage_clips(struct drm_plane *plane) -{ - struct drm_device *dev = plane->dev; - struct drm_mode_config *config = &dev->mode_config; - - drm_object_attach_property(&plane->base, config->prop_fb_damage_clips, - 0); -} -EXPORT_SYMBOL(drm_plane_enable_fb_damage_clips); - -/** * drm_atomic_helper_check_plane_damage - Verify plane damage on atomic_check. * @state: The driver state object. * @plane_state: Plane state for which to verify damage. 
@@ -282,7 +228,7 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter, if (!state || !state->crtc || !state->fb || !state->visible) return; - iter->clips = drm_helper_get_plane_damage_clips(state); + iter->clips = (struct drm_rect *)drm_plane_get_damage_clips(state); iter->num_clips = drm_plane_get_damage_clips_count(state); /* Round down for x1/y1 and round up for x2/y2 to catch all pixels */ diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 3d7182001004..b0a826489488 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -91,6 +91,7 @@ static int drm_clients_info(struct seq_file *m, void *data) mutex_lock(&dev->filelist_mutex); list_for_each_entry_reverse(priv, &dev->filelist, lhead) { struct task_struct *task; + bool is_current_master = drm_is_current_master(priv); rcu_read_lock(); /* locks pid_task()->comm */ task = pid_task(priv->pid, PIDTYPE_PID); @@ -99,7 +100,7 @@ static int drm_clients_info(struct seq_file *m, void *data) task ? task->comm : "<unknown>", pid_vnr(priv->pid), priv->minor->index, - drm_is_current_master(priv) ? 'y' : 'n', + is_current_master ? 'y' : 'n', priv->authenticated ? 'y' : 'n', from_kuid_munged(seq_user_ns(m), uid), priv->magic); diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c index 3dd70d813f69..bbc3bc4ba844 100644 --- a/drivers/gpu/drm/drm_debugfs_crc.c +++ b/drivers/gpu/drm/drm_debugfs_crc.c @@ -46,10 +46,10 @@ * it reached a given hardware component (a CRC sampling "source"). * * Userspace can control generation of CRCs in a given CRTC by writing to the - * file dri/0/crtc-N/crc/control in debugfs, with N being the index of the CRTC. - * Accepted values are source names (which are driver-specific) and the "auto" - * keyword, which will let the driver select a default source of frame CRCs - * for this CRTC. + * file dri/0/crtc-N/crc/control in debugfs, with N being the :ref:`index of + * the CRTC<crtc_index>`. Accepted values are source names (which are + * driver-specific) and the "auto" keyword, which will let the driver select a + * default source of frame CRCs for this CRTC. * * Once frame CRC generation is enabled, userspace can capture them by reading * the dri/0/crtc-N/crc/data file. Each line in that file contains the frame diff --git a/drivers/gpu/drm/drm_dp_aux_bus.c b/drivers/gpu/drm/drm_dp_aux_bus.c new file mode 100644 index 000000000000..e49a70f3691b --- /dev/null +++ b/drivers/gpu/drm/drm_dp_aux_bus.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2021 Google Inc. + * + * The DP AUX bus is used for devices that are connected over a DisplayPort + * AUX bus. The devices on the far side of the bus are referred to as + * endpoints in this code. + * + * Commonly there is only one device connected to the DP AUX bus: a panel. + * Though historically panels (even DP panels) have been modeled as simple + * platform devices, putting them under the DP AUX bus allows the panel driver + * to perform transactions on that bus. + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> + +#include <drm/drm_dp_aux_bus.h> +#include <drm/drm_dp_helper.h> + +/** + * dp_aux_ep_match() - The match function for the dp_aux_bus. + * @dev: The device to match. + * @drv: The driver to try to match against. + * + * At the moment, we just match on device tree. 
+ * + * Return: True if this driver matches this device; false otherwise. + */ +static int dp_aux_ep_match(struct device *dev, struct device_driver *drv) +{ + return !!of_match_device(drv->of_match_table, dev); +} + +/** + * dp_aux_ep_probe() - The probe function for the dp_aux_bus. + * @dev: The device to probe. + * + * Calls through to the endpoint driver probe. + * + * Return: 0 if no error or negative error code. + */ +static int dp_aux_ep_probe(struct device *dev) +{ + struct dp_aux_ep_driver *aux_ep_drv = to_dp_aux_ep_drv(dev->driver); + struct dp_aux_ep_device *aux_ep = to_dp_aux_ep_dev(dev); + int ret; + + ret = dev_pm_domain_attach(dev, true); + if (ret) + return dev_err_probe(dev, ret, "Failed to attach to PM Domain\n"); + + ret = aux_ep_drv->probe(aux_ep); + if (ret) + dev_pm_domain_detach(dev, true); + + return ret; +} + +/** + * dp_aux_ep_remove() - The remove function for the dp_aux_bus. + * @dev: The device to remove. + * + * Calls through to the endpoint driver remove. + * + * Return: 0 if no error or negative error code. + */ +static int dp_aux_ep_remove(struct device *dev) +{ + struct dp_aux_ep_driver *aux_ep_drv = to_dp_aux_ep_drv(dev->driver); + struct dp_aux_ep_device *aux_ep = to_dp_aux_ep_dev(dev); + + if (aux_ep_drv->remove) + aux_ep_drv->remove(aux_ep); + dev_pm_domain_detach(dev, true); + + return 0; +} + +/** + * dp_aux_ep_shutdown() - The shutdown function for the dp_aux_bus. + * @dev: The device to shutdown. + * + * Calls through to the endpoint driver shutdown. + */ +static void dp_aux_ep_shutdown(struct device *dev) +{ + struct dp_aux_ep_driver *aux_ep_drv; + + if (!dev->driver) + return; + + aux_ep_drv = to_dp_aux_ep_drv(dev->driver); + if (aux_ep_drv->shutdown) + aux_ep_drv->shutdown(to_dp_aux_ep_dev(dev)); +} + +static struct bus_type dp_aux_bus_type = { + .name = "dp-aux", + .match = dp_aux_ep_match, + .probe = dp_aux_ep_probe, + .remove = dp_aux_ep_remove, + .shutdown = dp_aux_ep_shutdown, +}; + +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return of_device_modalias(dev, buf, PAGE_SIZE); +} +static DEVICE_ATTR_RO(modalias); + +static struct attribute *dp_aux_ep_dev_attrs[] = { + &dev_attr_modalias.attr, + NULL, +}; +ATTRIBUTE_GROUPS(dp_aux_ep_dev); + +/** + * dp_aux_ep_dev_release() - Free memory for the dp_aux_ep device + * @dev: The device to free. + * + * Return: 0 if no error or negative error code. + */ +static void dp_aux_ep_dev_release(struct device *dev) +{ + kfree(to_dp_aux_ep_dev(dev)); +} + +static struct device_type dp_aux_device_type_type = { + .groups = dp_aux_ep_dev_groups, + .uevent = of_device_uevent_modalias, + .release = dp_aux_ep_dev_release, +}; + +/** + * of_dp_aux_ep_destroy() - Destroy an DP AUX endpoint device + * @dev: The device to destroy. + * @data: Not used + * + * This is just used as a callback by of_dp_aux_depopulate_ep_devices() and + * is called for _all_ of the child devices of the device providing the AUX bus. + * We'll only act on those that are of type "dp_aux_bus_type". + * + * This function is effectively an inverse of what's in the loop + * in of_dp_aux_populate_ep_devices(). + * + * Return: 0 if no error or negative error code. 
+ */ +static int of_dp_aux_ep_destroy(struct device *dev, void *data) +{ + struct device_node *np = dev->of_node; + + if (dev->bus != &dp_aux_bus_type) + return 0; + + if (!of_node_check_flag(np, OF_POPULATED)) + return 0; + + of_node_clear_flag(np, OF_POPULATED); + of_node_put(np); + + device_unregister(dev); + + return 0; +} + +/** + * of_dp_aux_depopulate_ep_devices() - Undo of_dp_aux_populate_ep_devices + * @aux: The AUX channel whose devices we want to depopulate + * + * This will destroy all devices that were created + * by of_dp_aux_populate_ep_devices(). + */ +void of_dp_aux_depopulate_ep_devices(struct drm_dp_aux *aux) +{ + device_for_each_child_reverse(aux->dev, NULL, of_dp_aux_ep_destroy); +} +EXPORT_SYMBOL_GPL(of_dp_aux_depopulate_ep_devices); + +/** + * of_dp_aux_populate_ep_devices() - Populate the endpoint devices on the DP AUX + * @aux: The AUX channel whose devices we want to populate. It is required that + * drm_dp_aux_init() has already been called for this AUX channel. + * + * This will populate all the devices under the "aux-bus" node of the device + * providing the AUX channel (AKA aux->dev). + * + * When this function finishes, it is _possible_ (but not guaranteed) that + * our sub-devices will have finished probing. It should be noted that if our + * sub-devices return -EPROBE_DEFER that we will not return any error codes + * ourselves but our sub-devices will _not_ have actually probed successfully + * yet. There may be other cases (maybe added in the future?) where sub-devices + * won't have been probed yet when this function returns, so it's best not to + * rely on that. + * + * If this function succeeds you should later make sure you call + * of_dp_aux_depopulate_ep_devices() to undo it, or just use the devm version + * of this function. + * + * Return: 0 if no error or negative error code. + */ +int of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux) +{ + struct device_node *bus, *np; + struct dp_aux_ep_device *aux_ep; + int ret; + + /* drm_dp_aux_init() should have been called already; warn if not */ + WARN_ON_ONCE(!aux->ddc.algo); + + if (!aux->dev->of_node) + return 0; + + bus = of_get_child_by_name(aux->dev->of_node, "aux-bus"); + if (!bus) + return 0; + + for_each_available_child_of_node(bus, np) { + if (of_node_test_and_set_flag(np, OF_POPULATED)) + continue; + + aux_ep = kzalloc(sizeof(*aux_ep), GFP_KERNEL); + if (!aux_ep) + continue; + aux_ep->aux = aux; + + aux_ep->dev.parent = aux->dev; + aux_ep->dev.bus = &dp_aux_bus_type; + aux_ep->dev.type = &dp_aux_device_type_type; + aux_ep->dev.of_node = of_node_get(np); + dev_set_name(&aux_ep->dev, "aux-%s", dev_name(aux->dev)); + + ret = device_register(&aux_ep->dev); + if (ret) { + dev_err(aux->dev, "Failed to create AUX EP for %pOF: %d\n", np, ret); + of_node_clear_flag(np, OF_POPULATED); + of_node_put(np); + + /* + * As per docs of device_register(), call this instead + * of kfree() directly for error cases. + */ + put_device(&aux_ep->dev); + + /* + * Following in the footsteps of of_i2c_register_devices(), + * we won't fail the whole function here--we'll just + * continue registering any other devices we find. + */ + } + } + + of_node_put(bus); + + return 0; +} + +static void of_dp_aux_depopulate_ep_devices_void(void *data) +{ + of_dp_aux_depopulate_ep_devices(data); +} + +/** + * devm_of_dp_aux_populate_ep_devices() - devm wrapper for of_dp_aux_populate_ep_devices() + * @aux: The AUX channel whose devices we want to populate + * + * Handles freeing w/ devm on the device "aux->dev". 
+ * + * Return: 0 if no error or negative error code. + */ +int devm_of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux) +{ + int ret; + + ret = of_dp_aux_populate_ep_devices(aux); + if (ret) + return ret; + + return devm_add_action_or_reset(aux->dev, + of_dp_aux_depopulate_ep_devices_void, + aux); +} +EXPORT_SYMBOL_GPL(devm_of_dp_aux_populate_ep_devices); + +int __dp_aux_dp_driver_register(struct dp_aux_ep_driver *drv, struct module *owner) +{ + drv->driver.owner = owner; + drv->driver.bus = &dp_aux_bus_type; + + return driver_register(&drv->driver); +} +EXPORT_SYMBOL_GPL(__dp_aux_dp_driver_register); + +void dp_aux_dp_driver_unregister(struct dp_aux_ep_driver *drv) +{ + driver_unregister(&drv->driver); +} +EXPORT_SYMBOL_GPL(dp_aux_dp_driver_unregister); + +static int __init dp_aux_bus_init(void) +{ + int ret; + + ret = bus_register(&dp_aux_bus_type); + if (ret) + return ret; + + return 0; +} + +static void __exit dp_aux_bus_exit(void) +{ + bus_unregister(&dp_aux_bus_type); +} + +subsys_initcall(dp_aux_bus_init); +module_exit(dp_aux_bus_exit); + +MODULE_AUTHOR("Douglas Anderson <dianders@chromium.org>"); +MODULE_DESCRIPTION("DRM DisplayPort AUX bus"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 55b53df6ce34..b5f75ca05774 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -33,9 +33,17 @@ #include <drm/drm_print.h> #include <drm/drm_vblank.h> #include <drm/drm_dp_mst_helper.h> +#include <drm/drm_panel.h> #include "drm_crtc_helper_internal.h" +struct dp_aux_backlight { + struct backlight_device *base; + struct drm_dp_aux *aux; + struct drm_edp_backlight_info info; + bool enabled; +}; + /** * DOC: dp helpers * @@ -3115,3 +3123,457 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc) return 0; } EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr); + +/** + * drm_edp_backlight_set_level() - Set the backlight level of an eDP panel via AUX + * @aux: The DP AUX channel to use + * @bl: Backlight capability info from drm_edp_backlight_init() + * @level: The brightness level to set + * + * Sets the brightness level of an eDP panel's backlight. Note that the panel's backlight must + * already have been enabled by the driver by calling drm_edp_backlight_enable(). + * + * Returns: %0 on success, negative error code on failure + */ +int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, + u16 level) +{ + int ret; + u8 buf[2] = { 0 }; + + if (bl->lsb_reg_used) { + buf[0] = (level & 0xff00) >> 8; + buf[1] = (level & 0x00ff); + } else { + buf[0] = level; + } + + ret = drm_dp_dpcd_write(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf)); + if (ret != sizeof(buf)) { + drm_err(aux->drm_dev, + "%s: Failed to write aux backlight level: %d\n", + aux->name, ret); + return ret < 0 ? ret : -EIO; + } + + return 0; +} +EXPORT_SYMBOL(drm_edp_backlight_set_level); + +static int +drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, + bool enable) +{ + int ret; + u8 buf; + + /* The panel uses something other then DPCD for enabling its backlight */ + if (!bl->aux_enable) + return 0; + + ret = drm_dp_dpcd_readb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf); + if (ret != 1) { + drm_err(aux->drm_dev, "%s: Failed to read eDP display control register: %d\n", + aux->name, ret); + return ret < 0 ? 
ret : -EIO; + } + if (enable) + buf |= DP_EDP_BACKLIGHT_ENABLE; + else + buf &= ~DP_EDP_BACKLIGHT_ENABLE; + + ret = drm_dp_dpcd_writeb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf); + if (ret != 1) { + drm_err(aux->drm_dev, "%s: Failed to write eDP display control register: %d\n", + aux->name, ret); + return ret < 0 ? ret : -EIO; + } + + return 0; +} + +/** + * drm_edp_backlight_enable() - Enable an eDP panel's backlight using DPCD + * @aux: The DP AUX channel to use + * @bl: Backlight capability info from drm_edp_backlight_init() + * @level: The initial backlight level to set via AUX, if there is one + * + * This function handles enabling DPCD backlight controls on a panel over DPCD, while additionally + * restoring any important backlight state such as the given backlight level, the brightness byte + * count, backlight frequency, etc. + * + * Note that certain panels, while supporting brightness level controls over DPCD, may not support + * having their backlights enabled via the standard %DP_EDP_DISPLAY_CONTROL_REGISTER. On such panels + * &drm_edp_backlight_info.aux_enable will be set to %false, this function will skip the step of + * programming the %DP_EDP_DISPLAY_CONTROL_REGISTER, and the driver must perform the required + * implementation specific step for enabling the backlight after calling this function. + * + * Returns: %0 on success, negative error code on failure. + */ +int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, + const u16 level) +{ + int ret; + u8 dpcd_buf, new_dpcd_buf; + + ret = drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf); + if (ret != 1) { + drm_dbg_kms(aux->drm_dev, + "%s: Failed to read backlight mode: %d\n", aux->name, ret); + return ret < 0 ? ret : -EIO; + } + + new_dpcd_buf = dpcd_buf; + + if ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) != DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { + new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; + + ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count); + if (ret != 1) + drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n", + aux->name, ret); + } + + if (bl->pwm_freq_pre_divider) { + ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_FREQ_SET, bl->pwm_freq_pre_divider); + if (ret != 1) + drm_dbg_kms(aux->drm_dev, + "%s: Failed to write aux backlight frequency: %d\n", + aux->name, ret); + else + new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE; + } + + if (new_dpcd_buf != dpcd_buf) { + ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf); + if (ret != 1) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n", + aux->name, ret); + return ret < 0 ? ret : -EIO; + } + } + + ret = drm_edp_backlight_set_level(aux, bl, level); + if (ret < 0) + return ret; + ret = drm_edp_backlight_set_enable(aux, bl, true); + if (ret < 0) + return ret; + + return 0; +} +EXPORT_SYMBOL(drm_edp_backlight_enable); + +/** + * drm_edp_backlight_disable() - Disable an eDP backlight using DPCD, if supported + * @aux: The DP AUX channel to use + * @bl: Backlight capability info from drm_edp_backlight_init() + * + * This function handles disabling DPCD backlight controls on a panel over AUX. Note that some + * panels have backlights that are enabled/disabled by other means, despite having their brightness + * values controlled through DPCD. 
On such panels &drm_edp_backlight_info.aux_enable will be set to + * %false, this function will become a no-op (and we will skip updating + * %DP_EDP_DISPLAY_CONTROL_REGISTER), and the driver must take care to perform its own + * implementation-specific step for disabling the backlight. + * + * Returns: %0 on success or no-op, negative error code on failure. + */ +int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl) +{ + int ret; + + ret = drm_edp_backlight_set_enable(aux, bl, false); + if (ret < 0) + return ret; + + return 0; +} +EXPORT_SYMBOL(drm_edp_backlight_disable); + +static inline int +drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl, + u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]) +{ + int fxp, fxp_min, fxp_max, fxp_actual, f = 1; + int ret; + u8 pn, pn_min, pn_max; + + ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn); + if (ret != 1) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n", + aux->name, ret); + return -ENODEV; + } + + pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + bl->max = (1 << pn) - 1; + if (!driver_pwm_freq_hz) + return 0; + + /* + * Set the PWM Frequency divider to match the desired frequency provided by the driver. + * The PWM Frequency is calculated as 27 MHz / (F x P). + * - Where F = PWM Frequency Pre-Divider value programmed by field 7:0 of the + * EDP_BACKLIGHT_FREQ_SET register (DPCD Address 00728h) + * - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the + * EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h) + */ + + /* Find the desired value of (F x P) + * Note that, if F x P is out of the supported range, the maximum or minimum value will + * be applied automatically. So there is no need to check for that. + */ + fxp = DIV_ROUND_CLOSEST(1000 * DP_EDP_BACKLIGHT_FREQ_BASE_KHZ, driver_pwm_freq_hz); + + /* Use the highest possible value of Pn for more granularity of brightness adjustment while + * satisfying the conditions below. + * - Pn is in the range of Pn_min and Pn_max + * - F is in the range of 1 and 255 + * - FxP is within 25% of the desired value. + * Note: 25% is an arbitrary value and may need some tweaking.
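As a worked example of the divider search described above (the pwmgen bit count caps here are hypothetical): with driver_pwm_freq_hz = 27000, pn_min = 4 and pn_max = 10:

	fxp     = DIV_ROUND_CLOSEST(1000 * 27000, 27000) = 1000
	fxp_min = 750, fxp_max = 1250                       (the 25% window)
	pn = 10: f = clamp(DIV_ROUND_CLOSEST(1000, 1024), 1, 255) = 1, fxp_actual = 1 << 10 = 1024

Since 750 <= 1024 <= 1250, the loop stops at Pn = 10 and F = 1, giving an actual PWM frequency of about 27 MHz / 1024, roughly 26.4 kHz, and a brightness range of 0..1023 (bl->max = (1 << 10) - 1).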
+ */ + ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min); + if (ret != 1) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n", + aux->name, ret); + return 0; + } + ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max); + if (ret != 1) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n", + aux->name, ret); + return 0; + } + pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + + /* Ensure frequency is within 25% of desired value */ + fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4); + fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4); + if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) { + drm_dbg_kms(aux->drm_dev, + "%s: Driver defined backlight frequency (%d) out of range\n", + aux->name, driver_pwm_freq_hz); + return 0; + } + + for (pn = pn_max; pn >= pn_min; pn--) { + f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255); + fxp_actual = f << pn; + if (fxp_min <= fxp_actual && fxp_actual <= fxp_max) + break; + } + + ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, pn); + if (ret != 1) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n", + aux->name, ret); + return 0; + } + bl->pwmgen_bit_count = pn; + bl->max = (1 << pn) - 1; + + if (edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP) { + bl->pwm_freq_pre_divider = f; + drm_dbg_kms(aux->drm_dev, "%s: Using backlight frequency from driver (%dHz)\n", + aux->name, driver_pwm_freq_hz); + } + + return 0; +} + +static inline int +drm_edp_backlight_probe_level(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl, + u8 *current_mode) +{ + int ret; + u8 buf[2]; + u8 mode_reg; + + ret = drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg); + if (ret != 1) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight mode: %d\n", + aux->name, ret); + return ret < 0 ? ret : -EIO; + } + + *current_mode = (mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK); + if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { + int size = 1 + bl->lsb_reg_used; + + ret = drm_dp_dpcd_read(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size); + if (ret != size) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight level: %d\n", + aux->name, ret); + return ret < 0 ? ret : -EIO; + } + + if (bl->lsb_reg_used) + return (buf[0] << 8) | buf[1]; + else + return buf[0]; + } + + /* + * If we're not in DPCD control mode yet, the programmed brightness value is meaningless and + * the driver should assume max brightness + */ + return bl->max; +} + +/** + * drm_edp_backlight_init() - Probe a display panel's TCON using the standard VESA eDP backlight + * interface. + * @aux: The DP aux device to use for probing + * @bl: The &drm_edp_backlight_info struct to fill out with information on the backlight + * @driver_pwm_freq_hz: Optional PWM frequency from the driver in hz + * @edp_dpcd: A cached copy of the eDP DPCD + * @current_level: Where to store the probed brightness level + * @current_mode: Where to store the currently set backlight control mode + * + * Initializes a &drm_edp_backlight_info struct by probing @aux for it's backlight capabilities, + * along with also probing the current and maximum supported brightness levels. + * + * If @driver_pwm_freq_hz is non-zero, this will be used as the backlight frequency. Otherwise, the + * default frequency from the panel is used. + * + * Returns: %0 on success, negative error code on failure. 
+ */ +int +drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl, + u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE], + u16 *current_level, u8 *current_mode) +{ + int ret; + + if (edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) + bl->aux_enable = true; + if (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + bl->lsb_reg_used = true; + + ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd); + if (ret < 0) + return ret; + + ret = drm_edp_backlight_probe_level(aux, bl, current_mode); + if (ret < 0) + return ret; + *current_level = ret; + + drm_dbg_kms(aux->drm_dev, + "%s: Found backlight level=%d/%d pwm_freq_pre_divider=%d mode=%x\n", + aux->name, *current_level, bl->max, bl->pwm_freq_pre_divider, *current_mode); + drm_dbg_kms(aux->drm_dev, + "%s: Backlight caps: pwmgen_bit_count=%d lsb_reg_used=%d aux_enable=%d\n", + aux->name, bl->pwmgen_bit_count, bl->lsb_reg_used, bl->aux_enable); + return 0; +} +EXPORT_SYMBOL(drm_edp_backlight_init); + +#if IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ + (IS_MODULE(CONFIG_DRM_KMS_HELPER) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)) + +static int dp_aux_backlight_update_status(struct backlight_device *bd) +{ + struct dp_aux_backlight *bl = bl_get_data(bd); + u16 brightness = backlight_get_brightness(bd); + int ret = 0; + + if (!backlight_is_blank(bd)) { + if (!bl->enabled) { + drm_edp_backlight_enable(bl->aux, &bl->info, brightness); + bl->enabled = true; + return 0; + } + ret = drm_edp_backlight_set_level(bl->aux, &bl->info, brightness); + } else { + if (bl->enabled) { + drm_edp_backlight_disable(bl->aux, &bl->info); + bl->enabled = false; + } + } + + return ret; +} + +static const struct backlight_ops dp_aux_bl_ops = { + .update_status = dp_aux_backlight_update_status, +}; + +/** + * drm_panel_dp_aux_backlight - create and use DP AUX backlight + * @panel: DRM panel + * @aux: The DP AUX channel to use + * + * Use this function to create and handle backlight if your panel + * supports backlight control over DP AUX channel using DPCD + * registers as per VESA's standard backlight control interface. + * + * When the panel is enabled backlight will be enabled after a + * successful call to &drm_panel_funcs.enable() + * + * When the panel is disabled backlight will be disabled before the + * call to &drm_panel_funcs.disable(). + * + * A typical implementation for a panel driver supporting backlight + * control over DP AUX will call this function at probe time. + * Backlight will then be handled transparently without requiring + * any intervention from the driver. + * + * drm_panel_dp_aux_backlight() must be called after the call to drm_panel_init(). + * + * Return: 0 on success or a negative error code on failure. 
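To make the probe-time usage described above concrete, a hypothetical eDP panel driver would wire it up roughly as follows; the context structure, the panel funcs and the source of the drm_dp_aux pointer are assumptions for illustration, not part of this interface:

struct hypothetical_edp_panel {
	struct drm_panel panel;
};

static int hypothetical_edp_panel_probe(struct device *dev, struct drm_dp_aux *aux)
{
	struct hypothetical_edp_panel *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	drm_panel_init(&ctx->panel, dev, &hypothetical_edp_panel_funcs,
		       DRM_MODE_CONNECTOR_eDP);

	/* Must come after drm_panel_init(); backlight is then handled transparently */
	ret = drm_panel_dp_aux_backlight(&ctx->panel, aux);
	if (ret)
		return ret;

	drm_panel_add(&ctx->panel);

	return 0;
}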
+ */ +int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux) +{ + struct dp_aux_backlight *bl; + struct backlight_properties props = { 0 }; + u16 current_level; + u8 current_mode; + u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; + int ret; + + if (!panel || !panel->dev || !aux) + return -EINVAL; + + ret = drm_dp_dpcd_read(aux, DP_EDP_DPCD_REV, edp_dpcd, + EDP_DISPLAY_CTL_CAP_SIZE); + if (ret < 0) + return ret; + + if (!drm_edp_backlight_supported(edp_dpcd)) { + DRM_DEV_INFO(panel->dev, "DP AUX backlight is not supported\n"); + return 0; + } + + bl = devm_kzalloc(panel->dev, sizeof(*bl), GFP_KERNEL); + if (!bl) + return -ENOMEM; + + bl->aux = aux; + + ret = drm_edp_backlight_init(aux, &bl->info, 0, edp_dpcd, + ¤t_level, ¤t_mode); + if (ret < 0) + return ret; + + props.type = BACKLIGHT_RAW; + props.brightness = current_level; + props.max_brightness = bl->info.max; + + bl->base = devm_backlight_device_register(panel->dev, "dp_aux_backlight", + panel->dev, bl, + &dp_aux_bl_ops, &props); + if (IS_ERR(bl->base)) + return PTR_ERR(bl->base); + + backlight_disable(bl->base); + + panel->backlight = bl->base; + + return 0; +} +EXPORT_SYMBOL(drm_panel_dp_aux_backlight); + +#endif diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index ad0795afc21c..86d13d6bc463 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -2872,11 +2872,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, idx += tosend + 1; ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx); - if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) { - struct drm_printer p = drm_debug_printer(DBG_PREFIX); + if (ret) { + if (drm_debug_enabled(DRM_UT_DP)) { + struct drm_printer p = drm_debug_printer(DBG_PREFIX); - drm_printf(&p, "sideband msg failed to send\n"); - drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); + drm_printf(&p, "sideband msg failed to send\n"); + drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); + } return ret; } diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index d4f0bac6f8f8..ceb1a9723855 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -176,6 +176,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor) init_waitqueue_head(&file->event_wait); file->event_space = 4096; /* set aside 4k for event buffer */ + spin_lock_init(&file->master_lookup_lock); mutex_init(&file->event_read_lock); if (drm_core_check_feature(dev, DRIVER_GEM)) diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 4d01464b6f95..d3d09aba9833 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -404,6 +404,9 @@ static void drm_mode_rmfb_work_fn(struct work_struct *w) struct drm_framebuffer *fb = list_first_entry(&arg->fbs, typeof(*fb), filp_head); + drm_dbg_kms(fb->dev, + "Removing [FB:%d] from all active usage due to RMFB ioctl\n", + fb->base.id); list_del_init(&fb->filp_head); drm_framebuffer_remove(fb); } @@ -981,6 +984,10 @@ retry: if (plane->state->fb != fb) continue; + drm_dbg_kms(dev, + "Disabling [PLANE:%d:%s] because [FB:%d] is removed\n", + plane->base.id, plane->name, fb->base.id); + plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) { ret = PTR_ERR(plane_state); @@ -990,6 +997,11 @@ retry: if (disable_crtcs && plane_state->crtc->primary == plane) { struct drm_crtc_state *crtc_state; + drm_dbg_kms(dev, + "Disabling [CRTC:%d:%s] because [FB:%d] is removed\n", + 
plane_state->crtc->base.id, + plane_state->crtc->name, fb->base.id); + crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc); ret = drm_atomic_add_affected_connectors(state, plane_state->crtc); @@ -1052,6 +1064,10 @@ static void legacy_remove_fb(struct drm_framebuffer *fb) /* remove from any CRTC */ drm_for_each_crtc(crtc, dev) { if (crtc->primary->fb == fb) { + drm_dbg_kms(dev, + "Disabling [CRTC:%d:%s] because [FB:%d] is removed\n", + crtc->base.id, crtc->name, fb->base.id); + /* should turn off the crtc */ if (drm_crtc_force_disable(crtc)) DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); @@ -1059,8 +1075,12 @@ static void legacy_remove_fb(struct drm_framebuffer *fb) } drm_for_each_plane(plane, dev) { - if (plane->fb == fb) + if (plane->fb == fb) { + drm_dbg_kms(dev, + "Disabling [PLANE:%d:%s] because [FB:%d] is removed\n", + plane->base.id, plane->name, fb->base.id); drm_plane_force_disable(plane); + } } drm_modeset_unlock_all(dev); } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index a34525332bef..96d642f29fa4 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1126,15 +1126,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) return -EACCES; } - if (node->readonly) { - if (vma->vm_flags & VM_WRITE) { - drm_gem_object_put(obj); - return -EINVAL; - } - - vma->vm_flags &= ~VM_MAYWRITE; - } - ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma); @@ -1289,6 +1280,9 @@ EXPORT_SYMBOL(drm_gem_unlock_reservations); * @fence_array: array of dma_fence * for the job to block on. * @fence: the dma_fence to add to the list of dependencies. * + * This functions consumes the reference for @fence both on success and error + * cases. + * * Returns: * 0 on success, or an error on failing to expand the array. */ diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index a27135084ae5..26af09b959d4 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -135,6 +135,9 @@ * GEM based framebuffer drivers which have their buffers always pinned in * memory. * + * This function is the default implementation for GEM drivers of + * &drm_plane_helper_funcs.prepare_fb if no callback is provided. + * * See drm_atomic_set_fence_for_plane() for a discussion of implicit and * explicit fencing in atomic modeset updates. */ @@ -180,6 +183,27 @@ EXPORT_SYMBOL(drm_gem_simple_display_pipe_prepare_fb); */ /** + * __drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state + * @plane: the plane + * @new_shadow_plane_state: the new shadow-buffered plane state + * + * This function duplicates shadow-buffered plane state. This is helpful for drivers + * that subclass struct drm_shadow_plane_state. + * + * The function does not duplicate existing mappings of the shadow buffers. + * Mappings are maintained during the atomic commit by the plane's prepare_fb + * and cleanup_fb helpers. See drm_gem_prepare_shadow_fb() and drm_gem_cleanup_shadow_fb() + * for corresponding helpers. 
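The reason these __drm_gem_*() variants are exported is so that drivers can embed struct drm_shadow_plane_state in a larger, driver-private plane state. A rough sketch of such a subclass, with hypothetical struct, field and callback names:

struct hypothetical_plane_state {
	struct drm_shadow_plane_state base;
	/* driver-private state goes here */
	bool needs_full_flush;
};

static struct drm_plane_state *
hypothetical_plane_duplicate_state(struct drm_plane *plane)
{
	struct hypothetical_plane_state *new_state;

	if (WARN_ON(!plane->state))
		return NULL;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	/* duplicates the embedded shadow/plane state, but not the buffer mappings */
	__drm_gem_duplicate_shadow_plane_state(plane, &new_state->base);
	new_state->needs_full_flush = false;

	return &new_state->base.base;
}

static void hypothetical_plane_destroy_state(struct drm_plane *plane,
					     struct drm_plane_state *plane_state)
{
	struct drm_shadow_plane_state *shadow_plane_state =
		to_drm_shadow_plane_state(plane_state);

	__drm_gem_destroy_shadow_plane_state(shadow_plane_state);
	kfree(container_of(shadow_plane_state, struct hypothetical_plane_state, base));
}

The matching .reset callback would allocate the subclass and call __drm_gem_reset_shadow_plane() on the embedded state in the same way.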
+ */ +void +__drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane, + struct drm_shadow_plane_state *new_shadow_plane_state) +{ + __drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base); +} +EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state); + +/** * drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state * @plane: the plane * @@ -208,13 +232,26 @@ drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane) new_shadow_plane_state = kzalloc(sizeof(*new_shadow_plane_state), GFP_KERNEL); if (!new_shadow_plane_state) return NULL; - __drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base); + __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state); return &new_shadow_plane_state->base; } EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state); /** + * __drm_gem_destroy_shadow_plane_state - cleans up shadow-buffered plane state + * @shadow_plane_state: the shadow-buffered plane state + * + * This function cleans up shadow-buffered plane state. Helpful for drivers that + * subclass struct drm_shadow_plane_state. + */ +void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state) +{ + __drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base); +} +EXPORT_SYMBOL(__drm_gem_destroy_shadow_plane_state); + +/** * drm_gem_destroy_shadow_plane_state - deletes shadow-buffered plane state * @plane: the plane * @plane_state: the plane state of type struct drm_shadow_plane_state @@ -229,12 +266,27 @@ void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane, struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); - __drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base); + __drm_gem_destroy_shadow_plane_state(shadow_plane_state); kfree(shadow_plane_state); } EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state); /** + * __drm_gem_reset_shadow_plane - resets a shadow-buffered plane + * @plane: the plane + * @shadow_plane_state: the shadow-buffered plane state + * + * This function resets state for shadow-buffered planes. Helpful + * for drivers that subclass struct drm_shadow_plane_state. + */ +void __drm_gem_reset_shadow_plane(struct drm_plane *plane, + struct drm_shadow_plane_state *shadow_plane_state) +{ + __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); +} +EXPORT_SYMBOL(__drm_gem_reset_shadow_plane); + +/** * drm_gem_reset_shadow_plane - resets a shadow-buffered plane * @plane: the plane * @@ -255,7 +307,7 @@ void drm_gem_reset_shadow_plane(struct drm_plane *plane) shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL); if (!shadow_plane_state) return; - __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); + __drm_gem_reset_shadow_plane(plane, shadow_plane_state); } EXPORT_SYMBOL(drm_gem_reset_shadow_plane); diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index e2c68822e05c..67bc9edc1d98 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -306,6 +306,95 @@ drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file, } EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty); +/** + * drm_gem_fb_begin_cpu_access - prepares GEM buffer objects for CPU access + * @fb: the framebuffer + * @dir: access mode + * + * Prepares a framebuffer's GEM buffer objects for CPU access. This function + * must be called before accessing the BO data within the kernel. 
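A typical use of this pair, sketched for a driver that needs to read pixel data out of a framebuffer that may wrap imported dma-bufs:

	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* it is now safe to read the BO contents through the CPU, e.g. into a shadow buffer */

	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);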
For imported + * BOs, the function calls dma_buf_begin_cpu_access(). + * + * See drm_gem_fb_end_cpu_access() for signalling the end of CPU access. + * + * Returns: + * 0 on success, or a negative errno code otherwise. + */ +int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir) +{ + struct dma_buf_attachment *import_attach; + struct drm_gem_object *obj; + size_t i; + int ret, ret2; + + for (i = 0; i < ARRAY_SIZE(fb->obj); ++i) { + obj = drm_gem_fb_get_obj(fb, i); + if (!obj) + continue; + import_attach = obj->import_attach; + if (!import_attach) + continue; + ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir); + if (ret) + goto err_dma_buf_end_cpu_access; + } + + return 0; + +err_dma_buf_end_cpu_access: + while (i) { + --i; + obj = drm_gem_fb_get_obj(fb, i); + if (!obj) + continue; + import_attach = obj->import_attach; + if (!import_attach) + continue; + ret2 = dma_buf_end_cpu_access(import_attach->dmabuf, dir); + if (ret2) { + drm_err(fb->dev, + "dma_buf_end_cpu_access() failed during error handling: %d\n", + ret2); + } + } + + return ret; +} +EXPORT_SYMBOL(drm_gem_fb_begin_cpu_access); + +/** + * drm_gem_fb_end_cpu_access - signals end of CPU access to GEM buffer objects + * @fb: the framebuffer + * @dir: access mode + * + * Signals the end of CPU access to the given framebuffer's GEM buffer objects. This + * function must be paired with a corresponding call to drm_gem_fb_begin_cpu_access(). + * For imported BOs, the function calls dma_buf_end_cpu_access(). + * + * See also drm_gem_fb_begin_cpu_access(). + */ +void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir) +{ + size_t i = ARRAY_SIZE(fb->obj); + struct dma_buf_attachment *import_attach; + struct drm_gem_object *obj; + int ret; + + while (i) { + --i; + obj = drm_gem_fb_get_obj(fb, i); + if (!obj) + continue; + import_attach = obj->import_attach; + if (!import_attach) + continue; + ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir); + if (ret) + drm_err(fb->dev, "dma_buf_end_cpu_access() failed: %d\n", ret); + } +} +EXPORT_SYMBOL(drm_gem_fb_end_cpu_access); + static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd) { diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 6d625cee7a6a..d5e6d4568f99 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -505,13 +505,13 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, if (!args->pitch || !args->size) { args->pitch = min_pitch; - args->size = args->pitch * args->height; + args->size = PAGE_ALIGN(args->pitch * args->height); } else { /* ensure sane minimum values */ if (args->pitch < min_pitch) args->pitch = min_pitch; if (args->size < args->pitch * args->height) - args->size = args->pitch * args->height; + args->size = PAGE_ALIGN(args->pitch * args->height); } shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 2a1229b8364e..1e9b82e51a07 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1012,9 +1012,8 @@ static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm) * Helpers for integration with struct drm_device */ -/* deprecated; use drmm_vram_mm_init() */ -struct drm_vram_mm *drm_vram_helper_alloc_mm( - struct drm_device *dev, uint64_t vram_base, size_t vram_size) +static struct 
drm_vram_mm *drm_vram_helper_alloc_mm(struct drm_device *dev, uint64_t vram_base, + size_t vram_size) { int ret; @@ -1036,9 +1035,8 @@ err_kfree: dev->vram_mm = NULL; return ERR_PTR(ret); } -EXPORT_SYMBOL(drm_vram_helper_alloc_mm); -void drm_vram_helper_release_mm(struct drm_device *dev) +static void drm_vram_helper_release_mm(struct drm_device *dev) { if (!dev->vram_mm) return; @@ -1047,7 +1045,6 @@ void drm_vram_helper_release_mm(struct drm_device *dev) kfree(dev->vram_mm); dev->vram_mm = NULL; } -EXPORT_SYMBOL(drm_vram_helper_release_mm); static void drm_vram_mm_release(struct drm_device *dev, void *ptr) { diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index c3bd664ea733..201eae4bba6c 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -74,10 +74,8 @@ * only supports devices with a single interrupt on the main device stored in * &drm_device.dev and set as the device paramter in drm_dev_alloc(). * - * These IRQ helpers are strictly optional. Drivers which roll their own only - * need to set &drm_device.irq_enabled to signal the DRM core that vblank - * interrupts are working. Since these helpers don't automatically clean up the - * requested interrupt like e.g. devm_request_irq() they're not really + * These IRQ helpers are strictly optional. Since these helpers don't automatically + * clean up the requested interrupt like e.g. devm_request_irq() they're not really * recommended. */ @@ -91,9 +89,7 @@ * and after the installation. * * This is the simplified helper interface provided for drivers with no special - * needs. Drivers which need to install interrupt handlers for multiple - * interrupts must instead set &drm_device.irq_enabled to signal the DRM core - * that vblank interrupts are available. + * needs. * * @irq must match the interrupt number that would be passed to request_irq(), * if called directly instead of using this helper function. @@ -140,7 +136,7 @@ int drm_irq_install(struct drm_device *dev, int irq) if (ret < 0) { dev->irq_enabled = false; if (drm_core_check_feature(dev, DRIVER_LEGACY)) - vga_client_register(to_pci_dev(dev->dev), NULL, NULL, NULL); + vga_client_unregister(to_pci_dev(dev->dev)); free_irq(irq, dev); } else { dev->irq = irq; @@ -156,8 +152,7 @@ EXPORT_SYMBOL(drm_irq_install); * * Calls the driver's &drm_driver.irq_uninstall function and unregisters the IRQ * handler. This should only be called by drivers which used drm_irq_install() - * to set up their interrupt handler. Other drivers must only reset - * &drm_device.irq_enabled to false. + * to set up their interrupt handler. * * Note that for kernel modesetting drivers it is a bug if this function fails. * The sanity checks are only to catch buggy user modesetting drivers which call @@ -203,7 +198,7 @@ int drm_irq_uninstall(struct drm_device *dev) DRM_DEBUG("irq=%d\n", dev->irq); if (drm_core_check_feature(dev, DRIVER_LEGACY)) - vga_client_register(to_pci_dev(dev->dev), NULL, NULL, NULL); + vga_client_unregister(to_pci_dev(dev->dev)); if (dev->driver->irq_uninstall) dev->driver->irq_uninstall(dev); diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index 00fb433bcef1..79be797e8689 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -15,6 +15,57 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" +/** + * DOC: drm leasing + * + * DRM leases provide information about whether a DRM master may control a DRM + * mode setting object. 
This enables the creation of multiple DRM masters that + * manage subsets of display resources. + * + * The original DRM master of a device 'owns' the available drm resources. It + * may create additional DRM masters and 'lease' resources which it controls + * to the new DRM master. This gives the new DRM master control over the + * leased resources until the owner revokes the lease, or the new DRM master + * is closed. Some helpful terminology: + * + * - An 'owner' is a &struct drm_master that is not leasing objects from + * another &struct drm_master, and hence 'owns' the objects. The owner can be + * identified as the &struct drm_master for which &drm_master.lessor is NULL. + * + * - A 'lessor' is a &struct drm_master which is leasing objects to one or more + * other &struct drm_master. Currently, lessees are not allowed to + * create sub-leases, hence the lessor is the same as the owner. + * + * - A 'lessee' is a &struct drm_master which is leasing objects from some + * other &struct drm_master. Each lessee only leases resources from a single + * lessor recorded in &drm_master.lessor, and holds the set of objects that + * it is leasing in &drm_master.leases. + * + * - A 'lease' is a contract between the lessor and lessee that identifies + * which resources may be controlled by the lessee. All of the resources + * that are leased must be owned by or leased to the lessor, and lessors are + * not permitted to lease the same object to multiple lessees. + * + * The set of objects any &struct drm_master 'controls' is limited to the set + * of objects it leases (for lessees) or all objects (for owners). + * + * Objects not controlled by a &struct drm_master cannot be modified through + * the various state manipulating ioctls, and any state reported back to user + * space will be edited to make them appear idle and/or unusable. For + * instance, connectors always report 'disconnected', while encoders + * report no possible crtcs or clones. + * + * Since each lessee may lease objects from a single lessor, display resource + * leases form a tree of &struct drm_master. As lessees are currently not + * allowed to create sub-leases, the tree depth is limited to 1. All of + * these get activated simultaneously when the top level device owner changes + * through the SETMASTER or DROPMASTER IOCTL, so &drm_device.master points to + * the owner at the top of the lease tree (i.e. the &struct drm_master for which + * &drm_master.lessor is NULL). The full list of lessees that are leasing + * objects from the owner can be searched via the owner's + * &drm_master.lessee_idr. 
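From user-space, a lease is created with the CREATE_LEASE ioctl implemented below. A minimal sketch, with error handling elided; owner_fd, crtc_id and connector_id are assumed to come from the compositor, and the field names follow struct drm_mode_create_lease in the DRM uapi:

	uint32_t objects[2] = { crtc_id, connector_id };
	struct drm_mode_create_lease create = {
		.object_ids = (uint64_t)(uintptr_t)objects,
		.object_count = 2,
		.flags = O_CLOEXEC,
	};

	if (ioctl(owner_fd, DRM_IOCTL_MODE_CREATE_LEASE, &create) == 0) {
		/*
		 * create.fd is a new DRM file descriptor restricted to the leased
		 * objects; create.lessee_id is what the owner later passes to
		 * DRM_IOCTL_MODE_REVOKE_LEASE to revoke the lease.
		 */
	}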
+ */ + #define drm_for_each_lessee(lessee, lessor) \ list_for_each_entry((lessee), &(lessor)->lessees, lessee_list) @@ -106,10 +157,19 @@ static bool _drm_has_leased(struct drm_master *master, int id) */ bool _drm_lease_held(struct drm_file *file_priv, int id) { - if (!file_priv || !file_priv->master) + bool ret; + struct drm_master *master; + + if (!file_priv) return true; - return _drm_lease_held_master(file_priv->master, id); + master = drm_file_get_master(file_priv); + if (!master) + return true; + ret = _drm_lease_held_master(master, id); + drm_master_put(&master); + + return ret; } /** @@ -128,13 +188,22 @@ bool drm_lease_held(struct drm_file *file_priv, int id) struct drm_master *master; bool ret; - if (!file_priv || !file_priv->master || !file_priv->master->lessor) + if (!file_priv) return true; - master = file_priv->master; + master = drm_file_get_master(file_priv); + if (!master) + return true; + if (!master->lessor) { + ret = true; + goto out; + } mutex_lock(&master->dev->mode_config.idr_mutex); ret = _drm_lease_held_master(master, id); mutex_unlock(&master->dev->mode_config.idr_mutex); + +out: + drm_master_put(&master); return ret; } @@ -154,10 +223,16 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in) int count_in, count_out; uint32_t crtcs_out = 0; - if (!file_priv || !file_priv->master || !file_priv->master->lessor) + if (!file_priv) return crtcs_in; - master = file_priv->master; + master = drm_file_get_master(file_priv); + if (!master) + return crtcs_in; + if (!master->lessor) { + crtcs_out = crtcs_in; + goto out; + } dev = master->dev; count_in = count_out = 0; @@ -176,6 +251,9 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in) count_in++; } mutex_unlock(&master->dev->mode_config.idr_mutex); + +out: + drm_master_put(&master); return crtcs_out; } @@ -489,7 +567,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, size_t object_count; int ret = 0; struct idr leases; - struct drm_master *lessor = lessor_priv->master; + struct drm_master *lessor; struct drm_master *lessee = NULL; struct file *lessee_file = NULL; struct file *lessor_file = lessor_priv->filp; @@ -501,12 +579,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; - /* Do not allow sub-leases */ - if (lessor->lessor) { - DRM_DEBUG_LEASE("recursive leasing not allowed\n"); - return -EINVAL; - } - /* need some objects */ if (cl->object_count == 0) { DRM_DEBUG_LEASE("no objects in lease\n"); @@ -518,12 +590,22 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, return -EINVAL; } + lessor = drm_file_get_master(lessor_priv); + /* Do not allow sub-leases */ + if (lessor->lessor) { + DRM_DEBUG_LEASE("recursive leasing not allowed\n"); + ret = -EINVAL; + goto out_lessor; + } + object_count = cl->object_count; object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), array_size(object_count, sizeof(__u32))); - if (IS_ERR(object_ids)) - return PTR_ERR(object_ids); + if (IS_ERR(object_ids)) { + ret = PTR_ERR(object_ids); + goto out_lessor; + } idr_init(&leases); @@ -534,14 +616,15 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, if (ret) { DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret); idr_destroy(&leases); - return ret; + goto out_lessor; } /* Allocate a file descriptor for the lease */ fd = get_unused_fd_flags(cl->flags & (O_CLOEXEC | O_NONBLOCK)); if (fd < 0) { idr_destroy(&leases); - return fd; + ret = fd; + goto out_lessor; } 
DRM_DEBUG_LEASE("Creating lease\n"); @@ -577,6 +660,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, /* Hook up the fd */ fd_install(fd, lessee_file); + drm_master_put(&lessor); DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n"); return 0; @@ -586,6 +670,8 @@ out_lessee: out_leases: put_unused_fd(fd); +out_lessor: + drm_master_put(&lessor); DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret); return ret; } @@ -608,7 +694,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev, struct drm_mode_list_lessees *arg = data; __u32 __user *lessee_ids = (__u32 __user *) (uintptr_t) (arg->lessees_ptr); __u32 count_lessees = arg->count_lessees; - struct drm_master *lessor = lessor_priv->master, *lessee; + struct drm_master *lessor, *lessee; int count; int ret = 0; @@ -619,6 +705,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; + lessor = drm_file_get_master(lessor_priv); DRM_DEBUG_LEASE("List lessees for %d\n", lessor->lessee_id); mutex_lock(&dev->mode_config.idr_mutex); @@ -642,6 +729,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev, arg->count_lessees = count; mutex_unlock(&dev->mode_config.idr_mutex); + drm_master_put(&lessor); return ret; } @@ -661,7 +749,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev, struct drm_mode_get_lease *arg = data; __u32 __user *object_ids = (__u32 __user *) (uintptr_t) (arg->objects_ptr); __u32 count_objects = arg->count_objects; - struct drm_master *lessee = lessee_priv->master; + struct drm_master *lessee; struct idr *object_idr; int count; void *entry; @@ -675,6 +763,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; + lessee = drm_file_get_master(lessee_priv); DRM_DEBUG_LEASE("get lease for %d\n", lessee->lessee_id); mutex_lock(&dev->mode_config.idr_mutex); @@ -702,6 +791,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev, arg->count_objects = count; mutex_unlock(&dev->mode_config.idr_mutex); + drm_master_put(&lessee); return ret; } @@ -720,7 +810,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev, void *data, struct drm_file *lessor_priv) { struct drm_mode_revoke_lease *arg = data; - struct drm_master *lessor = lessor_priv->master; + struct drm_master *lessor; struct drm_master *lessee; int ret = 0; @@ -730,6 +820,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; + lessor = drm_file_get_master(lessor_priv); mutex_lock(&dev->mode_config.idr_mutex); lessee = _drm_find_lessee(lessor, arg->lessee_id); @@ -750,6 +841,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev, fail: mutex_unlock(&dev->mode_config.idr_mutex); + drm_master_put(&lessor); return ret; } diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index 43a9b739bba7..71b646c4131f 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -7,7 +7,6 @@ #include <linux/debugfs.h> #include <linux/delay.h> -#include <linux/dma-buf.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/regulator/consumer.h> @@ -202,21 +201,17 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, { struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0); struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem); - struct dma_buf_attachment *import_attach = gem->import_attach; void *src = cma_obj->vaddr; - int ret = 0; + int ret; - if 
(import_attach) { - ret = dma_buf_begin_cpu_access(import_attach->dmabuf, - DMA_FROM_DEVICE); - if (ret) - return ret; - } + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); + if (ret) + return ret; switch (fb->format->format) { case DRM_FORMAT_RGB565: if (swap) - drm_fb_swab(dst, src, fb, clip, !import_attach); + drm_fb_swab(dst, src, fb, clip, !gem->import_attach); else drm_fb_memcpy(dst, src, fb, clip); break; @@ -229,9 +224,8 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, return -EINVAL; } - if (import_attach) - ret = dma_buf_end_cpu_access(import_attach->dmabuf, - DMA_FROM_DEVICE); + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); + return ret; } EXPORT_SYMBOL(mipi_dbi_buf_copy); @@ -928,6 +922,59 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc, return 0; } +static int mipi_dbi_typec1_command_read(struct mipi_dbi *dbi, u8 *cmd, + u8 *data, size_t len) +{ + struct spi_device *spi = dbi->spi; + u32 speed_hz = min_t(u32, MIPI_DBI_MAX_SPI_READ_SPEED, + spi->max_speed_hz / 2); + struct spi_transfer tr[2] = { + { + .speed_hz = speed_hz, + .bits_per_word = 9, + .tx_buf = dbi->tx_buf9, + .len = 2, + }, { + .speed_hz = speed_hz, + .bits_per_word = 8, + .len = len, + .rx_buf = data, + }, + }; + struct spi_message m; + u16 *dst16; + int ret; + + if (!len) + return -EINVAL; + + if (!spi_is_bpw_supported(spi, 9)) { + /* + * FIXME: implement something like mipi_dbi_spi1e_transfer() but + * for reads using emulation. + */ + dev_err(&spi->dev, + "reading on host not supporting 9 bpw not yet implemented\n"); + return -EOPNOTSUPP; + } + + /* + * Turn the 8bit command into a 16bit version of the command in the + * buffer. Only 9 bits of this will be used when executing the actual + * transfer. + */ + dst16 = dbi->tx_buf9; + dst16[0] = *cmd; + + spi_message_init_with_transfers(&m, tr, ARRAY_SIZE(tr)); + ret = spi_sync(spi, &m); + + if (!ret) + MIPI_DBI_DEBUG_COMMAND(*cmd, data, len); + + return ret; +} + static int mipi_dbi_typec1_command(struct mipi_dbi *dbi, u8 *cmd, u8 *parameters, size_t num) { @@ -935,7 +982,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *dbi, u8 *cmd, int ret; if (mipi_dbi_command_is_read(dbi, *cmd)) - return -EOPNOTSUPP; + return mipi_dbi_typec1_command_read(dbi, cmd, parameters, num); MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num); diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index ca04c34e8251..997b8827fed2 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -315,7 +315,7 @@ static int drm_of_lvds_get_remote_pixels_type( remote_port = of_graph_get_remote_port(endpoint); if (!remote_port) { - of_node_put(remote_port); + of_node_put(endpoint); return -EPIPE; } @@ -331,8 +331,10 @@ static int drm_of_lvds_get_remote_pixels_type( * configurations by passing the endpoints explicitly to * drm_of_lvds_get_dual_link_pixel_order(). */ - if (!current_pt || pixels_type != current_pt) + if (!current_pt || pixels_type != current_pt) { + of_node_put(endpoint); return -EINVAL; + } } return pixels_type; diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index b373958ecb30..f5fe8255597c 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -1397,6 +1397,110 @@ out: return ret; } +/** + * DOC: damage tracking + * + * FB_DAMAGE_CLIPS is an optional plane property which provides a means to + * specify a list of damage rectangles on a plane in framebuffer coordinates of + * the framebuffer attached to the plane. 
In this context, damage is the area + * of the plane framebuffer that has changed since the last plane update (also called a + * page-flip), irrespective of whether the currently attached framebuffer is the same as the + * framebuffer attached during the last plane update or not. + * + * FB_DAMAGE_CLIPS is a hint to the kernel that can help some drivers optimize internally, + * especially virtual devices where each framebuffer change needs to be transmitted over a + * network, USB, etc. + * + * Since FB_DAMAGE_CLIPS is only a hint, it is an optional property. User-space can + * ignore the damage clips property, in which case the driver will do a full plane + * update. If damage clips are provided, it is guaranteed that the area + * inside the damage clips will be updated on the plane. For efficiency the driver can do a + * full update or can update more than what is specified in the damage clips. Since the driver + * is free to read more, user-space must always render the entire visible + * framebuffer; otherwise there can be corruption. Also, damage clips provided by user-space + * that do not encompass the actual damage to the + * framebuffer (since the last plane update) can result in incorrect rendering. + * + * FB_DAMAGE_CLIPS is a blob property; the layout of the blob data is simply an + * array of &drm_mode_rect. Unlike the plane &drm_plane_state.src coordinates, + * damage clips are not in 16.16 fixed point. Similar to the plane src in the + * framebuffer, damage clips cannot be negative. In a damage clip, x1/y1 are + * inclusive and x2/y2 are exclusive. While the kernel does not error out on overlapping + * damage clips, they are strongly discouraged. + * + * Drivers that are interested in the damage interface for a plane should enable the + * FB_DAMAGE_CLIPS property by calling drm_plane_enable_fb_damage_clips(). + * Drivers implementing damage can use the drm_atomic_helper_damage_iter_init() and + * drm_atomic_helper_damage_iter_next() helper iterator functions to get damage + * rectangles clipped to &drm_plane_state.src. + */ + +/** + * drm_plane_enable_fb_damage_clips - Enables plane fb damage clips property. + * @plane: Plane on which to enable damage clips property. + * + * This function lets a driver enable the damage clips property on a plane. + */ +void drm_plane_enable_fb_damage_clips(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct drm_mode_config *config = &dev->mode_config; + + drm_object_attach_property(&plane->base, config->prop_fb_damage_clips, + 0); +} +EXPORT_SYMBOL(drm_plane_enable_fb_damage_clips); + +/** + * drm_plane_get_damage_clips_count - Returns damage clips count. + * @state: Plane state. + * + * Simple helper to get the number of &drm_mode_rect clips set by user-space + * during a plane update. + * + * Return: Number of clips in plane fb_damage_clips blob property. + */ +unsigned int +drm_plane_get_damage_clips_count(const struct drm_plane_state *state) +{ + return (state && state->fb_damage_clips) ? + state->fb_damage_clips->length/sizeof(struct drm_mode_rect) : 0; +} +EXPORT_SYMBOL(drm_plane_get_damage_clips_count); + +struct drm_mode_rect * +__drm_plane_get_damage_clips(const struct drm_plane_state *state) +{ + return (struct drm_mode_rect *)((state && state->fb_damage_clips) ? + state->fb_damage_clips->data : NULL); +} + +/** + * drm_plane_get_damage_clips - Returns damage clips. + * @state: Plane state. + * + * Note that this function returns the uapi type &drm_mode_rect.
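The overview above names drm_atomic_helper_damage_iter_init() and drm_atomic_helper_damage_iter_next() as the driver-side consumers of these clips. A rough sketch of a plane update that only flushes damaged regions, where the driver and flush helper names are hypothetical:

static void hypothetical_plane_atomic_update(struct drm_plane *plane,
					     struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		/* hypothetical_flush_rect() stands in for the driver's upload path */
		hypothetical_flush_rect(plane, new_state->fb, &clip);
	}
}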
Drivers might want + * to use the helper functions drm_atomic_helper_damage_iter_init() and + * drm_atomic_helper_damage_iter_next() or drm_atomic_helper_damage_merged() if + * the driver can only handle a single damage region at most. + * + * Return: Damage clips in plane fb_damage_clips blob property. + */ +struct drm_mode_rect * +drm_plane_get_damage_clips(const struct drm_plane_state *state) +{ + struct drm_device *dev = state->plane->dev; + struct drm_mode_config *config = &dev->mode_config; + + /* check that drm_plane_enable_fb_damage_clips() was called */ + if (!drm_mode_obj_find_prop_id(&state->plane->base, + config->prop_fb_damage_clips->base.id)) + drm_warn_once(dev, "drm_plane_enable_fb_damage_clips() not called\n"); + + return __drm_plane_get_damage_clips(state); +} +EXPORT_SYMBOL(drm_plane_get_damage_clips); + struct drm_property * drm_create_scaling_filter_prop(struct drm_device *dev, unsigned int supported_filters) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 2a54f86856af..178e18c28cab 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -549,7 +549,7 @@ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, * * FIXME: The underlying helper functions are named rather inconsistently. * - * Exporting buffers + * Importing buffers * ~~~~~~~~~~~~~~~~~ * * Importing dma-bufs using drm_gem_prime_import() relies on diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index 0b095a313c44..735f4f34bcc4 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -9,6 +9,8 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_drv.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_managed.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> @@ -225,8 +227,14 @@ static int drm_simple_kms_plane_prepare_fb(struct drm_plane *plane, struct drm_simple_display_pipe *pipe; pipe = container_of(plane, struct drm_simple_display_pipe, plane); - if (!pipe->funcs || !pipe->funcs->prepare_fb) - return 0; + if (!pipe->funcs || !pipe->funcs->prepare_fb) { + if (WARN_ON_ONCE(!drm_core_check_feature(plane->dev, DRIVER_GEM))) + return 0; + + WARN_ON_ONCE(pipe->funcs && pipe->funcs->cleanup_fb); + + return drm_gem_simple_display_pipe_prepare_fb(pipe, state); + } return pipe->funcs->prepare_fb(pipe, state); } diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index fdd2ec87cdd1..1c5b9ef6da37 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -861,7 +861,7 @@ static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private, &fence); if (ret) goto err; - chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL); + chain = dma_fence_chain_alloc(); if (!chain) { ret = -ENOMEM; goto err1; @@ -1402,10 +1402,10 @@ drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data, goto err_points; } for (i = 0; i < args->count_handles; i++) { - chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL); + chains[i] = dma_fence_chain_alloc(); if (!chains[i]) { for (j = 0; j < i; j++) - kfree(chains[j]); + dma_fence_chain_free(chains[j]); ret = -ENOMEM; goto err_chains; } diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 3417e1ac7918..bba6781cc48f 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -1737,6 +1737,15 @@ static void 
drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe, reply->tval_usec = ts.tv_nsec / 1000; } +static bool drm_wait_vblank_supported(struct drm_device *dev) +{ + if (IS_ENABLED(CONFIG_DRM_LEGACY)) { + if (unlikely(drm_core_check_feature(dev, DRIVER_LEGACY))) + return dev->irq_enabled; + } + return drm_dev_has_vblank(dev); +} + int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -1748,7 +1757,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, unsigned int pipe_index; unsigned int flags, pipe, high_pipe; - if (!dev->irq_enabled) + if (!drm_wait_vblank_supported(dev)) return -EOPNOTSUPP; if (vblwait->request.type & _DRM_VBLANK_SIGNAL) @@ -2023,7 +2032,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; - if (!dev->irq_enabled) + if (!drm_dev_has_vblank(dev)) return -EOPNOTSUPP; crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id); @@ -2082,7 +2091,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; - if (!dev->irq_enabled) + if (!drm_dev_has_vblank(dev)) return -EOPNOTSUPP; crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 19826e504efc..feb6da1b6ceb 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -190,7 +190,8 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu) ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, etnaviv_hw_jobs_limit, etnaviv_job_hang_limit, - msecs_to_jiffies(500), NULL, dev_name(gpu->dev)); + msecs_to_jiffies(500), NULL, NULL, + dev_name(gpu->dev)); if (ret) return ret; diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index 9ac51b6ab34b..27664f663c5a 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c @@ -109,11 +109,8 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data, if (dp->ptn_bridge) { ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge, 0); - if (ret) { - DRM_DEV_ERROR(dp->dev, - "Failed to attach bridge to drm\n"); + if (ret) return ret; - } } return 0; diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index e60257f1f24b..d8f1cf4d6b69 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -300,16 +300,6 @@ static int exynos_drm_bind(struct device *dev) drm_mode_config_reset(drm); - /* - * enable drm irq mode. - * - with irq_enabled = true, we can use the vblank feature. - * - * P.S. note that we wouldn't use drm irq handler but - * just specific driver own one instead because - * drm framework supports only one irq handler. 
- */ - drm->irq_enabled = true; - /* init kms poll for handling hpd */ drm_kms_helper_poll_init(drm); diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 1d777d8c1a83..e39fac889edc 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -809,15 +809,15 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi) reg |= DSIM_AUTO_MODE; if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE) reg |= DSIM_HSE_MODE; - if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HFP)) + if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP)) reg |= DSIM_HFP_MODE; - if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HBP)) + if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP)) reg |= DSIM_HBP_MODE; - if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSA)) + if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA)) reg |= DSIM_HSA_MODE; } - if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)) + if (!(dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)) reg |= DSIM_EOT_DISABLE; switch (dsi->format) { diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index f893731d6021..c769dec576de 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -970,11 +970,8 @@ static int hdmi_create_connector(struct drm_encoder *encoder) drm_connector_helper_add(connector, &hdmi_connector_helper_funcs); drm_connector_attach_encoder(connector, encoder); - if (hdata->bridge) { + if (hdata->bridge) ret = drm_bridge_attach(encoder, hdata->bridge, NULL, 0); - if (ret) - DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n"); - } cec_fill_conn_info_from_drm(&conn_info, connector); diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index 432bdcc57ac9..f9b1f88c73bd 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -113,11 +113,11 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder, /* Find the connector we're trying to set up */ list_for_each_entry(connector, &mode_config->connector_list, head) { - if (!connector->encoder || connector->encoder->crtc != crtc) - continue; + if (connector->encoder && connector->encoder->crtc == crtc) + break; } - if (!connector) { + if (list_entry_is_head(connector, &mode_config->connector_list, head)) { DRM_ERROR("Couldn't find connector when setting mode"); gma_power_end(dev); return; diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c index e8b672dc9832..eb4e08846da4 100644 --- a/drivers/gpu/drm/gud/gud_drv.c +++ b/drivers/gpu/drm/gud/gud_drv.c @@ -364,7 +364,6 @@ static void gud_debugfs_init(struct drm_minor *minor) static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = { .check = gud_pipe_check, .update = gud_pipe_update, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct drm_mode_config_funcs gud_mode_config_funcs = { @@ -394,14 +393,42 @@ static const struct drm_driver gud_drm_driver = { .minor = 0, }; -static void gud_free_buffers_and_mutex(struct drm_device *drm, void *unused) +static int gud_alloc_bulk_buffer(struct gud_device *gdrm) { - struct gud_device *gdrm = to_gud_device(drm); + unsigned int i, num_pages; + struct page **pages; + void *ptr; + int ret; + + gdrm->bulk_buf = vmalloc_32(gdrm->bulk_len); + if (!gdrm->bulk_buf) + return -ENOMEM; + + num_pages = DIV_ROUND_UP(gdrm->bulk_len, PAGE_SIZE); + pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL); + if (!pages) + return -ENOMEM; + + 
for (i = 0, ptr = gdrm->bulk_buf; i < num_pages; i++, ptr += PAGE_SIZE) + pages[i] = vmalloc_to_page(ptr); + + ret = sg_alloc_table_from_pages(&gdrm->bulk_sgt, pages, num_pages, + 0, gdrm->bulk_len, GFP_KERNEL); + kfree(pages); + + return ret; +} + +static void gud_free_buffers_and_mutex(void *data) +{ + struct gud_device *gdrm = data; vfree(gdrm->compress_buf); - kfree(gdrm->bulk_buf); + gdrm->compress_buf = NULL; + sg_free_table(&gdrm->bulk_sgt); + vfree(gdrm->bulk_buf); + gdrm->bulk_buf = NULL; mutex_destroy(&gdrm->ctrl_lock); - mutex_destroy(&gdrm->damage_lock); } static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) @@ -455,7 +482,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) INIT_WORK(&gdrm->work, gud_flush_work); gud_clear_damage(gdrm); - ret = drmm_add_action_or_reset(drm, gud_free_buffers_and_mutex, NULL); + ret = devm_add_action(dev, gud_free_buffers_and_mutex, gdrm); if (ret) return ret; @@ -536,24 +563,17 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) if (desc.max_buffer_size) max_buffer_size = le32_to_cpu(desc.max_buffer_size); -retry: - /* - * Use plain kmalloc here since devm_kmalloc() places struct devres at the beginning - * of the buffer it allocates. This wastes a lot of memory when allocating big buffers. - * Asking for 2M would actually allocate 4M. This would also prevent getting the biggest - * possible buffer potentially leading to split transfers. - */ - gdrm->bulk_buf = kmalloc(max_buffer_size, GFP_KERNEL | __GFP_NOWARN); - if (!gdrm->bulk_buf) { - max_buffer_size = roundup_pow_of_two(max_buffer_size) / 2; - if (max_buffer_size < SZ_512K) - return -ENOMEM; - goto retry; - } + /* Prevent a misbehaving device from allocating loads of RAM. 
4096x4096@XRGB8888 = 64 MB */ + if (max_buffer_size > SZ_64M) + max_buffer_size = SZ_64M; gdrm->bulk_pipe = usb_sndbulkpipe(interface_to_usbdev(intf), usb_endpoint_num(bulk_out)); gdrm->bulk_len = max_buffer_size; + ret = gud_alloc_bulk_buffer(gdrm); + if (ret) + return ret; + if (gdrm->compression & GUD_COMPRESSION_LZ4) { gdrm->lz4_comp_mem = devm_kmalloc(dev, LZ4_MEM_COMPRESS, GFP_KERNEL); if (!gdrm->lz4_comp_mem) @@ -640,6 +660,7 @@ static int gud_resume(struct usb_interface *intf) static const struct usb_device_id gud_id_table[] = { { USB_DEVICE_INTERFACE_CLASS(0x1d50, 0x614d, USB_CLASS_VENDOR_SPEC) }, + { USB_DEVICE_INTERFACE_CLASS(0x16d0, 0x10a9, USB_CLASS_VENDOR_SPEC) }, { } }; diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h index b65105585578..2a388e27d5d7 100644 --- a/drivers/gpu/drm/gud/gud_internal.h +++ b/drivers/gpu/drm/gud/gud_internal.h @@ -5,6 +5,7 @@ #include <linux/list.h> #include <linux/mutex.h> +#include <linux/scatterlist.h> #include <linux/usb.h> #include <linux/workqueue.h> #include <uapi/drm/drm_fourcc.h> @@ -26,6 +27,7 @@ struct gud_device { unsigned int bulk_pipe; void *bulk_buf; size_t bulk_len; + struct sg_table bulk_sgt; u8 compression; void *lz4_comp_mem; diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c index 2f83ab6b8e61..4d7a26b68a2e 100644 --- a/drivers/gpu/drm/gud/gud_pipe.c +++ b/drivers/gpu/drm/gud/gud_pipe.c @@ -3,7 +3,6 @@ * Copyright 2020 Noralf Trønnes */ -#include <linux/dma-buf.h> #include <linux/lz4.h> #include <linux/usb.h> #include <linux/workqueue.h> @@ -15,6 +14,7 @@ #include <drm/drm_format_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_print.h> #include <drm/drm_rect.h> @@ -24,6 +24,19 @@ #include "gud_internal.h" /* + * Some userspace rendering loops run all displays in the same loop. + * This means that a fast display will have to wait for a slow one. + * For this reason gud flushes asynchronously by default. + * The downside is that in e.g. a single-display setup userspace thinks + * the display is insanely fast since the driver reports back immediately + * that the flush/pageflip is done. This wastes CPU and power. + * Such users might want to set this module parameter to false. + */ +static bool gud_async_flush = true; +module_param_named(async_flush, gud_async_flush, bool, 0644); +MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=true]"); + +/* * FIXME: The driver is probably broken on Big Endian machines.
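A condensed sketch of the bulk-transfer scheme the gud changes switch to, with hypothetical my_* names standing in for the gud_device fields: the buffer comes from vmalloc_32() so it need not be physically contiguous, a scatter-gather table is built for it once at allocation time, and each transfer is then submitted through the USB core's scatter-gather helpers with a stack timer that cancels the request on timeout (gud_usb_bulk() in the gud_pipe.c hunk below does the same).

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/vmalloc.h>

struct my_bulk_buf {
        void *vaddr;
        struct sg_table sgt;
};

static int my_bulk_buf_alloc(struct my_bulk_buf *buf, size_t len)
{
        unsigned int i, num_pages = DIV_ROUND_UP(len, PAGE_SIZE);
        struct page **pages;
        void *ptr;
        int ret;

        buf->vaddr = vmalloc_32(len); /* 32-bit addressable, like the gud driver */
        if (!buf->vaddr)
                return -ENOMEM;

        pages = kmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages) {
                vfree(buf->vaddr);
                return -ENOMEM;
        }

        /* Look up the page backing each PAGE_SIZE chunk of the vmalloc area. */
        for (i = 0, ptr = buf->vaddr; i < num_pages; i++, ptr += PAGE_SIZE)
                pages[i] = vmalloc_to_page(ptr);

        ret = sg_alloc_table_from_pages(&buf->sgt, pages, num_pages, 0, len, GFP_KERNEL);
        kfree(pages);
        if (ret)
                vfree(buf->vaddr);
        return ret;
}

struct my_bulk_ctx {
        struct timer_list timer;
        struct usb_sg_request sgr;
};

static void my_bulk_timeout(struct timer_list *t)
{
        struct my_bulk_ctx *ctx = from_timer(ctx, t, timer);

        usb_sg_cancel(&ctx->sgr); /* abort the transfer when the timer fires */
}

static int my_bulk_xfer(struct usb_device *usb, unsigned int pipe,
                        struct my_bulk_buf *buf, size_t len)
{
        struct my_bulk_ctx ctx;
        int ret;

        ret = usb_sg_init(&ctx.sgr, usb, pipe, 0, buf->sgt.sgl,
                          buf->sgt.nents, len, GFP_KERNEL);
        if (ret)
                return ret;

        timer_setup_on_stack(&ctx.timer, my_bulk_timeout, 0);
        mod_timer(&ctx.timer, jiffies + msecs_to_jiffies(3000));

        usb_sg_wait(&ctx.sgr); /* blocks until the request completes or is cancelled */

        if (!del_timer_sync(&ctx.timer))
                ret = -ETIMEDOUT; /* timer already fired: transfer was cancelled */
        else if (ctx.sgr.status < 0)
                ret = ctx.sgr.status;
        else if (ctx.sgr.bytes != len)
                ret = -EIO;

        destroy_timer_on_stack(&ctx.timer);

        return ret;
}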
* See discussion: * https://lore.kernel.org/dri-devel/CAKb7UvihLX0hgBOP3VBG7O+atwZcUVCPVuBdfmDMpg0NjXe-cQ@mail.gmail.com/ @@ -155,11 +168,9 @@ static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb, vaddr = map.vaddr + fb->offsets[0]; - if (import_attach) { - ret = dma_buf_begin_cpu_access(import_attach->dmabuf, DMA_FROM_DEVICE); - if (ret) - goto vunmap; - } + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); + if (ret) + goto vunmap; retry: if (compression) buf = gdrm->compress_buf; @@ -212,21 +223,58 @@ retry: } end_cpu_access: - if (import_attach) - dma_buf_end_cpu_access(import_attach->dmabuf, DMA_FROM_DEVICE); + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); vunmap: drm_gem_shmem_vunmap(fb->obj[0], &map); return ret; } +struct gud_usb_bulk_context { + struct timer_list timer; + struct usb_sg_request sgr; +}; + +static void gud_usb_bulk_timeout(struct timer_list *t) +{ + struct gud_usb_bulk_context *ctx = from_timer(ctx, t, timer); + + usb_sg_cancel(&ctx->sgr); +} + +static int gud_usb_bulk(struct gud_device *gdrm, size_t len) +{ + struct gud_usb_bulk_context ctx; + int ret; + + ret = usb_sg_init(&ctx.sgr, gud_to_usb_device(gdrm), gdrm->bulk_pipe, 0, + gdrm->bulk_sgt.sgl, gdrm->bulk_sgt.nents, len, GFP_KERNEL); + if (ret) + return ret; + + timer_setup_on_stack(&ctx.timer, gud_usb_bulk_timeout, 0); + mod_timer(&ctx.timer, jiffies + msecs_to_jiffies(3000)); + + usb_sg_wait(&ctx.sgr); + + if (!del_timer_sync(&ctx.timer)) + ret = -ETIMEDOUT; + else if (ctx.sgr.status < 0) + ret = ctx.sgr.status; + else if (ctx.sgr.bytes != len) + ret = -EIO; + + destroy_timer_on_stack(&ctx.timer); + + return ret; +} + static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb, const struct drm_format_info *format, struct drm_rect *rect) { - struct usb_device *usb = gud_to_usb_device(gdrm); struct gud_set_buffer_req req; - int ret, actual_length; size_t len, trlen; + int ret; drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect)); @@ -255,10 +303,7 @@ static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb, return ret; } - ret = usb_bulk_msg(usb, gdrm->bulk_pipe, gdrm->bulk_buf, trlen, - &actual_length, msecs_to_jiffies(3000)); - if (!ret && trlen != actual_length) - ret = -EIO; + ret = gud_usb_bulk(gdrm, trlen); if (ret) gdrm->stats_num_errors++; @@ -543,6 +588,8 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe, if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE) drm_rect_init(&damage, 0, 0, fb->width, fb->height); gud_fb_queue_damage(gdrm, fb, &damage); + if (!gud_async_flush) + flush_work(&gdrm->work); } if (!crtc->state->enable) diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c index fa8da0ef707e..89bed78f1466 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -152,8 +152,7 @@ static const struct drm_plane_funcs hibmc_plane_funcs = { }; static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = { - .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, - .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb, + DRM_GEM_VRAM_PLANE_HELPER_FUNCS, .atomic_check = hibmc_plane_atomic_check, .atomic_update = hibmc_plane_atomic_update, }; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index f4bc5386574a..610fc8e135f9 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ 
b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -19,7 +19,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_vram_helper.h> -#include <drm/drm_irq.h> #include <drm/drm_managed.h> #include <drm/drm_vblank.h> @@ -28,7 +27,7 @@ DEFINE_DRM_GEM_FOPS(hibmc_fops); -static irqreturn_t hibmc_drm_interrupt(int irq, void *arg) +static irqreturn_t hibmc_interrupt(int irq, void *arg) { struct drm_device *dev = (struct drm_device *)arg; struct hibmc_drm_private *priv = to_hibmc_drm_private(dev); @@ -63,7 +62,6 @@ static const struct drm_driver hibmc_driver = { .dumb_create = hibmc_dumb_create, .dumb_map_offset = drm_gem_ttm_dumb_map_offset, .gem_prime_mmap = drm_gem_prime_mmap, - .irq_handler = hibmc_drm_interrupt, }; static int __maybe_unused hibmc_pm_suspend(struct device *dev) @@ -251,10 +249,11 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv) static int hibmc_unload(struct drm_device *dev) { + struct pci_dev *pdev = to_pci_dev(dev->dev); + drm_atomic_helper_shutdown(dev); - if (dev->irq_enabled) - drm_irq_uninstall(dev); + free_irq(pdev->irq, dev); pci_disable_msi(to_pci_dev(dev->dev)); @@ -291,7 +290,9 @@ static int hibmc_load(struct drm_device *dev) if (ret) { drm_warn(dev, "enabling MSI failed: %d\n", ret); } else { - ret = drm_irq_install(dev, pdev->irq); + /* PCI devices require shared interrupts. */ + ret = request_irq(pdev->irq, hibmc_interrupt, IRQF_SHARED, + dev->driver->name, dev); if (ret) drm_warn(dev, "install irq failed: %d\n", ret); } @@ -314,7 +315,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev, struct drm_device *dev; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "hibmcdrmfb"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &hibmc_driver); if (ret) return ret; diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c index 9b565a057340..952cfdb1961d 100644 --- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c +++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c @@ -769,16 +769,9 @@ static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi) { struct drm_encoder *encoder = &dsi->encoder; struct drm_bridge *bridge = dsi->bridge; - int ret; /* associate the bridge to dsi encoder */ - ret = drm_bridge_attach(encoder, bridge, NULL, 0); - if (ret) { - DRM_ERROR("failed to attach external bridge\n"); - return ret; - } - - return 0; + return drm_bridge_attach(encoder, bridge, NULL, 0); } static int dsi_bind(struct device *dev, struct device *master, void *data) diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index e590e19db657..98ae9a48f3fe 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -185,8 +185,6 @@ static int kirin_drm_kms_init(struct drm_device *dev, DRM_ERROR("failed to initialize vblank.\n"); goto err_unbind_all; } - /* with irq_enabled = true, we can use the vblank feature. 
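A minimal sketch of the interrupt pattern the hibmc hunks above switch to, using hypothetical my_* names: with drm_irq_install()/drm_irq_uninstall() gone, the driver calls request_irq()/free_irq() itself and passes IRQF_SHARED because PCI interrupt lines can be shared between devices.

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>

static irqreturn_t my_irq_handler(int irq, void *arg)
{
        /*
         * arg is the drm_device passed as dev_id to request_irq() below.
         * Read and clear the hardware interrupt status here, then e.g.
         * signal vblank for the affected CRTC.
         */
        return IRQ_HANDLED;
}

static int my_irq_install(struct drm_device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);

        /* IRQF_SHARED because the PCI interrupt line may be shared. */
        return request_irq(pdev->irq, my_irq_handler, IRQF_SHARED,
                           dev->driver->name, dev);
}

static void my_irq_uninstall(struct drm_device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);

        free_irq(pdev->irq, dev);
}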
*/ - dev->irq_enabled = true; /* reset all the states of crtc/plane/encoder/connector */ drm_mode_config_reset(dev); diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index eb06c92c4bfd..cd818a629183 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -82,7 +82,7 @@ static int hyperv_setup_gen1(struct hyperv_drm_device *hv) return -ENODEV; } - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "hypervdrmfb"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &hyperv_driver); if (ret) { drm_err(dev, "Not able to remove boot fb\n"); return ret; @@ -127,7 +127,7 @@ static int hyperv_setup_gen2(struct hyperv_drm_device *hv, drm_aperture_remove_conflicting_framebuffers(screen_info.lfb_base, screen_info.lfb_size, false, - "hypervdrmfb"); + &hyperv_driver); hv->fb_size = (unsigned long)hv->mmio_megabytes * 1024 * 1024; diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 4f22cac1c49b..f57dfc74d6ce 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -155,6 +155,7 @@ gem-y += \ gem/i915_gem_stolen.o \ gem/i915_gem_throttle.o \ gem/i915_gem_tiling.o \ + gem/i915_gem_ttm.o \ gem/i915_gem_userptr.o \ gem/i915_gem_wait.o \ gem/i915_gemfs.o diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 16812488c5dd..43ec7fcd3f5d 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -729,8 +729,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); - enum pipe pipe = intel_crtc->pipe; + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); + enum pipe pipe = crtc->pipe; u32 tmp; enum port port; enum transcoder dsi_trans; @@ -1253,15 +1253,36 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state, gen11_dsi_set_transcoder_timings(encoder, pipe_config); } +/* + * Wa_1409054076:icl,jsl,ehl + * When pipe A is disabled and MIPI DSI is enabled on pipe B, + * the AMT KVMR feature will incorrectly see pipe A as enabled. + * Set 0x42080 bit 23=1 before enabling DSI on pipe B and leave + * it set while DSI is enabled on pipe B + */ +static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder, + enum pipe pipe, bool enable) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B) + intel_de_rmw(dev_priv, CHICKEN_PAR1_1, + IGNORE_KVMR_PIPE_A, + enable ? 
IGNORE_KVMR_PIPE_A : 0); +} static void gen11_dsi_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc); drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder); + /* Wa_1409054076:icl,jsl,ehl */ + icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, true); + /* step6d: enable dsi transcoder */ gen11_dsi_enable_transcoder(encoder); @@ -1415,6 +1436,7 @@ static void gen11_dsi_disable(struct intel_atomic_state *state, const struct drm_connector_state *old_conn_state) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + struct intel_crtc *crtc = to_intel_crtc(old_conn_state->crtc); /* step1: turn off backlight */ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); @@ -1423,6 +1445,9 @@ static void gen11_dsi_disable(struct intel_atomic_state *state, /* step2d,e: disable transcoder and wait */ gen11_dsi_disable_transcoder(encoder); + /* Wa_1409054076:icl,jsl,ehl */ + icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, false); + /* step2f,g: powerdown panel */ gen11_dsi_powerdown_panel(encoder); @@ -1548,6 +1573,22 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, pipe_config->mode_flags |= I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE; } +static void gen11_dsi_sync_state(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum pipe pipe = intel_crtc->pipe; + + /* wa verify 1409054076:icl,jsl,ehl */ + if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B && + !(intel_de_read(dev_priv, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A)) + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] BIOS left IGNORE_KVMR_PIPE_A cleared with pipe B enabled\n", + encoder->base.base.id, + encoder->base.name); +} + static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { @@ -1966,6 +2007,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) encoder->post_disable = gen11_dsi_post_disable; encoder->port = port; encoder->get_config = gen11_dsi_get_config; + encoder->sync_state = gen11_dsi_sync_state; encoder->update_pipe = intel_panel_update_backlight; encoder->compute_config = gen11_dsi_compute_config; encoder->get_hw_state = gen11_dsi_get_hw_state; diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 648f1c0d3d39..408f82b0dc7d 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -38,6 +38,7 @@ #include "intel_crt.h" #include "intel_crtc.h" #include "intel_ddi.h" +#include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_fdi.h" @@ -1081,6 +1082,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv) crt->base.enable_clock = hsw_ddi_enable_clock; crt->base.disable_clock = hsw_ddi_disable_clock; crt->base.is_clock_enabled = hsw_ddi_is_clock_enabled; + + intel_ddi_buf_trans_init(&crt->base); } else { if (HAS_PCH_SPLIT(dev_priv)) { crt->base.compute_config = pch_crt_compute_config; diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index 95ff1707b4bd..448c4d99ac35 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -163,12 +163,12 
@@ static void intel_crtc_free(struct intel_crtc *crtc) kfree(crtc); } -static void intel_crtc_destroy(struct drm_crtc *crtc) +static void intel_crtc_destroy(struct drm_crtc *_crtc) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc *crtc = to_intel_crtc(_crtc); - drm_crtc_cleanup(crtc); - kfree(intel_crtc); + drm_crtc_cleanup(&crtc->base); + kfree(crtc); } static int intel_crtc_late_register(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index be716b56e8e0..26a3aa73fcc4 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -95,24 +95,18 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder, * values in advance. This function programs the correct values for * DP/eDP/FDI use cases. */ -void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 iboost_bit = 0; int i, n_entries; enum port port = encoder->port; - const struct ddi_buf_trans *ddi_translations; - - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) - ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv, - &n_entries); - else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - ddi_translations = intel_ddi_get_buf_trans_edp(encoder, - &n_entries); - else - ddi_translations = intel_ddi_get_buf_trans_dp(encoder, - &n_entries); + const struct intel_ddi_buf_trans *ddi_translations; + + ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); + if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) + return; /* If we're boosting the current, set bit 31 of trans1 */ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) && @@ -121,9 +115,9 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder, for (i = 0; i < n_entries; i++) { intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i), - ddi_translations[i].trans1 | iboost_bit); + ddi_translations->entries[i].hsw.trans1 | iboost_bit); intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i), - ddi_translations[i].trans2); + ddi_translations->entries[i].hsw.trans2); } } @@ -132,17 +126,17 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder, * values in advance. This function programs the correct values for * HDMI/DVI use cases. 
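The table layout these intel_ddi.c hunks convert to can be summarized with a trimmed-down sketch (the real definitions live in i915's intel_ddi_buf_trans code; the names and field types below are simplified): each platform keeps its register values in one arm of a union, every table is wrapped in a struct that also records its length and HDMI default entry, and the encoder exposes a single get_buf_trans() hook so callers no longer need per-platform if/else ladders.

#include <linux/types.h>

/* Simplified: the i915 union also carries cnl, mg, dkl, ... variants. */
union example_buf_trans_entry {
        struct {
                u32 trans1;   /* DDI_BUF_TRANS_LO value */
                u32 trans2;   /* DDI_BUF_TRANS_HI value */
                u8 i_boost;
        } hsw;
        struct {
                u8 margin;
                u8 scale;
                u8 enable;
                u8 deemphasis;
        } bxt;
};

struct example_buf_trans {
        const union example_buf_trans_entry *entries;
        int num_entries;
        u8 hdmi_default_entry;
};

/*
 * With a per-encoder hook, callers simply do
 *         trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
 * and index trans->entries[level].hsw (or .bxt, ...) instead of picking a
 * platform-specific table themselves.
 */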
*/ -static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, - int level) +static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int level) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 iboost_bit = 0; int n_entries; enum port port = encoder->port; - const struct ddi_buf_trans *ddi_translations; - - ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); + const struct intel_ddi_buf_trans *ddi_translations; + ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) @@ -155,9 +149,9 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, /* Entry 9 is for HDMI: */ intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9), - ddi_translations[level].trans1 | iboost_bit); + ddi_translations->entries[level].hsw.trans1 | iboost_bit); intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9), - ddi_translations[level].trans2); + ddi_translations->entries[level].hsw.trans2); } void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, @@ -948,22 +942,16 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, iboost = intel_bios_encoder_dp_boost_level(encoder->devdata); if (iboost == 0) { - const struct ddi_buf_trans *ddi_translations; + const struct intel_ddi_buf_trans *ddi_translations; int n_entries; - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); - else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - ddi_translations = intel_ddi_get_buf_trans_edp(encoder, &n_entries); - else - ddi_translations = intel_ddi_get_buf_trans_dp(encoder, &n_entries); - + ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) level = n_entries - 1; - iboost = ddi_translations[level].i_boost; + iboost = ddi_translations->entries[level].hsw.i_boost; } /* Make sure that the requested I_boost is valid */ @@ -983,21 +971,21 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, int level) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct bxt_ddi_buf_trans *ddi_translations; + const struct intel_ddi_buf_trans *ddi_translations; enum port port = encoder->port; int n_entries; - ddi_translations = bxt_get_buf_trans(encoder, crtc_state, &n_entries); + ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) level = n_entries - 1; bxt_ddi_phy_set_signal_level(dev_priv, port, - ddi_translations[level].margin, - ddi_translations[level].scale, - ddi_translations[level].enable, - ddi_translations[level].deemphasis); + ddi_translations->entries[level].bxt.margin, + ddi_translations->entries[level].bxt.scale, + ddi_translations->entries[level].bxt.enable, + ddi_translations->entries[level].bxt.deemphasis); } static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp, @@ -1005,36 +993,9 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp, { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; - enum phy phy = 
intel_port_to_phy(dev_priv, port); int n_entries; - if (DISPLAY_VER(dev_priv) >= 12) { - if (intel_phy_is_combo(dev_priv, phy)) - tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries); - else if (IS_ALDERLAKE_P(dev_priv)) - adlp_get_dkl_buf_trans(encoder, crtc_state, &n_entries); - else - tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries); - } else if (DISPLAY_VER(dev_priv) == 11) { - if (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE)) - jsl_get_combo_buf_trans(encoder, crtc_state, &n_entries); - else if (IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)) - ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries); - else if (intel_phy_is_combo(dev_priv, phy)) - icl_get_combo_buf_trans(encoder, crtc_state, &n_entries); - else - icl_get_mg_buf_trans(encoder, crtc_state, &n_entries); - } else if (IS_CANNONLAKE(dev_priv)) { - cnl_get_buf_trans(encoder, crtc_state, &n_entries); - } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - bxt_get_buf_trans(encoder, crtc_state, &n_entries); - } else { - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - intel_ddi_get_buf_trans_edp(encoder, &n_entries); - else - intel_ddi_get_buf_trans_dp(encoder, &n_entries); - } + encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON(&dev_priv->drm, n_entries < 1)) n_entries = 1; @@ -1061,13 +1022,12 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder, int level) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct cnl_ddi_buf_trans *ddi_translations; + const struct intel_ddi_buf_trans *ddi_translations; enum port port = encoder->port; int n_entries, ln; u32 val; - ddi_translations = cnl_get_buf_trans(encoder, crtc_state, &n_entries); - + ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) @@ -1083,8 +1043,8 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder, val = intel_de_read(dev_priv, CNL_PORT_TX_DW2_LN0(port)); val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); - val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); - val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); + val |= SWING_SEL_UPPER(ddi_translations->entries[level].cnl.dw2_swing_sel); + val |= SWING_SEL_LOWER(ddi_translations->entries[level].cnl.dw2_swing_sel); /* Rcomp scalar is fixed as 0x98 for every table entry */ val |= RCOMP_SCALAR(0x98); intel_de_write(dev_priv, CNL_PORT_TX_DW2_GRP(port), val); @@ -1095,9 +1055,9 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder, val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port)); val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); - val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); - val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); - val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); + val |= POST_CURSOR_1(ddi_translations->entries[level].cnl.dw4_post_cursor_1); + val |= POST_CURSOR_2(ddi_translations->entries[level].cnl.dw4_post_cursor_2); + val |= CURSOR_COEFF(ddi_translations->entries[level].cnl.dw4_cursor_coeff); intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val); } @@ -1112,7 +1072,7 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder, /* Program PORT_TX_DW7 */ val = intel_de_read(dev_priv, CNL_PORT_TX_DW7_LN0(port)); val &= ~N_SCALAR_MASK; - val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); + val |= 
N_SCALAR(ddi_translations->entries[level].cnl.dw7_n_scalar); intel_de_write(dev_priv, CNL_PORT_TX_DW7_GRP(port), val); } @@ -1182,20 +1142,12 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, int level) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct cnl_ddi_buf_trans *ddi_translations; + const struct intel_ddi_buf_trans *ddi_translations; enum phy phy = intel_port_to_phy(dev_priv, encoder->port); int n_entries, ln; u32 val; - if (DISPLAY_VER(dev_priv) >= 12) - ddi_translations = tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries); - else if (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE)) - ddi_translations = jsl_get_combo_buf_trans(encoder, crtc_state, &n_entries); - else if (IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)) - ddi_translations = ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries); - else - ddi_translations = icl_get_combo_buf_trans(encoder, crtc_state, &n_entries); - + ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) @@ -1223,8 +1175,8 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy)); val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); - val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); - val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); + val |= SWING_SEL_UPPER(ddi_translations->entries[level].cnl.dw2_swing_sel); + val |= SWING_SEL_LOWER(ddi_translations->entries[level].cnl.dw2_swing_sel); /* Program Rcomp scalar for every table entry */ val |= RCOMP_SCALAR(0x98); intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val); @@ -1235,16 +1187,16 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy)); val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); - val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); - val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); - val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); + val |= POST_CURSOR_1(ddi_translations->entries[level].cnl.dw4_post_cursor_1); + val |= POST_CURSOR_2(ddi_translations->entries[level].cnl.dw4_post_cursor_2); + val |= CURSOR_COEFF(ddi_translations->entries[level].cnl.dw4_cursor_coeff); intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val); } /* Program PORT_TX_DW7 */ val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN0(phy)); val &= ~N_SCALAR_MASK; - val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); + val |= N_SCALAR(ddi_translations->entries[level].cnl.dw7_n_scalar); intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val); } @@ -1315,15 +1267,14 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); - const struct icl_mg_phy_ddi_buf_trans *ddi_translations; + const struct intel_ddi_buf_trans *ddi_translations; int n_entries, ln; u32 val; if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT) return; - ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries); - + ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) @@ -1345,13 +1296,13 
@@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port)); val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; val |= CRI_TXDEEMPH_OVERRIDE_17_12( - ddi_translations[level].cri_txdeemph_override_17_12); + ddi_translations->entries[level].mg.cri_txdeemph_override_17_12); intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val); val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port)); val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; val |= CRI_TXDEEMPH_OVERRIDE_17_12( - ddi_translations[level].cri_txdeemph_override_17_12); + ddi_translations->entries[level].mg.cri_txdeemph_override_17_12); intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val); } @@ -1361,9 +1312,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | CRI_TXDEEMPH_OVERRIDE_5_0_MASK); val |= CRI_TXDEEMPH_OVERRIDE_5_0( - ddi_translations[level].cri_txdeemph_override_5_0) | + ddi_translations->entries[level].mg.cri_txdeemph_override_5_0) | CRI_TXDEEMPH_OVERRIDE_11_6( - ddi_translations[level].cri_txdeemph_override_11_6) | + ddi_translations->entries[level].mg.cri_txdeemph_override_11_6) | CRI_TXDEEMPH_OVERRIDE_EN; intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val); @@ -1371,9 +1322,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | CRI_TXDEEMPH_OVERRIDE_5_0_MASK); val |= CRI_TXDEEMPH_OVERRIDE_5_0( - ddi_translations[level].cri_txdeemph_override_5_0) | + ddi_translations->entries[level].mg.cri_txdeemph_override_5_0) | CRI_TXDEEMPH_OVERRIDE_11_6( - ddi_translations[level].cri_txdeemph_override_11_6) | + ddi_translations->entries[level].mg.cri_txdeemph_override_11_6) | CRI_TXDEEMPH_OVERRIDE_EN; intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val); @@ -1453,18 +1404,14 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); - const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations; + const struct intel_ddi_buf_trans *ddi_translations; u32 val, dpcnt_mask, dpcnt_val; int n_entries, ln; if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT) return; - if (IS_ALDERLAKE_P(dev_priv)) - ddi_translations = adlp_get_dkl_buf_trans(encoder, crtc_state, &n_entries); - else - ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries); - + ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) @@ -1473,9 +1420,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK | DKL_TX_DE_EMPAHSIS_COEFF_MASK | DKL_TX_VSWING_CONTROL_MASK); - dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations[level].dkl_vswing_control); - dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations[level].dkl_de_emphasis_control); - dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control); + dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations->entries[level].dkl.dkl_vswing_control); + dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations->entries[level].dkl.dkl_de_emphasis_control); + dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations->entries[level].dkl.dkl_preshoot_control); for (ln = 0; ln < 2; ln++) { intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), @@ -2715,7 +2662,7 @@ static 
void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) bxt_ddi_vswing_sequence(encoder, crtc_state, level); else - intel_prepare_dp_ddi_buffers(encoder, crtc_state); + hsw_prepare_dp_ddi_buffers(encoder, crtc_state); intel_ddi_power_up_lanes(encoder, crtc_state); @@ -2823,6 +2770,7 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state, conn_state); /* FIXME precompute everything properly */ + /* FIXME how do we turn infoframes off again? */ if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink) dig_port->set_infoframes(encoder, crtc_state->has_infoframe, @@ -3162,7 +3110,7 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) bxt_ddi_vswing_sequence(encoder, crtc_state, level); else - intel_prepare_hdmi_ddi_buffers(encoder, level); + hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state, level); if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) skl_ddi_set_iboost(encoder, crtc_state, level); @@ -3590,7 +3538,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; struct intel_digital_port *dig_port = enc_to_dig_port(encoder); u32 temp, flags = 0; @@ -3653,7 +3601,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); pipe_config->lane_count = ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; - intel_dp_get_m_n(intel_crtc, pipe_config); + intel_dp_get_m_n(crtc, pipe_config); if (DISPLAY_VER(dev_priv) >= 11) { i915_reg_t dp_tp_ctl = dp_tp_ctl_reg(encoder, pipe_config); @@ -3683,7 +3631,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, pipe_config->mst_master_transcoder = REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp); - intel_dp_get_m_n(intel_crtc, pipe_config); + intel_dp_get_m_n(crtc, pipe_config); pipe_config->infoframes.enable |= intel_hdmi_infoframes_enabled(encoder, pipe_config); @@ -4509,6 +4457,36 @@ static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port) return false; } +static void intel_ddi_encoder_suspend(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + intel_dp_encoder_suspend(encoder); + + if (!intel_phy_is_tc(i915, phy)) + return; + + intel_tc_port_disconnect_phy(dig_port); +} + +static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + intel_dp_encoder_shutdown(encoder); + + if (!intel_phy_is_tc(i915, phy)) + return; + + intel_tc_port_disconnect_phy(dig_port); +} + #define port_tc_name(port) ((port) - PORT_TC1 + '1') #define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1') @@ -4618,8 +4596,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->get_hw_state = 
intel_ddi_get_hw_state; encoder->sync_state = intel_ddi_sync_state; encoder->initial_fastset_check = intel_ddi_initial_fastset_check; - encoder->suspend = intel_dp_encoder_suspend; - encoder->shutdown = intel_dp_encoder_shutdown; + encoder->suspend = intel_ddi_encoder_suspend; + encoder->shutdown = intel_ddi_encoder_shutdown; encoder->get_power_domains = intel_ddi_get_power_domains; encoder->type = INTEL_OUTPUT_DDI; @@ -4687,6 +4665,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->get_config = hsw_ddi_get_config; } + intel_ddi_buf_trans_init(encoder); + if (DISPLAY_VER(dev_priv) >= 13) encoder->hpd_pin = xelpd_hpd_pin(dev_priv, port); else if (IS_DG1(dev_priv)) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 59c6b01d4199..7d448485d887 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -40,8 +40,8 @@ bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder); void hsw_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state); struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder); -void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state); +void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, enum port port); void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c index 8bfd00f49f2a..63b1ae830d9a 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -13,858 +13,1129 @@ * them for both DP and FDI transports, allowing those ports to * automatically adapt to HDMI connections as well */ -static const struct ddi_buf_trans hsw_ddi_translations_dp[] = { - { 0x00FFFFFF, 0x0006000E, 0x0 }, - { 0x00D75FFF, 0x0005000A, 0x0 }, - { 0x00C30FFF, 0x00040006, 0x0 }, - { 0x80AAAFFF, 0x000B0000, 0x0 }, - { 0x00FFFFFF, 0x0005000A, 0x0 }, - { 0x00D75FFF, 0x000C0004, 0x0 }, - { 0x80C30FFF, 0x000B0000, 0x0 }, - { 0x00FFFFFF, 0x00040006, 0x0 }, - { 0x80D75FFF, 0x000B0000, 0x0 }, -}; - -static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = { - { 0x00FFFFFF, 0x0007000E, 0x0 }, - { 0x00D75FFF, 0x000F000A, 0x0 }, - { 0x00C30FFF, 0x00060006, 0x0 }, - { 0x00AAAFFF, 0x001E0000, 0x0 }, - { 0x00FFFFFF, 0x000F000A, 0x0 }, - { 0x00D75FFF, 0x00160004, 0x0 }, - { 0x00C30FFF, 0x001E0000, 0x0 }, - { 0x00FFFFFF, 0x00060006, 0x0 }, - { 0x00D75FFF, 0x001E0000, 0x0 }, -}; - -static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = { - /* Idx NT mV d T mV d db */ - { 0x00FFFFFF, 0x0006000E, 0x0 },/* 0: 400 400 0 */ - { 0x00E79FFF, 0x000E000C, 0x0 },/* 1: 400 500 2 */ - { 0x00D75FFF, 0x0005000A, 0x0 },/* 2: 400 600 3.5 */ - { 0x00FFFFFF, 0x0005000A, 0x0 },/* 3: 600 600 0 */ - { 0x00E79FFF, 0x001D0007, 0x0 },/* 4: 600 750 2 */ - { 0x00D75FFF, 0x000C0004, 0x0 },/* 5: 600 900 3.5 */ - { 0x00FFFFFF, 0x00040006, 0x0 },/* 6: 800 800 0 */ - { 0x80E79FFF, 0x00030002, 0x0 },/* 7: 800 1000 2 */ - { 0x00FFFFFF, 0x00140005, 0x0 },/* 8: 850 850 0 */ - { 0x00FFFFFF, 0x000C0004, 0x0 },/* 9: 900 900 0 */ - { 0x00FFFFFF, 0x001C0003, 0x0 },/* 10: 950 950 0 */ - { 0x80FFFFFF, 0x00030002, 0x0 },/* 11: 1000 1000 0 */ -}; - -static const struct ddi_buf_trans 
bdw_ddi_translations_edp[] = { - { 0x00FFFFFF, 0x00000012, 0x0 }, - { 0x00EBAFFF, 0x00020011, 0x0 }, - { 0x00C71FFF, 0x0006000F, 0x0 }, - { 0x00AAAFFF, 0x000E000A, 0x0 }, - { 0x00FFFFFF, 0x00020011, 0x0 }, - { 0x00DB6FFF, 0x0005000F, 0x0 }, - { 0x00BEEFFF, 0x000A000C, 0x0 }, - { 0x00FFFFFF, 0x0005000F, 0x0 }, - { 0x00DB6FFF, 0x000A000C, 0x0 }, -}; - -static const struct ddi_buf_trans bdw_ddi_translations_dp[] = { - { 0x00FFFFFF, 0x0007000E, 0x0 }, - { 0x00D75FFF, 0x000E000A, 0x0 }, - { 0x00BEFFFF, 0x00140006, 0x0 }, - { 0x80B2CFFF, 0x001B0002, 0x0 }, - { 0x00FFFFFF, 0x000E000A, 0x0 }, - { 0x00DB6FFF, 0x00160005, 0x0 }, - { 0x80C71FFF, 0x001A0002, 0x0 }, - { 0x00F7DFFF, 0x00180004, 0x0 }, - { 0x80D75FFF, 0x001B0002, 0x0 }, -}; - -static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = { - { 0x00FFFFFF, 0x0001000E, 0x0 }, - { 0x00D75FFF, 0x0004000A, 0x0 }, - { 0x00C30FFF, 0x00070006, 0x0 }, - { 0x00AAAFFF, 0x000C0000, 0x0 }, - { 0x00FFFFFF, 0x0004000A, 0x0 }, - { 0x00D75FFF, 0x00090004, 0x0 }, - { 0x00C30FFF, 0x000C0000, 0x0 }, - { 0x00FFFFFF, 0x00070006, 0x0 }, - { 0x00D75FFF, 0x000C0000, 0x0 }, -}; - -static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = { - /* Idx NT mV d T mV df db */ - { 0x00FFFFFF, 0x0007000E, 0x0 },/* 0: 400 400 0 */ - { 0x00D75FFF, 0x000E000A, 0x0 },/* 1: 400 600 3.5 */ - { 0x00BEFFFF, 0x00140006, 0x0 },/* 2: 400 800 6 */ - { 0x00FFFFFF, 0x0009000D, 0x0 },/* 3: 450 450 0 */ - { 0x00FFFFFF, 0x000E000A, 0x0 },/* 4: 600 600 0 */ - { 0x00D7FFFF, 0x00140006, 0x0 },/* 5: 600 800 2.5 */ - { 0x80CB2FFF, 0x001B0002, 0x0 },/* 6: 600 1000 4.5 */ - { 0x00FFFFFF, 0x00140006, 0x0 },/* 7: 800 800 0 */ - { 0x80E79FFF, 0x001B0002, 0x0 },/* 8: 800 1000 2 */ - { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */ +static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_dp[] = { + { .hsw = { 0x00FFFFFF, 0x0006000E, 0x0 } }, + { .hsw = { 0x00D75FFF, 0x0005000A, 0x0 } }, + { .hsw = { 0x00C30FFF, 0x00040006, 0x0 } }, + { .hsw = { 0x80AAAFFF, 0x000B0000, 0x0 } }, + { .hsw = { 0x00FFFFFF, 0x0005000A, 0x0 } }, + { .hsw = { 0x00D75FFF, 0x000C0004, 0x0 } }, + { .hsw = { 0x80C30FFF, 0x000B0000, 0x0 } }, + { .hsw = { 0x00FFFFFF, 0x00040006, 0x0 } }, + { .hsw = { 0x80D75FFF, 0x000B0000, 0x0 } }, +}; + +static const struct intel_ddi_buf_trans hsw_ddi_translations_dp = { + .entries = _hsw_ddi_translations_dp, + .num_entries = ARRAY_SIZE(_hsw_ddi_translations_dp), +}; + +static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_fdi[] = { + { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } }, + { .hsw = { 0x00D75FFF, 0x000F000A, 0x0 } }, + { .hsw = { 0x00C30FFF, 0x00060006, 0x0 } }, + { .hsw = { 0x00AAAFFF, 0x001E0000, 0x0 } }, + { .hsw = { 0x00FFFFFF, 0x000F000A, 0x0 } }, + { .hsw = { 0x00D75FFF, 0x00160004, 0x0 } }, + { .hsw = { 0x00C30FFF, 0x001E0000, 0x0 } }, + { .hsw = { 0x00FFFFFF, 0x00060006, 0x0 } }, + { .hsw = { 0x00D75FFF, 0x001E0000, 0x0 } }, +}; + +static const struct intel_ddi_buf_trans hsw_ddi_translations_fdi = { + .entries = _hsw_ddi_translations_fdi, + .num_entries = ARRAY_SIZE(_hsw_ddi_translations_fdi), +}; + +static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_hdmi[] = { + /* Idx NT mV d T mV d db */ + { .hsw = { 0x00FFFFFF, 0x0006000E, 0x0 } }, /* 0: 400 400 0 */ + { .hsw = { 0x00E79FFF, 0x000E000C, 0x0 } }, /* 1: 400 500 2 */ + { .hsw = { 0x00D75FFF, 0x0005000A, 0x0 } }, /* 2: 400 600 3.5 */ + { .hsw = { 0x00FFFFFF, 0x0005000A, 0x0 } }, /* 3: 600 600 0 */ + { .hsw = { 0x00E79FFF, 0x001D0007, 0x0 } }, /* 4: 600 750 2 */ + { .hsw = { 
0x00D75FFF, 0x000C0004, 0x0 } }, /* 5: 600 900 3.5 */ + { .hsw = { 0x00FFFFFF, 0x00040006, 0x0 } }, /* 6: 800 800 0 */ + { .hsw = { 0x80E79FFF, 0x00030002, 0x0 } }, /* 7: 800 1000 2 */ + { .hsw = { 0x00FFFFFF, 0x00140005, 0x0 } }, /* 8: 850 850 0 */ + { .hsw = { 0x00FFFFFF, 0x000C0004, 0x0 } }, /* 9: 900 900 0 */ + { .hsw = { 0x00FFFFFF, 0x001C0003, 0x0 } }, /* 10: 950 950 0 */ + { .hsw = { 0x80FFFFFF, 0x00030002, 0x0 } }, /* 11: 1000 1000 0 */ +}; + +static const struct intel_ddi_buf_trans hsw_ddi_translations_hdmi = { + .entries = _hsw_ddi_translations_hdmi, + .num_entries = ARRAY_SIZE(_hsw_ddi_translations_hdmi), + .hdmi_default_entry = 6, +}; + +static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_edp[] = { + { .hsw = { 0x00FFFFFF, 0x00000012, 0x0 } }, + { .hsw = { 0x00EBAFFF, 0x00020011, 0x0 } }, + { .hsw = { 0x00C71FFF, 0x0006000F, 0x0 } }, + { .hsw = { 0x00AAAFFF, 0x000E000A, 0x0 } }, + { .hsw = { 0x00FFFFFF, 0x00020011, 0x0 } }, + { .hsw = { 0x00DB6FFF, 0x0005000F, 0x0 } }, + { .hsw = { 0x00BEEFFF, 0x000A000C, 0x0 } }, + { .hsw = { 0x00FFFFFF, 0x0005000F, 0x0 } }, + { .hsw = { 0x00DB6FFF, 0x000A000C, 0x0 } }, +}; + +static const struct intel_ddi_buf_trans bdw_ddi_translations_edp = { + .entries = _bdw_ddi_translations_edp, + .num_entries = ARRAY_SIZE(_bdw_ddi_translations_edp), +}; + +static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_dp[] = { + { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } }, + { .hsw = { 0x00D75FFF, 0x000E000A, 0x0 } }, + { .hsw = { 0x00BEFFFF, 0x00140006, 0x0 } }, + { .hsw = { 0x80B2CFFF, 0x001B0002, 0x0 } }, + { .hsw = { 0x00FFFFFF, 0x000E000A, 0x0 } }, + { .hsw = { 0x00DB6FFF, 0x00160005, 0x0 } }, + { .hsw = { 0x80C71FFF, 0x001A0002, 0x0 } }, + { .hsw = { 0x00F7DFFF, 0x00180004, 0x0 } }, + { .hsw = { 0x80D75FFF, 0x001B0002, 0x0 } }, +}; + +static const struct intel_ddi_buf_trans bdw_ddi_translations_dp = { + .entries = _bdw_ddi_translations_dp, + .num_entries = ARRAY_SIZE(_bdw_ddi_translations_dp), +}; + +static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_fdi[] = { + { .hsw = { 0x00FFFFFF, 0x0001000E, 0x0 } }, + { .hsw = { 0x00D75FFF, 0x0004000A, 0x0 } }, + { .hsw = { 0x00C30FFF, 0x00070006, 0x0 } }, + { .hsw = { 0x00AAAFFF, 0x000C0000, 0x0 } }, + { .hsw = { 0x00FFFFFF, 0x0004000A, 0x0 } }, + { .hsw = { 0x00D75FFF, 0x00090004, 0x0 } }, + { .hsw = { 0x00C30FFF, 0x000C0000, 0x0 } }, + { .hsw = { 0x00FFFFFF, 0x00070006, 0x0 } }, + { .hsw = { 0x00D75FFF, 0x000C0000, 0x0 } }, +}; + +static const struct intel_ddi_buf_trans bdw_ddi_translations_fdi = { + .entries = _bdw_ddi_translations_fdi, + .num_entries = ARRAY_SIZE(_bdw_ddi_translations_fdi), +}; + +static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_hdmi[] = { + /* Idx NT mV d T mV df db */ + { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } }, /* 0: 400 400 0 */ + { .hsw = { 0x00D75FFF, 0x000E000A, 0x0 } }, /* 1: 400 600 3.5 */ + { .hsw = { 0x00BEFFFF, 0x00140006, 0x0 } }, /* 2: 400 800 6 */ + { .hsw = { 0x00FFFFFF, 0x0009000D, 0x0 } }, /* 3: 450 450 0 */ + { .hsw = { 0x00FFFFFF, 0x000E000A, 0x0 } }, /* 4: 600 600 0 */ + { .hsw = { 0x00D7FFFF, 0x00140006, 0x0 } }, /* 5: 600 800 2.5 */ + { .hsw = { 0x80CB2FFF, 0x001B0002, 0x0 } }, /* 6: 600 1000 4.5 */ + { .hsw = { 0x00FFFFFF, 0x00140006, 0x0 } }, /* 7: 800 800 0 */ + { .hsw = { 0x80E79FFF, 0x001B0002, 0x0 } }, /* 8: 800 1000 2 */ + { .hsw = { 0x80FFFFFF, 0x001B0002, 0x0 } }, /* 9: 1000 1000 0 */ +}; + +static const struct intel_ddi_buf_trans bdw_ddi_translations_hdmi = { + .entries = _bdw_ddi_translations_hdmi, + 
.num_entries = ARRAY_SIZE(_bdw_ddi_translations_hdmi), + .hdmi_default_entry = 7, }; /* Skylake H and S */ -static const struct ddi_buf_trans skl_ddi_translations_dp[] = { - { 0x00002016, 0x000000A0, 0x0 }, - { 0x00005012, 0x0000009B, 0x0 }, - { 0x00007011, 0x00000088, 0x0 }, - { 0x80009010, 0x000000C0, 0x1 }, - { 0x00002016, 0x0000009B, 0x0 }, - { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000C0, 0x1 }, - { 0x00002016, 0x000000DF, 0x0 }, - { 0x80005012, 0x000000C0, 0x1 }, +static const union intel_ddi_buf_trans_entry _skl_ddi_translations_dp[] = { + { .hsw = { 0x00002016, 0x000000A0, 0x0 } }, + { .hsw = { 0x00005012, 0x0000009B, 0x0 } }, + { .hsw = { 0x00007011, 0x00000088, 0x0 } }, + { .hsw = { 0x80009010, 0x000000C0, 0x1 } }, + { .hsw = { 0x00002016, 0x0000009B, 0x0 } }, + { .hsw = { 0x00005012, 0x00000088, 0x0 } }, + { .hsw = { 0x80007011, 0x000000C0, 0x1 } }, + { .hsw = { 0x00002016, 0x000000DF, 0x0 } }, + { .hsw = { 0x80005012, 0x000000C0, 0x1 } }, +}; + +static const struct intel_ddi_buf_trans skl_ddi_translations_dp = { + .entries = _skl_ddi_translations_dp, + .num_entries = ARRAY_SIZE(_skl_ddi_translations_dp), }; /* Skylake U */ -static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { - { 0x0000201B, 0x000000A2, 0x0 }, - { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000CD, 0x1 }, - { 0x80009010, 0x000000C0, 0x1 }, - { 0x0000201B, 0x0000009D, 0x0 }, - { 0x80005012, 0x000000C0, 0x1 }, - { 0x80007011, 0x000000C0, 0x1 }, - { 0x00002016, 0x00000088, 0x0 }, - { 0x80005012, 0x000000C0, 0x1 }, +static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_dp[] = { + { .hsw = { 0x0000201B, 0x000000A2, 0x0 } }, + { .hsw = { 0x00005012, 0x00000088, 0x0 } }, + { .hsw = { 0x80007011, 0x000000CD, 0x1 } }, + { .hsw = { 0x80009010, 0x000000C0, 0x1 } }, + { .hsw = { 0x0000201B, 0x0000009D, 0x0 } }, + { .hsw = { 0x80005012, 0x000000C0, 0x1 } }, + { .hsw = { 0x80007011, 0x000000C0, 0x1 } }, + { .hsw = { 0x00002016, 0x00000088, 0x0 } }, + { .hsw = { 0x80005012, 0x000000C0, 0x1 } }, +}; + +static const struct intel_ddi_buf_trans skl_u_ddi_translations_dp = { + .entries = _skl_u_ddi_translations_dp, + .num_entries = ARRAY_SIZE(_skl_u_ddi_translations_dp), }; /* Skylake Y */ -static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { - { 0x00000018, 0x000000A2, 0x0 }, - { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000CD, 0x3 }, - { 0x80009010, 0x000000C0, 0x3 }, - { 0x00000018, 0x0000009D, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, - { 0x80007011, 0x000000C0, 0x3 }, - { 0x00000018, 0x00000088, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, +static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_dp[] = { + { .hsw = { 0x00000018, 0x000000A2, 0x0 } }, + { .hsw = { 0x00005012, 0x00000088, 0x0 } }, + { .hsw = { 0x80007011, 0x000000CD, 0x3 } }, + { .hsw = { 0x80009010, 0x000000C0, 0x3 } }, + { .hsw = { 0x00000018, 0x0000009D, 0x0 } }, + { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, + { .hsw = { 0x80007011, 0x000000C0, 0x3 } }, + { .hsw = { 0x00000018, 0x00000088, 0x0 } }, + { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, +}; + +static const struct intel_ddi_buf_trans skl_y_ddi_translations_dp = { + .entries = _skl_y_ddi_translations_dp, + .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_dp), }; /* Kabylake H and S */ -static const struct ddi_buf_trans kbl_ddi_translations_dp[] = { - { 0x00002016, 0x000000A0, 0x0 }, - { 0x00005012, 0x0000009B, 0x0 }, - { 0x00007011, 0x00000088, 0x0 }, - { 0x80009010, 0x000000C0, 0x1 }, - { 0x00002016, 0x0000009B, 0x0 }, - { 
0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000C0, 0x1 }, - { 0x00002016, 0x00000097, 0x0 }, - { 0x80005012, 0x000000C0, 0x1 }, +static const union intel_ddi_buf_trans_entry _kbl_ddi_translations_dp[] = { + { .hsw = { 0x00002016, 0x000000A0, 0x0 } }, + { .hsw = { 0x00005012, 0x0000009B, 0x0 } }, + { .hsw = { 0x00007011, 0x00000088, 0x0 } }, + { .hsw = { 0x80009010, 0x000000C0, 0x1 } }, + { .hsw = { 0x00002016, 0x0000009B, 0x0 } }, + { .hsw = { 0x00005012, 0x00000088, 0x0 } }, + { .hsw = { 0x80007011, 0x000000C0, 0x1 } }, + { .hsw = { 0x00002016, 0x00000097, 0x0 } }, + { .hsw = { 0x80005012, 0x000000C0, 0x1 } }, +}; + +static const struct intel_ddi_buf_trans kbl_ddi_translations_dp = { + .entries = _kbl_ddi_translations_dp, + .num_entries = ARRAY_SIZE(_kbl_ddi_translations_dp), }; /* Kabylake U */ -static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = { - { 0x0000201B, 0x000000A1, 0x0 }, - { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000CD, 0x3 }, - { 0x80009010, 0x000000C0, 0x3 }, - { 0x0000201B, 0x0000009D, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, - { 0x80007011, 0x000000C0, 0x3 }, - { 0x00002016, 0x0000004F, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, +static const union intel_ddi_buf_trans_entry _kbl_u_ddi_translations_dp[] = { + { .hsw = { 0x0000201B, 0x000000A1, 0x0 } }, + { .hsw = { 0x00005012, 0x00000088, 0x0 } }, + { .hsw = { 0x80007011, 0x000000CD, 0x3 } }, + { .hsw = { 0x80009010, 0x000000C0, 0x3 } }, + { .hsw = { 0x0000201B, 0x0000009D, 0x0 } }, + { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, + { .hsw = { 0x80007011, 0x000000C0, 0x3 } }, + { .hsw = { 0x00002016, 0x0000004F, 0x0 } }, + { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, +}; + +static const struct intel_ddi_buf_trans kbl_u_ddi_translations_dp = { + .entries = _kbl_u_ddi_translations_dp, + .num_entries = ARRAY_SIZE(_kbl_u_ddi_translations_dp), }; /* Kabylake Y */ -static const struct ddi_buf_trans kbl_y_ddi_translations_dp[] = { - { 0x00001017, 0x000000A1, 0x0 }, - { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000CD, 0x3 }, - { 0x8000800F, 0x000000C0, 0x3 }, - { 0x00001017, 0x0000009D, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, - { 0x80007011, 0x000000C0, 0x3 }, - { 0x00001017, 0x0000004C, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, +static const union intel_ddi_buf_trans_entry _kbl_y_ddi_translations_dp[] = { + { .hsw = { 0x00001017, 0x000000A1, 0x0 } }, + { .hsw = { 0x00005012, 0x00000088, 0x0 } }, + { .hsw = { 0x80007011, 0x000000CD, 0x3 } }, + { .hsw = { 0x8000800F, 0x000000C0, 0x3 } }, + { .hsw = { 0x00001017, 0x0000009D, 0x0 } }, + { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, + { .hsw = { 0x80007011, 0x000000C0, 0x3 } }, + { .hsw = { 0x00001017, 0x0000004C, 0x0 } }, + { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, +}; + +static const struct intel_ddi_buf_trans kbl_y_ddi_translations_dp = { + .entries = _kbl_y_ddi_translations_dp, + .num_entries = ARRAY_SIZE(_kbl_y_ddi_translations_dp), }; /* * Skylake/Kabylake H and S * eDP 1.4 low vswing translation parameters */ -static const struct ddi_buf_trans skl_ddi_translations_edp[] = { - { 0x00000018, 0x000000A8, 0x0 }, - { 0x00004013, 0x000000A9, 0x0 }, - { 0x00007011, 0x000000A2, 0x0 }, - { 0x00009010, 0x0000009C, 0x0 }, - { 0x00000018, 0x000000A9, 0x0 }, - { 0x00006013, 0x000000A2, 0x0 }, - { 0x00007011, 0x000000A6, 0x0 }, - { 0x00000018, 0x000000AB, 0x0 }, - { 0x00007013, 0x0000009F, 0x0 }, - { 0x00000018, 0x000000DF, 0x0 }, +static const union intel_ddi_buf_trans_entry _skl_ddi_translations_edp[] = { + { .hsw = { 0x00000018, 0x000000A8, 0x0 } 
}, + { .hsw = { 0x00004013, 0x000000A9, 0x0 } }, + { .hsw = { 0x00007011, 0x000000A2, 0x0 } }, + { .hsw = { 0x00009010, 0x0000009C, 0x0 } }, + { .hsw = { 0x00000018, 0x000000A9, 0x0 } }, + { .hsw = { 0x00006013, 0x000000A2, 0x0 } }, + { .hsw = { 0x00007011, 0x000000A6, 0x0 } }, + { .hsw = { 0x00000018, 0x000000AB, 0x0 } }, + { .hsw = { 0x00007013, 0x0000009F, 0x0 } }, + { .hsw = { 0x00000018, 0x000000DF, 0x0 } }, +}; + +static const struct intel_ddi_buf_trans skl_ddi_translations_edp = { + .entries = _skl_ddi_translations_edp, + .num_entries = ARRAY_SIZE(_skl_ddi_translations_edp), }; /* * Skylake/Kabylake U * eDP 1.4 low vswing translation parameters */ -static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = { - { 0x00000018, 0x000000A8, 0x0 }, - { 0x00004013, 0x000000A9, 0x0 }, - { 0x00007011, 0x000000A2, 0x0 }, - { 0x00009010, 0x0000009C, 0x0 }, - { 0x00000018, 0x000000A9, 0x0 }, - { 0x00006013, 0x000000A2, 0x0 }, - { 0x00007011, 0x000000A6, 0x0 }, - { 0x00002016, 0x000000AB, 0x0 }, - { 0x00005013, 0x0000009F, 0x0 }, - { 0x00000018, 0x000000DF, 0x0 }, +static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_edp[] = { + { .hsw = { 0x00000018, 0x000000A8, 0x0 } }, + { .hsw = { 0x00004013, 0x000000A9, 0x0 } }, + { .hsw = { 0x00007011, 0x000000A2, 0x0 } }, + { .hsw = { 0x00009010, 0x0000009C, 0x0 } }, + { .hsw = { 0x00000018, 0x000000A9, 0x0 } }, + { .hsw = { 0x00006013, 0x000000A2, 0x0 } }, + { .hsw = { 0x00007011, 0x000000A6, 0x0 } }, + { .hsw = { 0x00002016, 0x000000AB, 0x0 } }, + { .hsw = { 0x00005013, 0x0000009F, 0x0 } }, + { .hsw = { 0x00000018, 0x000000DF, 0x0 } }, +}; + +static const struct intel_ddi_buf_trans skl_u_ddi_translations_edp = { + .entries = _skl_u_ddi_translations_edp, + .num_entries = ARRAY_SIZE(_skl_u_ddi_translations_edp), }; /* * Skylake/Kabylake Y * eDP 1.4 low vswing translation parameters */ -static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = { - { 0x00000018, 0x000000A8, 0x0 }, - { 0x00004013, 0x000000AB, 0x0 }, - { 0x00007011, 0x000000A4, 0x0 }, - { 0x00009010, 0x000000DF, 0x0 }, - { 0x00000018, 0x000000AA, 0x0 }, - { 0x00006013, 0x000000A4, 0x0 }, - { 0x00007011, 0x0000009D, 0x0 }, - { 0x00000018, 0x000000A0, 0x0 }, - { 0x00006012, 0x000000DF, 0x0 }, - { 0x00000018, 0x0000008A, 0x0 }, +static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_edp[] = { + { .hsw = { 0x00000018, 0x000000A8, 0x0 } }, + { .hsw = { 0x00004013, 0x000000AB, 0x0 } }, + { .hsw = { 0x00007011, 0x000000A4, 0x0 } }, + { .hsw = { 0x00009010, 0x000000DF, 0x0 } }, + { .hsw = { 0x00000018, 0x000000AA, 0x0 } }, + { .hsw = { 0x00006013, 0x000000A4, 0x0 } }, + { .hsw = { 0x00007011, 0x0000009D, 0x0 } }, + { .hsw = { 0x00000018, 0x000000A0, 0x0 } }, + { .hsw = { 0x00006012, 0x000000DF, 0x0 } }, + { .hsw = { 0x00000018, 0x0000008A, 0x0 } }, +}; + +static const struct intel_ddi_buf_trans skl_y_ddi_translations_edp = { + .entries = _skl_y_ddi_translations_edp, + .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_edp), }; /* Skylake/Kabylake U, H and S */ -static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { - { 0x00000018, 0x000000AC, 0x0 }, - { 0x00005012, 0x0000009D, 0x0 }, - { 0x00007011, 0x00000088, 0x0 }, - { 0x00000018, 0x000000A1, 0x0 }, - { 0x00000018, 0x00000098, 0x0 }, - { 0x00004013, 0x00000088, 0x0 }, - { 0x80006012, 0x000000CD, 0x1 }, - { 0x00000018, 0x000000DF, 0x0 }, - { 0x80003015, 0x000000CD, 0x1 }, /* Default */ - { 0x80003015, 0x000000C0, 0x1 }, - { 0x80000018, 0x000000C0, 0x1 }, +static const union 
intel_ddi_buf_trans_entry _skl_ddi_translations_hdmi[] = { + { .hsw = { 0x00000018, 0x000000AC, 0x0 } }, + { .hsw = { 0x00005012, 0x0000009D, 0x0 } }, + { .hsw = { 0x00007011, 0x00000088, 0x0 } }, + { .hsw = { 0x00000018, 0x000000A1, 0x0 } }, + { .hsw = { 0x00000018, 0x00000098, 0x0 } }, + { .hsw = { 0x00004013, 0x00000088, 0x0 } }, + { .hsw = { 0x80006012, 0x000000CD, 0x1 } }, + { .hsw = { 0x00000018, 0x000000DF, 0x0 } }, + { .hsw = { 0x80003015, 0x000000CD, 0x1 } }, /* Default */ + { .hsw = { 0x80003015, 0x000000C0, 0x1 } }, + { .hsw = { 0x80000018, 0x000000C0, 0x1 } }, +}; + +static const struct intel_ddi_buf_trans skl_ddi_translations_hdmi = { + .entries = _skl_ddi_translations_hdmi, + .num_entries = ARRAY_SIZE(_skl_ddi_translations_hdmi), + .hdmi_default_entry = 8, }; /* Skylake/Kabylake Y */ -static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = { - { 0x00000018, 0x000000A1, 0x0 }, - { 0x00005012, 0x000000DF, 0x0 }, - { 0x80007011, 0x000000CB, 0x3 }, - { 0x00000018, 0x000000A4, 0x0 }, - { 0x00000018, 0x0000009D, 0x0 }, - { 0x00004013, 0x00000080, 0x0 }, - { 0x80006013, 0x000000C0, 0x3 }, - { 0x00000018, 0x0000008A, 0x0 }, - { 0x80003015, 0x000000C0, 0x3 }, /* Default */ - { 0x80003015, 0x000000C0, 0x3 }, - { 0x80000018, 0x000000C0, 0x3 }, -}; - - -static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = { - /* Idx NT mV diff db */ - { 52, 0x9A, 0, 128, }, /* 0: 400 0 */ - { 78, 0x9A, 0, 85, }, /* 1: 400 3.5 */ - { 104, 0x9A, 0, 64, }, /* 2: 400 6 */ - { 154, 0x9A, 0, 43, }, /* 3: 400 9.5 */ - { 77, 0x9A, 0, 128, }, /* 4: 600 0 */ - { 116, 0x9A, 0, 85, }, /* 5: 600 3.5 */ - { 154, 0x9A, 0, 64, }, /* 6: 600 6 */ - { 102, 0x9A, 0, 128, }, /* 7: 800 0 */ - { 154, 0x9A, 0, 85, }, /* 8: 800 3.5 */ - { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */ -}; - -static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = { +static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_hdmi[] = { + { .hsw = { 0x00000018, 0x000000A1, 0x0 } }, + { .hsw = { 0x00005012, 0x000000DF, 0x0 } }, + { .hsw = { 0x80007011, 0x000000CB, 0x3 } }, + { .hsw = { 0x00000018, 0x000000A4, 0x0 } }, + { .hsw = { 0x00000018, 0x0000009D, 0x0 } }, + { .hsw = { 0x00004013, 0x00000080, 0x0 } }, + { .hsw = { 0x80006013, 0x000000C0, 0x3 } }, + { .hsw = { 0x00000018, 0x0000008A, 0x0 } }, + { .hsw = { 0x80003015, 0x000000C0, 0x3 } }, /* Default */ + { .hsw = { 0x80003015, 0x000000C0, 0x3 } }, + { .hsw = { 0x80000018, 0x000000C0, 0x3 } }, +}; + +static const struct intel_ddi_buf_trans skl_y_ddi_translations_hdmi = { + .entries = _skl_y_ddi_translations_hdmi, + .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_hdmi), + .hdmi_default_entry = 8, +}; + +static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_dp[] = { + /* Idx NT mV diff db */ + { .bxt = { 52, 0x9A, 0, 128, } }, /* 0: 400 0 */ + { .bxt = { 78, 0x9A, 0, 85, } }, /* 1: 400 3.5 */ + { .bxt = { 104, 0x9A, 0, 64, } }, /* 2: 400 6 */ + { .bxt = { 154, 0x9A, 0, 43, } }, /* 3: 400 9.5 */ + { .bxt = { 77, 0x9A, 0, 128, } }, /* 4: 600 0 */ + { .bxt = { 116, 0x9A, 0, 85, } }, /* 5: 600 3.5 */ + { .bxt = { 154, 0x9A, 0, 64, } }, /* 6: 600 6 */ + { .bxt = { 102, 0x9A, 0, 128, } }, /* 7: 800 0 */ + { .bxt = { 154, 0x9A, 0, 85, } }, /* 8: 800 3.5 */ + { .bxt = { 154, 0x9A, 1, 128, } }, /* 9: 1200 0 */ +}; + +static const struct intel_ddi_buf_trans bxt_ddi_translations_dp = { + .entries = _bxt_ddi_translations_dp, + .num_entries = ARRAY_SIZE(_bxt_ddi_translations_dp), +}; + +static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_edp[] = 
{ /* Idx NT mV diff db */ - { 26, 0, 0, 128, }, /* 0: 200 0 */ - { 38, 0, 0, 112, }, /* 1: 200 1.5 */ - { 48, 0, 0, 96, }, /* 2: 200 4 */ - { 54, 0, 0, 69, }, /* 3: 200 6 */ - { 32, 0, 0, 128, }, /* 4: 250 0 */ - { 48, 0, 0, 104, }, /* 5: 250 1.5 */ - { 54, 0, 0, 85, }, /* 6: 250 4 */ - { 43, 0, 0, 128, }, /* 7: 300 0 */ - { 54, 0, 0, 101, }, /* 8: 300 1.5 */ - { 48, 0, 0, 128, }, /* 9: 300 0 */ + { .bxt = { 26, 0, 0, 128, } }, /* 0: 200 0 */ + { .bxt = { 38, 0, 0, 112, } }, /* 1: 200 1.5 */ + { .bxt = { 48, 0, 0, 96, } }, /* 2: 200 4 */ + { .bxt = { 54, 0, 0, 69, } }, /* 3: 200 6 */ + { .bxt = { 32, 0, 0, 128, } }, /* 4: 250 0 */ + { .bxt = { 48, 0, 0, 104, } }, /* 5: 250 1.5 */ + { .bxt = { 54, 0, 0, 85, } }, /* 6: 250 4 */ + { .bxt = { 43, 0, 0, 128, } }, /* 7: 300 0 */ + { .bxt = { 54, 0, 0, 101, } }, /* 8: 300 1.5 */ + { .bxt = { 48, 0, 0, 128, } }, /* 9: 300 0 */ +}; + +static const struct intel_ddi_buf_trans bxt_ddi_translations_edp = { + .entries = _bxt_ddi_translations_edp, + .num_entries = ARRAY_SIZE(_bxt_ddi_translations_edp), }; /* BSpec has 2 recommended values - entries 0 and 8. * Using the entry with higher vswing. */ -static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = { - /* Idx NT mV diff db */ - { 52, 0x9A, 0, 128, }, /* 0: 400 0 */ - { 52, 0x9A, 0, 85, }, /* 1: 400 3.5 */ - { 52, 0x9A, 0, 64, }, /* 2: 400 6 */ - { 42, 0x9A, 0, 43, }, /* 3: 400 9.5 */ - { 77, 0x9A, 0, 128, }, /* 4: 600 0 */ - { 77, 0x9A, 0, 85, }, /* 5: 600 3.5 */ - { 77, 0x9A, 0, 64, }, /* 6: 600 6 */ - { 102, 0x9A, 0, 128, }, /* 7: 800 0 */ - { 102, 0x9A, 0, 85, }, /* 8: 800 3.5 */ - { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */ +static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_hdmi[] = { + /* Idx NT mV diff db */ + { .bxt = { 52, 0x9A, 0, 128, } }, /* 0: 400 0 */ + { .bxt = { 52, 0x9A, 0, 85, } }, /* 1: 400 3.5 */ + { .bxt = { 52, 0x9A, 0, 64, } }, /* 2: 400 6 */ + { .bxt = { 42, 0x9A, 0, 43, } }, /* 3: 400 9.5 */ + { .bxt = { 77, 0x9A, 0, 128, } }, /* 4: 600 0 */ + { .bxt = { 77, 0x9A, 0, 85, } }, /* 5: 600 3.5 */ + { .bxt = { 77, 0x9A, 0, 64, } }, /* 6: 600 6 */ + { .bxt = { 102, 0x9A, 0, 128, } }, /* 7: 800 0 */ + { .bxt = { 102, 0x9A, 0, 85, } }, /* 8: 800 3.5 */ + { .bxt = { 154, 0x9A, 1, 128, } }, /* 9: 1200 0 */ +}; + +static const struct intel_ddi_buf_trans bxt_ddi_translations_hdmi = { + .entries = _bxt_ddi_translations_hdmi, + .num_entries = ARRAY_SIZE(_bxt_ddi_translations_hdmi), + .hdmi_default_entry = ARRAY_SIZE(_bxt_ddi_translations_hdmi) - 1, }; /* Voltage Swing Programming for VccIO 0.85V for DP */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_85V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */ - { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */ - { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */ - { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ - { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ - { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */ - { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */ - { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_dp_0_85V[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x5D, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ + { .cnl = { 0xA, 0x6A, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */ + { .cnl = { 0xB, 0x7A, 0x32, 0x00, 0x0D } }, /* 350 700 6.0 */ + { .cnl = { 0x6, 0x7C, 0x2D, 0x00, 0x12 } }, 
/* 350 900 8.2 */ + { .cnl = { 0xA, 0x69, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ + { .cnl = { 0xB, 0x7A, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */ + { .cnl = { 0x6, 0x7C, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */ + { .cnl = { 0xB, 0x7D, 0x3C, 0x00, 0x03 } }, /* 650 725 0.9 */ + { .cnl = { 0x6, 0x7C, 0x34, 0x00, 0x0B } }, /* 600 900 3.5 */ + { .cnl = { 0x6, 0x7B, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ +}; + +static const struct intel_ddi_buf_trans cnl_ddi_translations_dp_0_85V = { + .entries = _cnl_ddi_translations_dp_0_85V, + .num_entries = ARRAY_SIZE(_cnl_ddi_translations_dp_0_85V), }; /* Voltage Swing Programming for VccIO 0.85V for HDMI */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_85V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ - { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ - { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ - { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 */ - { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ - { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ +static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_hdmi_0_85V[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x60, 0x3F, 0x00, 0x00 } }, /* 450 450 0.0 */ + { .cnl = { 0xB, 0x73, 0x36, 0x00, 0x09 } }, /* 450 650 3.2 */ + { .cnl = { 0x6, 0x7F, 0x31, 0x00, 0x0E } }, /* 450 850 5.5 */ + { .cnl = { 0xB, 0x73, 0x3F, 0x00, 0x00 } }, /* 650 650 0.0 */ + { .cnl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 650 850 2.3 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 850 850 0.0 */ + { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */ +}; + +static const struct intel_ddi_buf_trans cnl_ddi_translations_hdmi_0_85V = { + .entries = _cnl_ddi_translations_hdmi_0_85V, + .num_entries = ARRAY_SIZE(_cnl_ddi_translations_hdmi_0_85V), + .hdmi_default_entry = ARRAY_SIZE(_cnl_ddi_translations_hdmi_0_85V) - 1, }; /* Voltage Swing Programming for VccIO 0.85V for eDP */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_85V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x66, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */ - { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */ - { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */ - { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */ - { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */ - { 0xA, 0x66, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */ - { 0xB, 0x70, 0x3C, 0x00, 0x03 }, /* 460 600 2.3 */ - { 0xC, 0x75, 0x3C, 0x00, 0x03 }, /* 537 700 2.3 */ - { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ +static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_edp_0_85V[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x66, 0x3A, 0x00, 0x05 } }, /* 384 500 2.3 */ + { .cnl = { 0x0, 0x7F, 0x38, 0x00, 0x07 } }, /* 153 200 2.3 */ + { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 192 250 2.3 */ + { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } }, /* 230 300 2.3 */ + { .cnl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } }, /* 269 350 2.3 */ + { .cnl = { 0xA, 0x66, 0x3C, 0x00, 0x03 } }, /* 446 500 1.0 */ + { .cnl = { 0xB, 0x70, 0x3C, 0x00, 0x03 } }, /* 460 600 2.3 */ + { .cnl = { 0xC, 0x75, 0x3C, 0x00, 0x03 } }, /* 537 700 2.3 */ + { .cnl = { 0x2, 0x7F, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */ +}; + +static const struct intel_ddi_buf_trans cnl_ddi_translations_edp_0_85V = { + .entries = _cnl_ddi_translations_edp_0_85V, + .num_entries = ARRAY_SIZE(_cnl_ddi_translations_edp_0_85V), }; /* Voltage Swing Programming for VccIO 0.95V for DP */ -static const struct 
cnl_ddi_buf_trans cnl_ddi_translations_dp_0_95V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */ - { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */ - { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */ - { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ - { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ - { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */ - { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */ - { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_dp_0_95V[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x5D, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ + { .cnl = { 0xA, 0x6A, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */ + { .cnl = { 0xB, 0x7A, 0x32, 0x00, 0x0D } }, /* 350 700 6.0 */ + { .cnl = { 0x6, 0x7C, 0x2D, 0x00, 0x12 } }, /* 350 900 8.2 */ + { .cnl = { 0xA, 0x69, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ + { .cnl = { 0xB, 0x7A, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */ + { .cnl = { 0x6, 0x7C, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */ + { .cnl = { 0xB, 0x7D, 0x3C, 0x00, 0x03 } }, /* 650 725 0.9 */ + { .cnl = { 0x6, 0x7C, 0x34, 0x00, 0x0B } }, /* 600 900 3.5 */ + { .cnl = { 0x6, 0x7B, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ +}; + +static const struct intel_ddi_buf_trans cnl_ddi_translations_dp_0_95V = { + .entries = _cnl_ddi_translations_dp_0_95V, + .num_entries = ARRAY_SIZE(_cnl_ddi_translations_dp_0_95V), }; /* Voltage Swing Programming for VccIO 0.95V for HDMI */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_95V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x5C, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ - { 0xB, 0x69, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */ - { 0x5, 0x76, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */ - { 0xA, 0x5E, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ - { 0xB, 0x69, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */ - { 0xB, 0x79, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ - { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */ - { 0x5, 0x76, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */ - { 0x6, 0x7D, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */ - { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */ +static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_hdmi_0_95V[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x5C, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */ + { .cnl = { 0xB, 0x69, 0x37, 0x00, 0x08 } }, /* 400 600 3.5 */ + { .cnl = { 0x5, 0x76, 0x31, 0x00, 0x0E } }, /* 400 800 6.0 */ + { .cnl = { 0xA, 0x5E, 0x3F, 0x00, 0x00 } }, /* 450 450 0.0 */ + { .cnl = { 0xB, 0x69, 0x3F, 0x00, 0x00 } }, /* 600 600 0.0 */ + { .cnl = { 0xB, 0x79, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */ + { .cnl = { 0x6, 0x7D, 0x32, 0x00, 0x0D } }, /* 600 1000 4.4 */ + { .cnl = { 0x5, 0x76, 0x3F, 0x00, 0x00 } }, /* 800 800 0.0 */ + { .cnl = { 0x6, 0x7D, 0x39, 0x00, 0x06 } }, /* 800 1000 1.9 */ + { .cnl = { 0x6, 0x7F, 0x39, 0x00, 0x06 } }, /* 850 1050 1.8 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1050 1050 0.0 */ +}; + +static const struct intel_ddi_buf_trans cnl_ddi_translations_hdmi_0_95V = { + .entries = _cnl_ddi_translations_hdmi_0_95V, + .num_entries = ARRAY_SIZE(_cnl_ddi_translations_hdmi_0_95V), + .hdmi_default_entry = ARRAY_SIZE(_cnl_ddi_translations_hdmi_0_95V) - 1, }; /* Voltage Swing Programming for VccIO 0.95V for eDP */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_95V[] = { - /* NT mV Trans mV db */ - { 0xA, 
0x61, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */ - { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */ - { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */ - { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */ - { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */ - { 0xA, 0x61, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */ - { 0xB, 0x68, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */ - { 0xC, 0x6E, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */ - { 0x4, 0x7F, 0x3A, 0x00, 0x05 }, /* 460 600 2.3 */ - { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ +static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_edp_0_95V[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x61, 0x3A, 0x00, 0x05 } }, /* 384 500 2.3 */ + { .cnl = { 0x0, 0x7F, 0x38, 0x00, 0x07 } }, /* 153 200 2.3 */ + { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 192 250 2.3 */ + { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } }, /* 230 300 2.3 */ + { .cnl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } }, /* 269 350 2.3 */ + { .cnl = { 0xA, 0x61, 0x3C, 0x00, 0x03 } }, /* 446 500 1.0 */ + { .cnl = { 0xB, 0x68, 0x39, 0x00, 0x06 } }, /* 460 600 2.3 */ + { .cnl = { 0xC, 0x6E, 0x39, 0x00, 0x06 } }, /* 537 700 2.3 */ + { .cnl = { 0x4, 0x7F, 0x3A, 0x00, 0x05 } }, /* 460 600 2.3 */ + { .cnl = { 0x2, 0x7F, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */ +}; + +static const struct intel_ddi_buf_trans cnl_ddi_translations_edp_0_95V = { + .entries = _cnl_ddi_translations_edp_0_95V, + .num_entries = ARRAY_SIZE(_cnl_ddi_translations_edp_0_95V), }; /* Voltage Swing Programming for VccIO 1.05V for DP */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_1_05V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ - { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */ - { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */ - { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 400 1050 8.4 */ - { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */ - { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ - { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 550 1050 5.6 */ - { 0x5, 0x76, 0x3E, 0x00, 0x01 }, /* 850 900 0.5 */ - { 0x6, 0x7F, 0x36, 0x00, 0x09 }, /* 750 1050 2.9 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */ +static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_dp_1_05V[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x58, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */ + { .cnl = { 0xB, 0x64, 0x37, 0x00, 0x08 } }, /* 400 600 3.5 */ + { .cnl = { 0x5, 0x70, 0x31, 0x00, 0x0E } }, /* 400 800 6.0 */ + { .cnl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 400 1050 8.4 */ + { .cnl = { 0xB, 0x64, 0x3F, 0x00, 0x00 } }, /* 600 600 0.0 */ + { .cnl = { 0x5, 0x73, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */ + { .cnl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 550 1050 5.6 */ + { .cnl = { 0x5, 0x76, 0x3E, 0x00, 0x01 } }, /* 850 900 0.5 */ + { .cnl = { 0x6, 0x7F, 0x36, 0x00, 0x09 } }, /* 750 1050 2.9 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1050 1050 0.0 */ +}; + +static const struct intel_ddi_buf_trans cnl_ddi_translations_dp_1_05V = { + .entries = _cnl_ddi_translations_dp_1_05V, + .num_entries = ARRAY_SIZE(_cnl_ddi_translations_dp_1_05V), }; /* Voltage Swing Programming for VccIO 1.05V for HDMI */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_1_05V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ - { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */ - { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */ - { 0xA, 0x5B, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ - { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */ - { 0x5, 0x73, 0x35, 0x00, 
0x0A }, /* 600 850 3.0 */ - { 0x6, 0x7C, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */ - { 0x5, 0x70, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */ - { 0x6, 0x7C, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */ - { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */ +static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_hdmi_1_05V[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x58, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */ + { .cnl = { 0xB, 0x64, 0x37, 0x00, 0x08 } }, /* 400 600 3.5 */ + { .cnl = { 0x5, 0x70, 0x31, 0x00, 0x0E } }, /* 400 800 6.0 */ + { .cnl = { 0xA, 0x5B, 0x3F, 0x00, 0x00 } }, /* 450 450 0.0 */ + { .cnl = { 0xB, 0x64, 0x3F, 0x00, 0x00 } }, /* 600 600 0.0 */ + { .cnl = { 0x5, 0x73, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */ + { .cnl = { 0x6, 0x7C, 0x32, 0x00, 0x0D } }, /* 600 1000 4.4 */ + { .cnl = { 0x5, 0x70, 0x3F, 0x00, 0x00 } }, /* 800 800 0.0 */ + { .cnl = { 0x6, 0x7C, 0x39, 0x00, 0x06 } }, /* 800 1000 1.9 */ + { .cnl = { 0x6, 0x7F, 0x39, 0x00, 0x06 } }, /* 850 1050 1.8 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1050 1050 0.0 */ +}; + +static const struct intel_ddi_buf_trans cnl_ddi_translations_hdmi_1_05V = { + .entries = _cnl_ddi_translations_hdmi_1_05V, + .num_entries = ARRAY_SIZE(_cnl_ddi_translations_hdmi_1_05V), + .hdmi_default_entry = ARRAY_SIZE(_cnl_ddi_translations_hdmi_1_05V) - 1, }; /* Voltage Swing Programming for VccIO 1.05V for eDP */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x5E, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */ - { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */ - { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */ - { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */ - { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */ - { 0xA, 0x5E, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */ - { 0xB, 0x64, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */ - { 0xE, 0x6A, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */ - { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ +static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_edp_1_05V[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x5E, 0x3A, 0x00, 0x05 } }, /* 384 500 2.3 */ + { .cnl = { 0x0, 0x7F, 0x38, 0x00, 0x07 } }, /* 153 200 2.3 */ + { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 192 250 2.3 */ + { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } }, /* 230 300 2.3 */ + { .cnl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } }, /* 269 350 2.3 */ + { .cnl = { 0xA, 0x5E, 0x3C, 0x00, 0x03 } }, /* 446 500 1.0 */ + { .cnl = { 0xB, 0x64, 0x39, 0x00, 0x06 } }, /* 460 600 2.3 */ + { .cnl = { 0xE, 0x6A, 0x39, 0x00, 0x06 } }, /* 537 700 2.3 */ + { .cnl = { 0x2, 0x7F, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */ +}; + +static const struct intel_ddi_buf_trans cnl_ddi_translations_edp_1_05V = { + .entries = _cnl_ddi_translations_edp_1_05V, + .num_entries = ARRAY_SIZE(_cnl_ddi_translations_edp_1_05V), }; /* icl_combo_phy_ddi_translations */ -static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = { - /* NT mV Trans mV db */ - { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ - { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ - { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ - { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 
0x00 }, /* 900 900 0.0 */ -}; - -static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = { - /* NT mV Trans mV db */ - { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ - { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ - { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ - { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */ - { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ - { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ - { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ - { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ - { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ - { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ -}; - -static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = { - /* NT mV Trans mV db */ - { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ - { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ - { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ - { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = { - /* NT mV Trans mV db */ - { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ - { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ - { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ - { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */ - { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ - { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ -}; - -static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = { - /* NT mV Trans mV db */ - { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */ - { 0xC, 0x64, 0x34, 0x00, 0x0B }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 350 900 8.2 */ - { 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x64, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */ - { 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x38, 0x00, 0x07 }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr[] = { - /* NT mV Trans mV db */ - { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ - { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ - { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ - { 0xA, 0x35, 0x36, 0x00, 0x09 }, /* 200 350 4.9 */ - { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ - { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ - { 0xA, 0x35, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ - { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ - { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ - { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ -}; - -static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[] = { - /* NT mV Trans mV db */ - { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ - { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 250 1.9 */ - { 0x1, 0x7F, 0x3D, 0x00, 0x02 }, /* 200 300 3.5 */ - { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 200 350 4.9 */ - { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ - { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 300 1.6 */ - { 0xA, 0x35, 0x3A, 
0x00, 0x05 }, /* 250 350 2.9 */ - { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ - { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ - { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ -}; - -static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = { - /* NT mV Trans mV db */ - { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */ - { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */ - { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ - { 0xC, 0x60, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { - /* NT mV Trans mV db */ - { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */ - { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */ - { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ - { 0xC, 0x58, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr[] = { - /* Voltage swing pre-emphasis */ - { 0x18, 0x00, 0x00 }, /* 0 0 */ - { 0x1D, 0x00, 0x05 }, /* 0 1 */ - { 0x24, 0x00, 0x0C }, /* 0 2 */ - { 0x2B, 0x00, 0x14 }, /* 0 3 */ - { 0x21, 0x00, 0x00 }, /* 1 0 */ - { 0x2B, 0x00, 0x08 }, /* 1 1 */ - { 0x30, 0x00, 0x0F }, /* 1 2 */ - { 0x31, 0x00, 0x03 }, /* 2 0 */ - { 0x34, 0x00, 0x0B }, /* 2 1 */ - { 0x3F, 0x00, 0x00 }, /* 3 0 */ -}; - -static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3[] = { - /* Voltage swing pre-emphasis */ - { 0x18, 0x00, 0x00 }, /* 0 0 */ - { 0x1D, 0x00, 0x05 }, /* 0 1 */ - { 0x24, 0x00, 0x0C }, /* 0 2 */ - { 0x2B, 0x00, 0x14 }, /* 0 3 */ - { 0x26, 0x00, 0x00 }, /* 1 0 */ - { 0x2C, 0x00, 0x07 }, /* 1 1 */ - { 0x33, 0x00, 0x0C }, /* 1 2 */ - { 0x2E, 0x00, 0x00 }, /* 2 0 */ - { 0x36, 0x00, 0x09 }, /* 2 1 */ - { 0x3F, 0x00, 0x00 }, /* 3 0 */ -}; - -static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi[] = { - /* HDMI Preset VS Pre-emph */ - { 0x1A, 0x0, 0x0 }, /* 1 400mV 0dB */ - { 0x20, 0x0, 0x0 }, /* 2 500mV 0dB */ - { 0x29, 0x0, 0x0 }, /* 3 650mV 0dB */ - { 0x32, 0x0, 0x0 }, /* 4 800mV 0dB */ - { 0x3F, 0x0, 0x0 }, /* 5 1000mV 0dB */ - { 0x3A, 0x0, 0x5 }, /* 6 Full -1.5 dB */ - { 0x39, 0x0, 0x6 }, /* 7 Full -1.8 dB */ - { 0x38, 0x0, 0x7 }, /* 8 Full -2 dB */ - { 0x37, 0x0, 0x8 }, /* 9 Full -2.5 dB */ - { 0x36, 0x0, 0x9 }, /* 10 Full -3 dB */ -}; - -static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = { - /* VS pre-emp Non-trans mV Pre-emph dB */ - { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */ - { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */ - { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */ - { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */ - { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */ - { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */ - { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */ - { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */ - { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */ - { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */ -}; - 
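(Editorial aside, not part of the patch.) Every table in this hunk is converted from a bare per-PHY array into a named entries array wrapped by a struct intel_ddi_buf_trans, with the per-PHY payload selected through a union. The definitions of those two new types live in the corresponding buf_trans header, which this hunk does not include; judging only from the designated initializers used throughout (.hsw, .bxt, .cnl, .mg, .dkl, .entries, .num_entries, .hdmi_default_entry), they are presumably along the following lines. The exact member types, and whether the old per-PHY structs were renamed, are assumptions.

/*
 * Editor's sketch of the assumed new types; illustration only, not taken
 * from this hunk. The per-PHY structs are the ones the removed tables
 * already used.
 */
union intel_ddi_buf_trans_entry {
	struct ddi_buf_trans hsw;		/* HSW/BDW/SKL-style entry (possibly renamed in the header) */
	struct bxt_ddi_buf_trans bxt;
	struct cnl_ddi_buf_trans cnl;
	struct icl_mg_phy_ddi_buf_trans mg;
	struct tgl_dkl_phy_ddi_buf_trans dkl;
};

struct intel_ddi_buf_trans {
	const union intel_ddi_buf_trans_entry *entries;
	int num_entries;		/* filled with ARRAY_SIZE() of the entries array */
	int hdmi_default_entry;		/* index of the default HDMI vswing level */
};

This shape matches how the wrappers are consumed later in the hunk: the new intel_get_buf_trans() helper returns the wrapper and reports its num_entries to the caller, replacing the per-table ARRAY_SIZE() plumbing in the old *_get_buf_trans functions.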
-static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans_hbr2[] = { - /* VS pre-emp Non-trans mV Pre-emph dB */ - { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */ - { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */ - { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */ - { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */ - { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */ - { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */ - { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */ - { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */ - { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */ - { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */ -}; - -static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = { - /* HDMI Preset VS Pre-emph */ - { 0x7, 0x0, 0x0 }, /* 1 400mV 0dB */ - { 0x6, 0x0, 0x0 }, /* 2 500mV 0dB */ - { 0x4, 0x0, 0x0 }, /* 3 650mV 0dB */ - { 0x2, 0x0, 0x0 }, /* 4 800mV 0dB */ - { 0x0, 0x0, 0x0 }, /* 5 1000mV 0dB */ - { 0x0, 0x0, 0x5 }, /* 6 Full -1.5 dB */ - { 0x0, 0x0, 0x6 }, /* 7 Full -1.8 dB */ - { 0x0, 0x0, 0x7 }, /* 8 Full -2 dB */ - { 0x0, 0x0, 0x8 }, /* 9 Full -2.5 dB */ - { 0x0, 0x0, 0xA }, /* 10 Full -3 dB */ -}; - -static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr[] = { - /* NT mV Trans mV db */ - { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ - { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ - { 0x6, 0x7D, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ - { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ - { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = { - /* NT mV Trans mV db */ - { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ - { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ - { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x63, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ - { 0xC, 0x61, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ - { 0x6, 0x7B, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct cnl_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = { - /* NT mV Trans mV db */ - { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x4F, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */ - { 0xC, 0x60, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */ - { 0xC, 0x7F, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */ - { 0xC, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x6F, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ - { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */ - { 0x6, 0x60, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ + { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ + { .cnl = { 0xC, 0x71, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */ + { .cnl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */ + { .cnl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ + { .cnl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */ + { 
.cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */ + { .cnl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */ + { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ +}; + +static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3 = { + .entries = _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, + .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3), +}; + +static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_edp_hbr2[] = { + /* NT mV Trans mV db */ + { .cnl = { 0x0, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */ + { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 200 250 1.9 */ + { .cnl = { 0x1, 0x7F, 0x33, 0x00, 0x0C } }, /* 200 300 3.5 */ + { .cnl = { 0x9, 0x7F, 0x31, 0x00, 0x0E } }, /* 200 350 4.9 */ + { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */ + { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } }, /* 250 300 1.6 */ + { .cnl = { 0x9, 0x7F, 0x35, 0x00, 0x0A } }, /* 250 350 2.9 */ + { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */ + { .cnl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */ + { .cnl = { 0x9, 0x7F, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ +}; + +static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2 = { + .entries = _icl_combo_phy_ddi_translations_edp_hbr2, + .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_edp_hbr2), +}; + +static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_hdmi[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x60, 0x3F, 0x00, 0x00 } }, /* 450 450 0.0 */ + { .cnl = { 0xB, 0x73, 0x36, 0x00, 0x09 } }, /* 450 650 3.2 */ + { .cnl = { 0x6, 0x7F, 0x31, 0x00, 0x0E } }, /* 450 850 5.5 */ + { .cnl = { 0xB, 0x73, 0x3F, 0x00, 0x00 } }, /* 650 650 0.0 ALS */ + { .cnl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 650 850 2.3 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 850 850 0.0 */ + { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */ +}; + +static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi = { + .entries = _icl_combo_phy_ddi_translations_hdmi, + .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_hdmi), + .hdmi_default_entry = ARRAY_SIZE(_icl_combo_phy_ddi_translations_hdmi) - 1, +}; + +static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_dp[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ + { .cnl = { 0xA, 0x47, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */ + { .cnl = { 0xC, 0x64, 0x34, 0x00, 0x0B } }, /* 350 700 6.0 */ + { .cnl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 350 900 8.2 */ + { .cnl = { 0xA, 0x46, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ + { .cnl = { 0xC, 0x64, 0x38, 0x00, 0x07 } }, /* 500 700 2.9 */ + { .cnl = { 0x6, 0x7F, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */ + { .cnl = { 0xC, 0x61, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ + { .cnl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } }, /* 600 900 3.5 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ +}; + +static const struct intel_ddi_buf_trans ehl_combo_phy_ddi_translations_dp = { + .entries = _ehl_combo_phy_ddi_translations_dp, + .num_entries = ARRAY_SIZE(_ehl_combo_phy_ddi_translations_dp), +}; + +static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_edp_hbr2[] = { + /* NT mV Trans mV db */ + { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */ + { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 250 1.9 */ + { .cnl = { 
0x1, 0x7F, 0x3D, 0x00, 0x02 } }, /* 200 300 3.5 */ + { .cnl = { 0xA, 0x35, 0x39, 0x00, 0x06 } }, /* 200 350 4.9 */ + { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */ + { .cnl = { 0x1, 0x7F, 0x3C, 0x00, 0x03 } }, /* 250 300 1.6 */ + { .cnl = { 0xA, 0x35, 0x39, 0x00, 0x06 } }, /* 250 350 2.9 */ + { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */ + { .cnl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */ + { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ +}; + +static const struct intel_ddi_buf_trans ehl_combo_phy_ddi_translations_edp_hbr2 = { + .entries = _ehl_combo_phy_ddi_translations_edp_hbr2, + .num_entries = ARRAY_SIZE(_ehl_combo_phy_ddi_translations_edp_hbr2), +}; + +static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp_hbr[] = { + /* NT mV Trans mV db */ + { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */ + { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 200 250 1.9 */ + { .cnl = { 0x1, 0x7F, 0x33, 0x00, 0x0C } }, /* 200 300 3.5 */ + { .cnl = { 0xA, 0x35, 0x36, 0x00, 0x09 } }, /* 200 350 4.9 */ + { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */ + { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } }, /* 250 300 1.6 */ + { .cnl = { 0xA, 0x35, 0x35, 0x00, 0x0A } }, /* 250 350 2.9 */ + { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */ + { .cnl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */ + { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ +}; + +static const struct intel_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr = { + .entries = _jsl_combo_phy_ddi_translations_edp_hbr, + .num_entries = ARRAY_SIZE(_jsl_combo_phy_ddi_translations_edp_hbr), +}; + +static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp_hbr2[] = { + /* NT mV Trans mV db */ + { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */ + { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 250 1.9 */ + { .cnl = { 0x1, 0x7F, 0x3D, 0x00, 0x02 } }, /* 200 300 3.5 */ + { .cnl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 200 350 4.9 */ + { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */ + { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 300 1.6 */ + { .cnl = { 0xA, 0x35, 0x3A, 0x00, 0x05 } }, /* 250 350 2.9 */ + { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */ + { .cnl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */ + { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ +}; + +static const struct intel_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2 = { + .entries = _jsl_combo_phy_ddi_translations_edp_hbr2, + .num_entries = ARRAY_SIZE(_jsl_combo_phy_ddi_translations_edp_hbr2), +}; + +static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ + { .cnl = { 0xA, 0x48, 0x35, 0x00, 0x0A } }, /* 350 500 3.1 */ + { .cnl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */ + { .cnl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 350 900 8.2 */ + { .cnl = { 0xA, 0x43, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ + { .cnl = { 0xC, 0x60, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */ + { .cnl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */ + { .cnl = { 0xC, 0x60, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ + { .cnl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 600 900 3.5 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ +}; + +static const struct intel_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr = { + .entries = 
_dg1_combo_phy_ddi_translations_dp_rbr_hbr, + .num_entries = ARRAY_SIZE(_dg1_combo_phy_ddi_translations_dp_rbr_hbr), +}; + +static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ + { .cnl = { 0xA, 0x48, 0x35, 0x00, 0x0A } }, /* 350 500 3.1 */ + { .cnl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */ + { .cnl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 350 900 8.2 */ + { .cnl = { 0xA, 0x43, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ + { .cnl = { 0xC, 0x60, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */ + { .cnl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */ + { .cnl = { 0xC, 0x58, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ + { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ +}; + +static const struct intel_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3 = { + .entries = _dg1_combo_phy_ddi_translations_dp_hbr2_hbr3, + .num_entries = ARRAY_SIZE(_dg1_combo_phy_ddi_translations_dp_hbr2_hbr3), +}; + +static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_rbr_hbr[] = { + /* Voltage swing pre-emphasis */ + { .mg = { 0x18, 0x00, 0x00 } }, /* 0 0 */ + { .mg = { 0x1D, 0x00, 0x05 } }, /* 0 1 */ + { .mg = { 0x24, 0x00, 0x0C } }, /* 0 2 */ + { .mg = { 0x2B, 0x00, 0x14 } }, /* 0 3 */ + { .mg = { 0x21, 0x00, 0x00 } }, /* 1 0 */ + { .mg = { 0x2B, 0x00, 0x08 } }, /* 1 1 */ + { .mg = { 0x30, 0x00, 0x0F } }, /* 1 2 */ + { .mg = { 0x31, 0x00, 0x03 } }, /* 2 0 */ + { .mg = { 0x34, 0x00, 0x0B } }, /* 2 1 */ + { .mg = { 0x3F, 0x00, 0x00 } }, /* 3 0 */ +}; + +static const struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr = { + .entries = _icl_mg_phy_ddi_translations_rbr_hbr, + .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_rbr_hbr), +}; + +static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hbr2_hbr3[] = { + /* Voltage swing pre-emphasis */ + { .mg = { 0x18, 0x00, 0x00 } }, /* 0 0 */ + { .mg = { 0x1D, 0x00, 0x05 } }, /* 0 1 */ + { .mg = { 0x24, 0x00, 0x0C } }, /* 0 2 */ + { .mg = { 0x2B, 0x00, 0x14 } }, /* 0 3 */ + { .mg = { 0x26, 0x00, 0x00 } }, /* 1 0 */ + { .mg = { 0x2C, 0x00, 0x07 } }, /* 1 1 */ + { .mg = { 0x33, 0x00, 0x0C } }, /* 1 2 */ + { .mg = { 0x2E, 0x00, 0x00 } }, /* 2 0 */ + { .mg = { 0x36, 0x00, 0x09 } }, /* 2 1 */ + { .mg = { 0x3F, 0x00, 0x00 } }, /* 3 0 */ +}; + +static const struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3 = { + .entries = _icl_mg_phy_ddi_translations_hbr2_hbr3, + .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_hbr2_hbr3), +}; + +static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hdmi[] = { + /* HDMI Preset VS Pre-emph */ + { .mg = { 0x1A, 0x0, 0x0 } }, /* 1 400mV 0dB */ + { .mg = { 0x20, 0x0, 0x0 } }, /* 2 500mV 0dB */ + { .mg = { 0x29, 0x0, 0x0 } }, /* 3 650mV 0dB */ + { .mg = { 0x32, 0x0, 0x0 } }, /* 4 800mV 0dB */ + { .mg = { 0x3F, 0x0, 0x0 } }, /* 5 1000mV 0dB */ + { .mg = { 0x3A, 0x0, 0x5 } }, /* 6 Full -1.5 dB */ + { .mg = { 0x39, 0x0, 0x6 } }, /* 7 Full -1.8 dB */ + { .mg = { 0x38, 0x0, 0x7 } }, /* 8 Full -2 dB */ + { .mg = { 0x37, 0x0, 0x8 } }, /* 9 Full -2.5 dB */ + { .mg = { 0x36, 0x0, 0x9 } }, /* 10 Full -3 dB */ +}; + +static const struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi = { + .entries = _icl_mg_phy_ddi_translations_hdmi, + .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_hdmi), + .hdmi_default_entry = 
ARRAY_SIZE(_icl_mg_phy_ddi_translations_hdmi) - 1, +}; + +static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hbr[] = { + /* VS pre-emp Non-trans mV Pre-emph dB */ + { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */ + { .dkl = { 0x5, 0x0, 0x05 } }, /* 0 1 400mV 3.5 dB */ + { .dkl = { 0x2, 0x0, 0x0B } }, /* 0 2 400mV 6 dB */ + { .dkl = { 0x0, 0x0, 0x18 } }, /* 0 3 400mV 9.5 dB */ + { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */ + { .dkl = { 0x2, 0x0, 0x08 } }, /* 1 1 600mV 3.5 dB */ + { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */ + { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */ + { .dkl = { 0x0, 0x0, 0x0B } }, /* 2 1 800mV 3.5 dB */ + { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB HDMI default */ +}; + +static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_dp_hbr = { + .entries = _tgl_dkl_phy_ddi_translations_dp_hbr, + .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_dp_hbr), +}; + +static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hbr2[] = { + /* VS pre-emp Non-trans mV Pre-emph dB */ + { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */ + { .dkl = { 0x5, 0x0, 0x05 } }, /* 0 1 400mV 3.5 dB */ + { .dkl = { 0x2, 0x0, 0x0B } }, /* 0 2 400mV 6 dB */ + { .dkl = { 0x0, 0x0, 0x19 } }, /* 0 3 400mV 9.5 dB */ + { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */ + { .dkl = { 0x2, 0x0, 0x08 } }, /* 1 1 600mV 3.5 dB */ + { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */ + { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */ + { .dkl = { 0x0, 0x0, 0x0B } }, /* 2 1 800mV 3.5 dB */ + { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB HDMI default */ +}; + +static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_dp_hbr2 = { + .entries = _tgl_dkl_phy_ddi_translations_dp_hbr2, + .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_dp_hbr2), +}; + +static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_hdmi[] = { + /* HDMI Preset VS Pre-emph */ + { .dkl = { 0x7, 0x0, 0x0 } }, /* 1 400mV 0dB */ + { .dkl = { 0x6, 0x0, 0x0 } }, /* 2 500mV 0dB */ + { .dkl = { 0x4, 0x0, 0x0 } }, /* 3 650mV 0dB */ + { .dkl = { 0x2, 0x0, 0x0 } }, /* 4 800mV 0dB */ + { .dkl = { 0x0, 0x0, 0x0 } }, /* 5 1000mV 0dB */ + { .dkl = { 0x0, 0x0, 0x5 } }, /* 6 Full -1.5 dB */ + { .dkl = { 0x0, 0x0, 0x6 } }, /* 7 Full -1.8 dB */ + { .dkl = { 0x0, 0x0, 0x7 } }, /* 8 Full -2 dB */ + { .dkl = { 0x0, 0x0, 0x8 } }, /* 9 Full -2.5 dB */ + { .dkl = { 0x0, 0x0, 0xA } }, /* 10 Full -3 dB */ +}; + +static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_hdmi = { + .entries = _tgl_dkl_phy_ddi_translations_hdmi, + .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_hdmi), + .hdmi_default_entry = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_hdmi) - 1, +}; + +static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_hbr[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ + { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ + { .cnl = { 0xC, 0x71, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */ + { .cnl = { 0x6, 0x7D, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */ + { .cnl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ + { .cnl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */ + { .cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */ + { .cnl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */ + { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ +}; + 
+static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr = { + .entries = _tgl_combo_phy_ddi_translations_dp_hbr, + .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_dp_hbr), +}; + +static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_hbr2[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ + { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ + { .cnl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */ + { .cnl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */ + { .cnl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ + { .cnl = { 0xC, 0x63, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */ + { .cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */ + { .cnl = { 0xC, 0x61, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */ + { .cnl = { 0x6, 0x7B, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ +}; + +static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2 = { + .entries = _tgl_combo_phy_ddi_translations_dp_hbr2, + .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_dp_hbr2), +}; + +static const union intel_ddi_buf_trans_entry _tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ + { .cnl = { 0xA, 0x4F, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */ + { .cnl = { 0xC, 0x60, 0x32, 0x00, 0x0D } }, /* 350 700 6.0 */ + { .cnl = { 0xC, 0x7F, 0x2D, 0x00, 0x12 } }, /* 350 900 8.2 */ + { .cnl = { 0xC, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ + { .cnl = { 0xC, 0x6F, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */ + { .cnl = { 0x6, 0x7D, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */ + { .cnl = { 0x6, 0x60, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */ + { .cnl = { 0x6, 0x7F, 0x34, 0x00, 0x0B } }, /* 600 900 3.5 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ +}; + +static const struct intel_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2 = { + .entries = _tgl_uy_combo_phy_ddi_translations_dp_hbr2, + .num_entries = ARRAY_SIZE(_tgl_uy_combo_phy_ddi_translations_dp_hbr2), }; /* * Cloned the HOBL entry to comply with the voltage and pre-emphasis entries * that DisplayPort specification requires */ -static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = { - /* VS pre-emp */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 0 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 1 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 2 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 3 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 0 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 1 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 2 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 0 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */ -}; - -static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = { - /* NT mV Trans mV db */ - { 0xA, 0x2F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ - { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ - { 0x6, 0x7D, 0x2A, 0x00, 0x15 }, /* 350 900 8.2 */ - { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ - { 0xC, 0x6E, 0x3E, 0x00, 0x01 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { 
- /* NT mV Trans mV db */ - { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x50, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */ - { 0xC, 0x61, 0x33, 0x00, 0x0C }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x2E, 0x00, 0x11 }, /* 350 900 8.2 */ - { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x5F, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ - { 0xC, 0x5F, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ - { 0x6, 0x7E, 0x36, 0x00, 0x09 }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct tgl_dkl_phy_ddi_buf_trans adlp_dkl_phy_dp_ddi_trans_hbr[] = { - /* VS pre-emp Non-trans mV Pre-emph dB */ - { 0x7, 0x0, 0x01 }, /* 0 0 400mV 0 dB */ - { 0x5, 0x0, 0x06 }, /* 0 1 400mV 3.5 dB */ - { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */ - { 0x0, 0x0, 0x17 }, /* 0 3 400mV 9.5 dB */ - { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */ - { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */ - { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */ - { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */ - { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */ - { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB */ -}; - -static const struct tgl_dkl_phy_ddi_buf_trans adlp_dkl_phy_dp_ddi_trans_hbr2_hbr3[] = { - /* VS pre-emp Non-trans mV Pre-emph dB */ - { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */ - { 0x5, 0x0, 0x04 }, /* 0 1 400mV 3.5 dB */ - { 0x2, 0x0, 0x0A }, /* 0 2 400mV 6 dB */ - { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */ - { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */ - { 0x2, 0x0, 0x06 }, /* 1 1 600mV 3.5 dB */ - { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */ - { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */ - { 0x0, 0x0, 0x09 }, /* 2 1 800mV 3.5 dB */ - { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB */ -}; - -bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table) -{ - return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl; -} +static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = { + /* VS pre-emp */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 0 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 1 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 2 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 3 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1 0 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1 1 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1 2 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 2 0 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 2 1 */ +}; -static const struct ddi_buf_trans * -bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); +static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl = { + .entries = _tgl_combo_phy_ddi_translations_edp_hbr2_hobl, + .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_edp_hbr2_hobl), +}; - if (dev_priv->vbt.edp.low_vswing) { - *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp); - return bdw_ddi_translations_edp; - } else { - *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp); - return bdw_ddi_translations_dp; - } -} +static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_hbr[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x2F, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ + { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ + { .cnl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */ + { .cnl = { 0x6, 0x7D, 0x2A, 0x00, 0x15 } }, /* 350 900 8.2 */ + { .cnl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ + { .cnl = 
{ 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */ + { .cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */ + { .cnl = { 0xC, 0x6E, 0x3E, 0x00, 0x01 } }, /* 650 700 0.6 */ + { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ +}; -static const struct ddi_buf_trans * -skl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); +static const struct intel_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr = { + .entries = _rkl_combo_phy_ddi_translations_dp_hbr, + .num_entries = ARRAY_SIZE(_rkl_combo_phy_ddi_translations_dp_hbr), +}; - if (IS_SKL_ULX(dev_priv)) { - *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); - return skl_y_ddi_translations_dp; - } else if (IS_SKL_ULT(dev_priv)) { - *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); - return skl_u_ddi_translations_dp; - } else { - *n_entries = ARRAY_SIZE(skl_ddi_translations_dp); - return skl_ddi_translations_dp; - } -} +static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ + { .cnl = { 0xA, 0x50, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */ + { .cnl = { 0xC, 0x61, 0x33, 0x00, 0x0C } }, /* 350 700 6.0 */ + { .cnl = { 0x6, 0x7F, 0x2E, 0x00, 0x11 } }, /* 350 900 8.2 */ + { .cnl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ + { .cnl = { 0xC, 0x5F, 0x38, 0x00, 0x07 } }, /* 500 700 2.9 */ + { .cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */ + { .cnl = { 0xC, 0x5F, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ + { .cnl = { 0x6, 0x7E, 0x36, 0x00, 0x09 } }, /* 600 900 3.5 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ +}; -static const struct ddi_buf_trans * -kbl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); +static const struct intel_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3 = { + .entries = _rkl_combo_phy_ddi_translations_dp_hbr2_hbr3, + .num_entries = ARRAY_SIZE(_rkl_combo_phy_ddi_translations_dp_hbr2_hbr3), +}; - if (IS_KBL_ULX(dev_priv) || - IS_CFL_ULX(dev_priv) || - IS_CML_ULX(dev_priv)) { - *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp); - return kbl_y_ddi_translations_dp; - } else if (IS_KBL_ULT(dev_priv) || - IS_CFL_ULT(dev_priv) || - IS_CML_ULT(dev_priv)) { - *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp); - return kbl_u_ddi_translations_dp; - } else { - *n_entries = ARRAY_SIZE(kbl_ddi_translations_dp); - return kbl_ddi_translations_dp; - } -} +static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ + { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ + { .cnl = { 0xC, 0x63, 0x30, 0x00, 0x0F } }, /* 350 700 6.0 */ + { .cnl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */ + { .cnl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ + { .cnl = { 0xC, 0x63, 0x37, 0x00, 0x08 } }, /* 500 700 2.9 */ + { .cnl = { 0x6, 0x7F, 0x31, 0x00, 0x0E } }, /* 500 900 5.1 */ + { .cnl = { 0xC, 0x61, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */ + { .cnl = { 0x6, 0x7B, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ +}; + +static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_dp_hbr2_hbr3 = { + .entries = 
_adls_combo_phy_ddi_translations_dp_hbr2_hbr3, + .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_dp_hbr2_hbr3), +}; + +static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_edp_hbr2[] = { + /* NT mV Trans mV db */ + { .cnl = { 0x9, 0x70, 0x3C, 0x00, 0x03 } }, /* 200 200 0.0 */ + { .cnl = { 0x9, 0x6D, 0x3A, 0x00, 0x05 } }, /* 200 250 1.9 */ + { .cnl = { 0x9, 0x7F, 0x36, 0x00, 0x09 } }, /* 200 300 3.5 */ + { .cnl = { 0x4, 0x59, 0x32, 0x00, 0x0D } }, /* 200 350 4.9 */ + { .cnl = { 0x2, 0x77, 0x3A, 0x00, 0x05 } }, /* 250 250 0.0 */ + { .cnl = { 0x2, 0x7F, 0x38, 0x00, 0x07 } }, /* 250 300 1.6 */ + { .cnl = { 0x4, 0x5A, 0x36, 0x00, 0x09 } }, /* 250 350 2.9 */ + { .cnl = { 0x4, 0x5E, 0x3D, 0x00, 0x04 } }, /* 300 300 0.0 */ + { .cnl = { 0x4, 0x65, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */ + { .cnl = { 0x4, 0x6F, 0x3A, 0x00, 0x05 } }, /* 350 350 0.0 */ +}; + +static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_edp_hbr2 = { + .entries = _adls_combo_phy_ddi_translations_edp_hbr2, + .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_edp_hbr2), +}; + +static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_edp_hbr3[] = { + /* NT mV Trans mV db */ + { .cnl = { 0xA, 0x5E, 0x34, 0x00, 0x0B } }, /* 350 350 0.0 */ + { .cnl = { 0xA, 0x69, 0x32, 0x00, 0x0D } }, /* 350 500 3.1 */ + { .cnl = { 0xC, 0x74, 0x31, 0x00, 0x0E } }, /* 350 700 6.0 */ + { .cnl = { 0x6, 0x7F, 0x2E, 0x00, 0x11 } }, /* 350 900 8.2 */ + { .cnl = { 0xA, 0x5C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ + { .cnl = { 0xC, 0x7F, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */ + { .cnl = { 0x6, 0x7F, 0x33, 0x00, 0x0C } }, /* 500 900 5.1 */ + { .cnl = { 0xC, 0x7F, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ + { .cnl = { 0x6, 0x7F, 0x3C, 0x00, 0x03 } }, /* 600 900 3.5 */ + { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ +}; + +static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_edp_hbr3 = { + .entries = _adls_combo_phy_ddi_translations_edp_hbr3, + .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_edp_hbr3), +}; -static const struct ddi_buf_trans * -skl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) +static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_hbr[] = { + /* VS pre-emp Non-trans mV Pre-emph dB */ + { .dkl = { 0x7, 0x0, 0x01 } }, /* 0 0 400mV 0 dB */ + { .dkl = { 0x5, 0x0, 0x06 } }, /* 0 1 400mV 3.5 dB */ + { .dkl = { 0x2, 0x0, 0x0B } }, /* 0 2 400mV 6 dB */ + { .dkl = { 0x0, 0x0, 0x17 } }, /* 0 3 400mV 9.5 dB */ + { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */ + { .dkl = { 0x2, 0x0, 0x08 } }, /* 1 1 600mV 3.5 dB */ + { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */ + { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */ + { .dkl = { 0x0, 0x0, 0x0B } }, /* 2 1 800mV 3.5 dB */ + { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB */ +}; + +static const struct intel_ddi_buf_trans adlp_dkl_phy_ddi_translations_dp_hbr = { + .entries = _adlp_dkl_phy_ddi_translations_dp_hbr, + .num_entries = ARRAY_SIZE(_adlp_dkl_phy_ddi_translations_dp_hbr), +}; + +static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3[] = { + /* VS pre-emp Non-trans mV Pre-emph dB */ + { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */ + { .dkl = { 0x5, 0x0, 0x04 } }, /* 0 1 400mV 3.5 dB */ + { .dkl = { 0x2, 0x0, 0x0A } }, /* 0 2 400mV 6 dB */ + { .dkl = { 0x0, 0x0, 0x18 } }, /* 0 3 400mV 9.5 dB */ + { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */ + { .dkl = { 0x2, 0x0, 0x06 } }, /* 1 
1 600mV 3.5 dB */ + { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */ + { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */ + { .dkl = { 0x0, 0x0, 0x09 } }, /* 2 1 800mV 3.5 dB */ + { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB */ +}; + +static const struct intel_ddi_buf_trans adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3 = { + .entries = _adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3, + .num_entries = ARRAY_SIZE(_adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3), +}; + +bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + return table == &tgl_combo_phy_ddi_translations_edp_hbr2_hobl; +} - if (dev_priv->vbt.edp.low_vswing) { - if (IS_SKL_ULX(dev_priv) || - IS_KBL_ULX(dev_priv) || - IS_CFL_ULX(dev_priv) || - IS_CML_ULX(dev_priv)) { - *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); - return skl_y_ddi_translations_edp; - } else if (IS_SKL_ULT(dev_priv) || - IS_KBL_ULT(dev_priv) || - IS_CFL_ULT(dev_priv) || - IS_CML_ULT(dev_priv)) { - *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp); - return skl_u_ddi_translations_edp; - } else { - *n_entries = ARRAY_SIZE(skl_ddi_translations_edp); - return skl_ddi_translations_edp; - } - } +static const struct intel_ddi_buf_trans * +intel_get_buf_trans(const struct intel_ddi_buf_trans *ddi_translations, int *num_entries) +{ + *num_entries = ddi_translations->num_entries; + return ddi_translations; +} - if (IS_KABYLAKE(dev_priv) || - IS_COFFEELAKE(dev_priv) || - IS_COMETLAKE(dev_priv)) - return kbl_get_buf_trans_dp(encoder, n_entries); +static const struct intel_ddi_buf_trans * +hsw_get_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) + return intel_get_buf_trans(&hsw_ddi_translations_fdi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return intel_get_buf_trans(&hsw_ddi_translations_hdmi, n_entries); else - return skl_get_buf_trans_dp(encoder, n_entries); + return intel_get_buf_trans(&hsw_ddi_translations_dp, n_entries); } -static const struct ddi_buf_trans * -skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) +static const struct intel_ddi_buf_trans * +bdw_get_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) { - if (IS_SKL_ULX(dev_priv) || - IS_KBL_ULX(dev_priv) || - IS_CFL_ULX(dev_priv) || - IS_CML_ULX(dev_priv)) { - *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); - return skl_y_ddi_translations_hdmi; - } else { - *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); - return skl_ddi_translations_hdmi; - } + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) + return intel_get_buf_trans(&bdw_ddi_translations_fdi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return intel_get_buf_trans(&bdw_ddi_translations_hdmi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && + i915->vbt.edp.low_vswing) + return intel_get_buf_trans(&bdw_ddi_translations_edp, n_entries); + else + return intel_get_buf_trans(&bdw_ddi_translations_dp, n_entries); } static int skl_buf_trans_num_entries(enum port port, int n_entries) @@ -876,146 +1147,143 @@ static int skl_buf_trans_num_entries(enum port port, int n_entries) return min(n_entries, 9); } -const struct ddi_buf_trans * -intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) 
+static const struct intel_ddi_buf_trans * +_skl_get_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_ddi_buf_trans *ddi_translations, + int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (IS_KABYLAKE(dev_priv) || - IS_COFFEELAKE(dev_priv) || - IS_COMETLAKE(dev_priv)) { - const struct ddi_buf_trans *ddi_translations = - kbl_get_buf_trans_dp(encoder, n_entries); - *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); - return ddi_translations; - } else if (IS_SKYLAKE(dev_priv)) { - const struct ddi_buf_trans *ddi_translations = - skl_get_buf_trans_dp(encoder, n_entries); - *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); - return ddi_translations; - } else if (IS_BROADWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp); - return bdw_ddi_translations_dp; - } else if (IS_HASWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp); - return hsw_ddi_translations_dp; - } - - *n_entries = 0; - return NULL; + ddi_translations = intel_get_buf_trans(ddi_translations, n_entries); + *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); + return ddi_translations; } -const struct ddi_buf_trans * -intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) +static const struct intel_ddi_buf_trans * +skl_y_get_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) { - const struct ddi_buf_trans *ddi_translations = - skl_get_buf_trans_edp(encoder, n_entries); - *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); - return ddi_translations; - } else if (IS_BROADWELL(dev_priv)) { - return bdw_get_buf_trans_edp(encoder, n_entries); - } else if (IS_HASWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp); - return hsw_ddi_translations_dp; - } - - *n_entries = 0; - return NULL; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return intel_get_buf_trans(&skl_y_ddi_translations_hdmi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && + i915->vbt.edp.low_vswing) + return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_edp, n_entries); + else + return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_dp, n_entries); } -const struct ddi_buf_trans * -intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv, - int *n_entries) +static const struct intel_ddi_buf_trans * +skl_u_get_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) { - if (IS_BROADWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi); - return bdw_ddi_translations_fdi; - } else if (IS_HASWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); - return hsw_ddi_translations_fdi; - } + struct drm_i915_private *i915 = to_i915(encoder->base.dev); - *n_entries = 0; - return NULL; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && + i915->vbt.edp.low_vswing) + return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_edp, n_entries); + else + return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_dp, n_entries); } -const struct ddi_buf_trans * 
-intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder, - int *n_entries) +static const struct intel_ddi_buf_trans * +skl_get_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) { - return skl_get_buf_trans_hdmi(dev_priv, n_entries); - } else if (IS_BROADWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); - return bdw_ddi_translations_hdmi; - } else if (IS_HASWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); - return hsw_ddi_translations_hdmi; - } + struct drm_i915_private *i915 = to_i915(encoder->base.dev); - *n_entries = 0; - return NULL; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && + i915->vbt.edp.low_vswing) + return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_edp, n_entries); + else + return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_dp, n_entries); } -static const struct bxt_ddi_buf_trans * -bxt_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) +static const struct intel_ddi_buf_trans * +kbl_y_get_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) { - *n_entries = ARRAY_SIZE(bxt_ddi_translations_dp); - return bxt_ddi_translations_dp; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return intel_get_buf_trans(&skl_y_ddi_translations_hdmi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && + i915->vbt.edp.low_vswing) + return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_edp, n_entries); + else + return _skl_get_buf_trans_dp(encoder, &kbl_y_ddi_translations_dp, n_entries); } -static const struct bxt_ddi_buf_trans * -bxt_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) +static const struct intel_ddi_buf_trans * +kbl_u_get_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (dev_priv->vbt.edp.low_vswing) { - *n_entries = ARRAY_SIZE(bxt_ddi_translations_edp); - return bxt_ddi_translations_edp; - } + struct drm_i915_private *i915 = to_i915(encoder->base.dev); - return bxt_get_buf_trans_dp(encoder, n_entries); + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && + i915->vbt.edp.low_vswing) + return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_edp, n_entries); + else + return _skl_get_buf_trans_dp(encoder, &kbl_u_ddi_translations_dp, n_entries); } -static const struct bxt_ddi_buf_trans * -bxt_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries) +static const struct intel_ddi_buf_trans * +kbl_get_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) { - *n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi); - return bxt_ddi_translations_hdmi; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) 
&& + i915->vbt.edp.low_vswing) + return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_edp, n_entries); + else + return _skl_get_buf_trans_dp(encoder, &kbl_ddi_translations_dp, n_entries); } -const struct bxt_ddi_buf_trans * +static const struct intel_ddi_buf_trans * bxt_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return bxt_get_buf_trans_hdmi(encoder, n_entries); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - return bxt_get_buf_trans_edp(encoder, n_entries); - return bxt_get_buf_trans_dp(encoder, n_entries); + return intel_get_buf_trans(&bxt_ddi_translations_hdmi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && + i915->vbt.edp.low_vswing) + return intel_get_buf_trans(&bxt_ddi_translations_edp, n_entries); + else + return intel_get_buf_trans(&bxt_ddi_translations_dp, n_entries); } -static const struct cnl_ddi_buf_trans * +static const struct intel_ddi_buf_trans * cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; if (voltage == VOLTAGE_INFO_0_85V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V); - return cnl_ddi_translations_hdmi_0_85V; + return intel_get_buf_trans(&cnl_ddi_translations_hdmi_0_85V, + n_entries); } else if (voltage == VOLTAGE_INFO_0_95V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V); - return cnl_ddi_translations_hdmi_0_95V; + return intel_get_buf_trans(&cnl_ddi_translations_hdmi_0_95V, + n_entries); } else if (voltage == VOLTAGE_INFO_1_05V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V); - return cnl_ddi_translations_hdmi_1_05V; + return intel_get_buf_trans(&cnl_ddi_translations_hdmi_1_05V, + n_entries); } else { *n_entries = 1; /* shut up gcc */ MISSING_CASE(voltage); @@ -1023,21 +1291,21 @@ cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries) return NULL; } -static const struct cnl_ddi_buf_trans * +static const struct intel_ddi_buf_trans * cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; if (voltage == VOLTAGE_INFO_0_85V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V); - return cnl_ddi_translations_dp_0_85V; + return intel_get_buf_trans(&cnl_ddi_translations_dp_0_85V, + n_entries); } else if (voltage == VOLTAGE_INFO_0_95V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V); - return cnl_ddi_translations_dp_0_95V; + return intel_get_buf_trans(&cnl_ddi_translations_dp_0_95V, + n_entries); } else if (voltage == VOLTAGE_INFO_1_05V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V); - return cnl_ddi_translations_dp_1_05V; + return intel_get_buf_trans(&cnl_ddi_translations_dp_1_05V, + n_entries); } else { *n_entries = 1; /* shut up gcc */ MISSING_CASE(voltage); @@ -1045,7 +1313,7 @@ cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) return NULL; } -static const struct cnl_ddi_buf_trans * +static const struct intel_ddi_buf_trans * cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -1053,14 +1321,14 @@ cnl_get_buf_trans_edp(struct intel_encoder 
*encoder, int *n_entries) if (dev_priv->vbt.edp.low_vswing) { if (voltage == VOLTAGE_INFO_0_85V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V); - return cnl_ddi_translations_edp_0_85V; + return intel_get_buf_trans(&cnl_ddi_translations_edp_0_85V, + n_entries); } else if (voltage == VOLTAGE_INFO_0_95V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V); - return cnl_ddi_translations_edp_0_95V; + return intel_get_buf_trans(&cnl_ddi_translations_edp_0_95V, + n_entries); } else if (voltage == VOLTAGE_INFO_1_05V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V); - return cnl_ddi_translations_edp_1_05V; + return intel_get_buf_trans(&cnl_ddi_translations_edp_1_05V, + n_entries); } else { *n_entries = 1; /* shut up gcc */ MISSING_CASE(voltage); @@ -1071,7 +1339,7 @@ cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) } } -const struct cnl_ddi_buf_trans * +static const struct intel_ddi_buf_trans * cnl_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) @@ -1083,25 +1351,16 @@ cnl_get_buf_trans(struct intel_encoder *encoder, return cnl_get_buf_trans_dp(encoder, n_entries); } -static const struct cnl_ddi_buf_trans * -icl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); - return icl_combo_phy_ddi_translations_hdmi; -} - -static const struct cnl_ddi_buf_trans * +static const struct intel_ddi_buf_trans * icl_get_combo_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); - return icl_combo_phy_ddi_translations_dp_hbr2; + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, + n_entries); } -static const struct cnl_ddi_buf_trans * +static const struct intel_ddi_buf_trans * icl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) @@ -1109,176 +1368,109 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (crtc_state->port_clock > 540000) { - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); - return icl_combo_phy_ddi_translations_edp_hbr3; + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, + n_entries); } else if (dev_priv->vbt.edp.low_vswing) { - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); - return icl_combo_phy_ddi_translations_edp_hbr2; - } else if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) { - *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3); - return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3; - } else if (IS_DG1(dev_priv)) { - *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr); - return dg1_combo_phy_ddi_translations_dp_rbr_hbr; + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2, + n_entries); } return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } -const struct cnl_ddi_buf_trans * +static const struct intel_ddi_buf_trans * icl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return icl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries); else 
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return icl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } -static const struct icl_mg_phy_ddi_buf_trans * -icl_get_mg_buf_trans_hdmi(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi); - return icl_mg_phy_ddi_translations_hdmi; -} - -static const struct icl_mg_phy_ddi_buf_trans * +static const struct intel_ddi_buf_trans * icl_get_mg_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 270000) { - *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3); - return icl_mg_phy_ddi_translations_hbr2_hbr3; + return intel_get_buf_trans(&icl_mg_phy_ddi_translations_hbr2_hbr3, + n_entries); } else { - *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr); - return icl_mg_phy_ddi_translations_rbr_hbr; + return intel_get_buf_trans(&icl_mg_phy_ddi_translations_rbr_hbr, + n_entries); } } -const struct icl_mg_phy_ddi_buf_trans * +static const struct intel_ddi_buf_trans * icl_get_mg_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return icl_get_mg_buf_trans_hdmi(encoder, crtc_state, n_entries); + return intel_get_buf_trans(&icl_mg_phy_ddi_translations_hdmi, n_entries); else return icl_get_mg_buf_trans_dp(encoder, crtc_state, n_entries); } -static const struct cnl_ddi_buf_trans * -ehl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); - return icl_combo_phy_ddi_translations_hdmi; -} - -static const struct cnl_ddi_buf_trans * -ehl_get_combo_buf_trans_dp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp); - return ehl_combo_phy_ddi_translations_dp; -} - -static const struct cnl_ddi_buf_trans * +static const struct intel_ddi_buf_trans * ehl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (dev_priv->vbt.edp.low_vswing) { - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); - return icl_combo_phy_ddi_translations_edp_hbr2; - } - - return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); + if (crtc_state->port_clock > 270000) + return intel_get_buf_trans(&ehl_combo_phy_ddi_translations_edp_hbr2, n_entries); + else + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2, n_entries); } -const struct cnl_ddi_buf_trans * +static const struct intel_ddi_buf_trans * ehl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return ehl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); - else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && + dev_priv->vbt.edp.low_vswing) return ehl_get_combo_buf_trans_edp(encoder, crtc_state, 
n_entries); else - return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); -} - -static const struct cnl_ddi_buf_trans * -jsl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); - return icl_combo_phy_ddi_translations_hdmi; -} - -static const struct cnl_ddi_buf_trans * -jsl_get_combo_buf_trans_dp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); - return icl_combo_phy_ddi_translations_dp_hbr2; + return intel_get_buf_trans(&ehl_combo_phy_ddi_translations_dp, n_entries); } -static const struct cnl_ddi_buf_trans * +static const struct intel_ddi_buf_trans * jsl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (dev_priv->vbt.edp.low_vswing) { - if (crtc_state->port_clock > 270000) { - *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr2); - return jsl_combo_phy_ddi_translations_edp_hbr2; - } else { - *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr); - return jsl_combo_phy_ddi_translations_edp_hbr; - } - } - - return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); + if (crtc_state->port_clock > 270000) + return intel_get_buf_trans(&jsl_combo_phy_ddi_translations_edp_hbr2, n_entries); + else + return intel_get_buf_trans(&jsl_combo_phy_ddi_translations_edp_hbr, n_entries); } -const struct cnl_ddi_buf_trans * +static const struct intel_ddi_buf_trans * jsl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return jsl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); - else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && + dev_priv->vbt.edp.low_vswing) return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else - return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, n_entries); } -static const struct cnl_ddi_buf_trans * -tgl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); - return icl_combo_phy_ddi_translations_hdmi; -} - -static const struct cnl_ddi_buf_trans * +static const struct intel_ddi_buf_trans * tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) @@ -1286,28 +1478,20 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (crtc_state->port_clock > 270000) { - if (IS_ROCKETLAKE(dev_priv)) { - *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3); - return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3; - } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) { - *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2); - return tgl_uy_combo_phy_ddi_translations_dp_hbr2; + if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) { + return 
intel_get_buf_trans(&tgl_uy_combo_phy_ddi_translations_dp_hbr2, + n_entries); } else { - *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2); - return tgl_combo_phy_ddi_translations_dp_hbr2; + return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr2, + n_entries); } } else { - if (IS_ROCKETLAKE(dev_priv)) { - *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr); - return rkl_combo_phy_ddi_translations_dp_hbr; - } else { - *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr); - return tgl_combo_phy_ddi_translations_dp_hbr; - } + return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr, + n_entries); } } -static const struct cnl_ddi_buf_trans * +static const struct intel_ddi_buf_trans * tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) @@ -1316,87 +1500,213 @@ tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); if (crtc_state->port_clock > 540000) { - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); - return icl_combo_phy_ddi_translations_edp_hbr3; + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, + n_entries); } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) { - *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl); - return tgl_combo_phy_ddi_translations_edp_hbr2_hobl; + return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl, + n_entries); } else if (dev_priv->vbt.edp.low_vswing) { - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); - return icl_combo_phy_ddi_translations_edp_hbr2; + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2, + n_entries); } return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } -const struct cnl_ddi_buf_trans * +static const struct intel_ddi_buf_trans * tgl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return tgl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } -static const struct tgl_dkl_phy_ddi_buf_trans * -tgl_get_dkl_buf_trans_hdmi(struct intel_encoder *encoder, +static const struct intel_ddi_buf_trans * +dg1_get_combo_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - *n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans); - return tgl_dkl_phy_hdmi_ddi_trans; + if (crtc_state->port_clock > 270000) + return intel_get_buf_trans(&dg1_combo_phy_ddi_translations_dp_hbr2_hbr3, + n_entries); + else + return intel_get_buf_trans(&dg1_combo_phy_ddi_translations_dp_rbr_hbr, + n_entries); +} + +static const struct intel_ddi_buf_trans * +dg1_get_combo_buf_trans_edp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (crtc_state->port_clock > 540000) + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, + n_entries); + else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) + return 
intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl, + n_entries); + else if (dev_priv->vbt.edp.low_vswing) + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2, + n_entries); + else + return dg1_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } -static const struct tgl_dkl_phy_ddi_buf_trans * +static const struct intel_ddi_buf_trans * +dg1_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return dg1_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); + else + return dg1_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +static const struct intel_ddi_buf_trans * +rkl_get_combo_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (crtc_state->port_clock > 270000) + return intel_get_buf_trans(&rkl_combo_phy_ddi_translations_dp_hbr2_hbr3, n_entries); + else + return intel_get_buf_trans(&rkl_combo_phy_ddi_translations_dp_hbr, n_entries); +} + +static const struct intel_ddi_buf_trans * +rkl_get_combo_buf_trans_edp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (crtc_state->port_clock > 540000) { + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, + n_entries); + } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) { + return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl, + n_entries); + } else if (dev_priv->vbt.edp.low_vswing) { + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2, + n_entries); + } + + return rkl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +static const struct intel_ddi_buf_trans * +rkl_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return rkl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); + else + return rkl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +static const struct intel_ddi_buf_trans * +adls_get_combo_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (crtc_state->port_clock > 270000) + return intel_get_buf_trans(&adls_combo_phy_ddi_translations_dp_hbr2_hbr3, n_entries); + else + return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr, n_entries); +} + +static const struct intel_ddi_buf_trans * +adls_get_combo_buf_trans_edp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (crtc_state->port_clock > 540000) + return intel_get_buf_trans(&adls_combo_phy_ddi_translations_edp_hbr3, n_entries); + else if (i915->vbt.edp.hobl && !intel_dp->hobl_failed) + return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl, n_entries); + else if (i915->vbt.edp.low_vswing) + return 
intel_get_buf_trans(&adls_combo_phy_ddi_translations_edp_hbr2, n_entries); + else + return adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +static const struct intel_ddi_buf_trans * +adls_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return adls_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); + else + return adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +static const struct intel_ddi_buf_trans * tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 270000) { - *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans_hbr2); - return tgl_dkl_phy_dp_ddi_trans_hbr2; + return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_dp_hbr2, + n_entries); } else { - *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans); - return tgl_dkl_phy_dp_ddi_trans; + return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_dp_hbr, + n_entries); } } -const struct tgl_dkl_phy_ddi_buf_trans * +static const struct intel_ddi_buf_trans * tgl_get_dkl_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, n_entries); + return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_hdmi, n_entries); else return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries); } -static const struct tgl_dkl_phy_ddi_buf_trans * +static const struct intel_ddi_buf_trans * adlp_get_dkl_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 270000) { - *n_entries = ARRAY_SIZE(adlp_dkl_phy_dp_ddi_trans_hbr2_hbr3); - return adlp_dkl_phy_dp_ddi_trans_hbr2_hbr3; + return intel_get_buf_trans(&adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3, + n_entries); + } else { + return intel_get_buf_trans(&adlp_dkl_phy_ddi_translations_dp_hbr, + n_entries); } - - *n_entries = ARRAY_SIZE(adlp_dkl_phy_dp_ddi_trans_hbr); - return adlp_dkl_phy_dp_ddi_trans_hbr; } -const struct tgl_dkl_phy_ddi_buf_trans * +static const struct intel_ddi_buf_trans * adlp_get_dkl_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, n_entries); + return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_hdmi, n_entries); else return adlp_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries); } @@ -1406,43 +1716,70 @@ int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder, int *default_entry) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + const struct intel_ddi_buf_trans *ddi_translations; int n_entries; - if (DISPLAY_VER(dev_priv) >= 12) { - if (intel_phy_is_combo(dev_priv, phy)) - tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries); - else - tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, &n_entries); - *default_entry = n_entries - 1; - } else if (DISPLAY_VER(dev_priv) == 11) { - if (intel_phy_is_combo(dev_priv, phy)) - icl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries); - else - 
icl_get_mg_buf_trans_hdmi(encoder, crtc_state, &n_entries); - *default_entry = n_entries - 1; - } else if (IS_CANNONLAKE(dev_priv)) { - cnl_get_buf_trans_hdmi(encoder, &n_entries); - *default_entry = n_entries - 1; - } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - bxt_get_buf_trans_hdmi(encoder, &n_entries); - *default_entry = n_entries - 1; - } else if (DISPLAY_VER(dev_priv) == 9) { - intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); - *default_entry = 8; - } else if (IS_BROADWELL(dev_priv)) { - intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); - *default_entry = 7; - } else if (IS_HASWELL(dev_priv)) { - intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); - *default_entry = 6; - } else { - drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n"); + ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); + + if (drm_WARN_ON(&dev_priv->drm, !ddi_translations)) { + *default_entry = 0; return 0; } - if (drm_WARN_ON_ONCE(&dev_priv->drm, n_entries == 0)) - return 0; + *default_entry = ddi_translations->hdmi_default_entry; return n_entries; } + +void intel_ddi_buf_trans_init(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + if (IS_ALDERLAKE_P(i915)) { + if (intel_phy_is_combo(i915, phy)) + encoder->get_buf_trans = tgl_get_combo_buf_trans; + else + encoder->get_buf_trans = adlp_get_dkl_buf_trans; + } else if (IS_ALDERLAKE_S(i915)) { + encoder->get_buf_trans = adls_get_combo_buf_trans; + } else if (IS_ROCKETLAKE(i915)) { + encoder->get_buf_trans = rkl_get_combo_buf_trans; + } else if (IS_DG1(i915)) { + encoder->get_buf_trans = dg1_get_combo_buf_trans; + } else if (DISPLAY_VER(i915) >= 12) { + if (intel_phy_is_combo(i915, phy)) + encoder->get_buf_trans = tgl_get_combo_buf_trans; + else + encoder->get_buf_trans = tgl_get_dkl_buf_trans; + } else if (DISPLAY_VER(i915) == 11) { + if (IS_PLATFORM(i915, INTEL_JASPERLAKE)) + encoder->get_buf_trans = jsl_get_combo_buf_trans; + else if (IS_PLATFORM(i915, INTEL_ELKHARTLAKE)) + encoder->get_buf_trans = ehl_get_combo_buf_trans; + else if (intel_phy_is_combo(i915, phy)) + encoder->get_buf_trans = icl_get_combo_buf_trans; + else + encoder->get_buf_trans = icl_get_mg_buf_trans; + } else if (IS_CANNONLAKE(i915)) { + encoder->get_buf_trans = cnl_get_buf_trans; + } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { + encoder->get_buf_trans = bxt_get_buf_trans; + } else if (IS_CML_ULX(i915) || IS_CFL_ULX(i915) || IS_KBL_ULX(i915)) { + encoder->get_buf_trans = kbl_y_get_buf_trans; + } else if (IS_CML_ULT(i915) || IS_CFL_ULT(i915) || IS_KBL_ULT(i915)) { + encoder->get_buf_trans = kbl_u_get_buf_trans; + } else if (IS_COMETLAKE(i915) || IS_COFFEELAKE(i915) || IS_KABYLAKE(i915)) { + encoder->get_buf_trans = kbl_get_buf_trans; + } else if (IS_SKL_ULX(i915)) { + encoder->get_buf_trans = skl_y_get_buf_trans; + } else if (IS_SKL_ULT(i915)) { + encoder->get_buf_trans = skl_u_get_buf_trans; + } else if (IS_SKYLAKE(i915)) { + encoder->get_buf_trans = skl_get_buf_trans; + } else if (IS_BROADWELL(i915)) { + encoder->get_buf_trans = bdw_get_buf_trans; + } else { + encoder->get_buf_trans = hsw_get_buf_trans; + } +} diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h index 4c2efab38642..05226eb46cd6 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h @@ -12,7 +12,7 @@ struct drm_i915_private; struct 
intel_encoder; struct intel_crtc_state; -struct ddi_buf_trans { +struct hsw_ddi_buf_trans { u32 trans1; /* balance leg enable, de-emph level */ u32 trans2; /* vref sel, vswing */ u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */ @@ -45,60 +45,26 @@ struct tgl_dkl_phy_ddi_buf_trans { u32 dkl_de_emphasis_control; }; -bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table); +union intel_ddi_buf_trans_entry { + struct hsw_ddi_buf_trans hsw; + struct bxt_ddi_buf_trans bxt; + struct cnl_ddi_buf_trans cnl; + struct icl_mg_phy_ddi_buf_trans mg; + struct tgl_dkl_phy_ddi_buf_trans dkl; +}; + +struct intel_ddi_buf_trans { + const union intel_ddi_buf_trans_entry *entries; + u8 num_entries; + u8 hdmi_default_entry; +}; + +bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table); int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *default_entry); -const struct ddi_buf_trans * -intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries); -const struct ddi_buf_trans * -intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv, - int *n_entries); -const struct ddi_buf_trans * -intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder, - int *n_entries); -const struct ddi_buf_trans * -intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries); - -const struct bxt_ddi_buf_trans * -bxt_get_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries); - -const struct tgl_dkl_phy_ddi_buf_trans * -adlp_get_dkl_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries); -const struct cnl_ddi_buf_trans * -tgl_get_combo_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries); -const struct tgl_dkl_phy_ddi_buf_trans * -tgl_get_dkl_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries); -const struct cnl_ddi_buf_trans * -jsl_get_combo_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries); -const struct cnl_ddi_buf_trans * -ehl_get_combo_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries); -const struct cnl_ddi_buf_trans * -icl_get_combo_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries); -const struct icl_mg_phy_ddi_buf_trans * -icl_get_mg_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries); - -const struct cnl_ddi_buf_trans * -cnl_get_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries); +void intel_ddi_buf_trans_init(struct intel_encoder *encoder); #endif diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 3bad4e00f7be..eec6c9e9cda7 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1914,20 +1914,50 @@ static void intel_dpt_unpin(struct i915_address_space *vm) i915_vma_put(dpt->vma); } +static bool +intel_reuse_initial_plane_obj(struct drm_i915_private *i915, + const struct intel_initial_plane_config *plane_config, + struct drm_framebuffer **fb, + struct i915_vma **vma) +{ + struct intel_crtc *crtc; + + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane *plane 
= + to_intel_plane(crtc->base.primary); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + + if (!crtc_state->uapi.active) + continue; + + if (!plane_state->ggtt_vma) + continue; + + if (intel_plane_ggtt_offset(plane_state) == plane_config->base) { + *fb = plane_state->hw.fb; + *vma = plane_state->ggtt_vma; + return true; + } + } + + return false; +} + static void -intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, +intel_find_initial_plane_obj(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) { - struct drm_device *dev = intel_crtc->base.dev; + struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_crtc *c; - struct drm_plane *primary = intel_crtc->base.primary; - struct drm_plane_state *plane_state = primary->state; - struct intel_plane *intel_plane = to_intel_plane(primary); - struct intel_plane_state *intel_state = - to_intel_plane_state(plane_state); struct intel_crtc_state *crtc_state = - to_intel_crtc_state(intel_crtc->base.state); + to_intel_crtc_state(crtc->base.state); + struct intel_plane *plane = + to_intel_plane(crtc->base.primary); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); struct drm_framebuffer *fb; struct i915_vma *vma; @@ -1939,7 +1969,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, if (!plane_config->fb) return; - if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { + if (intel_alloc_initial_plane_obj(crtc, plane_config)) { fb = &plane_config->fb->base; vma = plane_config->vma; goto valid_fb; @@ -1949,25 +1979,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, * Failed to alloc the obj, check to see if we should share * an fb with another CRTC instead */ - for_each_crtc(dev, c) { - struct intel_plane_state *state; - - if (c == &intel_crtc->base) - continue; - - if (!to_intel_crtc_state(c->state)->uapi.active) - continue; - - state = to_intel_plane_state(c->primary->state); - if (!state->ggtt_vma) - continue; - - if (intel_plane_ggtt_offset(state) == plane_config->base) { - fb = state->hw.fb; - vma = state->ggtt_vma; - goto valid_fb; - } - } + if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma)) + goto valid_fb; /* * We've failed to reconstruct the BIOS FB. Current display state @@ -1976,7 +1989,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, * simplest solution is to just disable the primary plane now and * pretend the BIOS never had it enabled. 
*/ - intel_plane_disable_noatomic(intel_crtc, intel_plane); + intel_plane_disable_noatomic(crtc, plane); if (crtc_state->bigjoiner) { struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc; @@ -1986,40 +1999,38 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, return; valid_fb: - plane_state->rotation = plane_config->rotation; - intel_fb_fill_view(to_intel_framebuffer(fb), plane_state->rotation, - &intel_state->view); + plane_state->uapi.rotation = plane_config->rotation; + intel_fb_fill_view(to_intel_framebuffer(fb), + plane_state->uapi.rotation, &plane_state->view); __i915_vma_pin(vma); - intel_state->ggtt_vma = i915_vma_get(vma); - if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0) - if (vma->fence) - intel_state->flags |= PLANE_HAS_FENCE; + plane_state->ggtt_vma = i915_vma_get(vma); + if (intel_plane_uses_fence(plane_state) && + i915_vma_pin_fence(vma) == 0 && vma->fence) + plane_state->flags |= PLANE_HAS_FENCE; - plane_state->src_x = 0; - plane_state->src_y = 0; - plane_state->src_w = fb->width << 16; - plane_state->src_h = fb->height << 16; + plane_state->uapi.src_x = 0; + plane_state->uapi.src_y = 0; + plane_state->uapi.src_w = fb->width << 16; + plane_state->uapi.src_h = fb->height << 16; - plane_state->crtc_x = 0; - plane_state->crtc_y = 0; - plane_state->crtc_w = fb->width; - plane_state->crtc_h = fb->height; + plane_state->uapi.crtc_x = 0; + plane_state->uapi.crtc_y = 0; + plane_state->uapi.crtc_w = fb->width; + plane_state->uapi.crtc_h = fb->height; if (plane_config->tiling) dev_priv->preserve_bios_swizzle = true; - plane_state->fb = fb; + plane_state->uapi.fb = fb; drm_framebuffer_get(fb); - plane_state->crtc = &intel_crtc->base; - intel_plane_copy_uapi_to_hw_state(intel_state, intel_state, - intel_crtc); + plane_state->uapi.crtc = &crtc->base; + intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc); intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); - atomic_or(to_intel_plane(primary)->frontbuffer_bit, - &to_intel_frontbuffer(fb)->bits); + atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits); } unsigned int @@ -2706,10 +2717,10 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state) intel_wait_for_vblank(dev_priv, crtc->pipe); } -static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) +static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc) { - if (intel_crtc->overlay) - (void) intel_overlay_switch_off(intel_crtc->overlay); + if (crtc->overlay) + (void) intel_overlay_switch_off(crtc->overlay); /* Let userspace switch the overlay on again. In most cases userspace * has to recompute where to put it anyway. 
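[Illustrative aside -- not part of the patch.] The intel_ddi_buf_trans.c/.h hunks earlier in this diff fold the per-platform ddi_buf_trans, cnl_ddi_buf_trans, icl_mg_phy_ddi_buf_trans and tgl_dkl_phy_ddi_buf_trans lookups into a single struct intel_ddi_buf_trans holding union intel_ddi_buf_trans_entry entries, reached through the new encoder->get_buf_trans() hook. A minimal sketch of how a caller is expected to consume such a table follows; the helper name, the level clamping and the use of the .dkl view are assumptions made for illustration, not code taken from this patch.

#include "intel_ddi_buf_trans.h"
#include "intel_display_types.h"

/*
 * Sketch only: assumes the struct intel_ddi_buf_trans /
 * union intel_ddi_buf_trans_entry definitions and the
 * encoder->get_buf_trans() hook added by the hunks above.
 */
static u32 example_dkl_de_emphasis(struct intel_encoder *encoder,
				   const struct intel_crtc_state *crtc_state,
				   int level)
{
	int n_entries;
	const struct intel_ddi_buf_trans *trans =
		encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	if (!trans)
		return 0;

	/* clamp an out-of-range vswing/pre-emphasis level to the last entry */
	if (level >= n_entries)
		level = n_entries - 1;

	/* each entry is a union; Type-C DKL PHY callers read the .dkl view */
	return trans->entries[level].dkl.dkl_de_emphasis_control;
}

Combo PHY callers would read the .cnl or .hsw view and MG PHYs the .mg view, per the union members listed in intel_ddi_buf_trans.h.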
@@ -6473,23 +6484,21 @@ int intel_get_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx) { - struct intel_crtc *intel_crtc; - struct intel_encoder *intel_encoder = + struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); - struct drm_crtc *possible_crtc; - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_crtc *crtc = NULL; - struct drm_device *dev = encoder->dev; + struct intel_crtc *possible_crtc; + struct intel_crtc *crtc = NULL; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_mode_config *config = &dev->mode_config; struct drm_atomic_state *state = NULL, *restore_state = NULL; struct drm_connector_state *connector_state; struct intel_crtc_state *crtc_state; - int ret, i = -1; + int ret; drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, connector->name, - encoder->base.id, encoder->name); + encoder->base.base.id, encoder->base.name); old->restore_state = NULL; @@ -6507,9 +6516,9 @@ int intel_get_load_detect_pipe(struct drm_connector *connector, /* See if we already have a CRTC for this connector */ if (connector->state->crtc) { - crtc = connector->state->crtc; + crtc = to_intel_crtc(connector->state->crtc); - ret = drm_modeset_lock(&crtc->mutex, ctx); + ret = drm_modeset_lock(&crtc->base.mutex, ctx); if (ret) goto fail; @@ -6518,17 +6527,17 @@ int intel_get_load_detect_pipe(struct drm_connector *connector, } /* Find an unused one (if possible) */ - for_each_crtc(dev, possible_crtc) { - i++; - if (!(encoder->possible_crtcs & (1 << i))) + for_each_intel_crtc(dev, possible_crtc) { + if (!(encoder->base.possible_crtcs & + drm_crtc_mask(&possible_crtc->base))) continue; - ret = drm_modeset_lock(&possible_crtc->mutex, ctx); + ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx); if (ret) goto fail; - if (possible_crtc->state->enable) { - drm_modeset_unlock(&possible_crtc->mutex); + if (possible_crtc->base.state->enable) { + drm_modeset_unlock(&possible_crtc->base.mutex); continue; } @@ -6547,8 +6556,6 @@ int intel_get_load_detect_pipe(struct drm_connector *connector, } found: - intel_crtc = to_intel_crtc(crtc); - state = drm_atomic_state_alloc(dev); restore_state = drm_atomic_state_alloc(dev); if (!state || !restore_state) { @@ -6565,11 +6572,11 @@ found: goto fail; } - ret = drm_atomic_set_crtc_for_connector(connector_state, crtc); + ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base); if (ret) goto fail; - crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); + crtc_state = intel_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) { ret = PTR_ERR(crtc_state); goto fail; @@ -6582,15 +6589,15 @@ found: if (ret) goto fail; - ret = intel_modeset_disable_planes(state, crtc); + ret = intel_modeset_disable_planes(state, &crtc->base); if (ret) goto fail; ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); if (!ret) - ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); + ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base)); if (!ret) - ret = drm_atomic_add_affected_planes(restore_state, crtc); + ret = drm_atomic_add_affected_planes(restore_state, &crtc->base); if (ret) { drm_dbg_kms(&dev_priv->drm, "Failed to create a copy of old state to restore: %i\n", @@ -6609,7 +6616,7 @@ found: drm_atomic_state_put(state); /* let the connector get through one full cycle before testing */ - 
intel_wait_for_vblank(dev_priv, intel_crtc->pipe); + intel_wait_for_vblank(dev_priv, crtc->pipe); return true; fail: @@ -7281,12 +7288,13 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, } if (dev_priv->display.compute_pipe_wm) { - ret = dev_priv->display.compute_pipe_wm(crtc_state); + ret = dev_priv->display.compute_pipe_wm(state, crtc); if (ret) { drm_dbg_kms(&dev_priv->drm, "Target pipe watermarks are invalid\n"); return ret; } + } if (dev_priv->display.compute_intermediate_wm) { @@ -7299,7 +7307,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, * old state and the new state. We can program these * immediately. */ - ret = dev_priv->display.compute_intermediate_wm(crtc_state); + ret = dev_priv->display.compute_intermediate_wm(state, crtc); if (ret) { drm_dbg_kms(&dev_priv->drm, "No valid intermediate pipe watermarks are possible\n"); @@ -11770,7 +11778,7 @@ intel_user_framebuffer_create(struct drm_device *dev, /* object is backed with LMEM for discrete */ i915 = to_i915(obj->base.dev); - if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj)) { + if (HAS_LMEM(i915) && !i915_gem_object_validates_to_lmem(obj)) { /* object is "remote", not in local memory */ i915_gem_object_put(obj); return ERR_PTR(-EREMOTE); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 88bb05d5c483..d5af5708c9da 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -544,6 +544,11 @@ static int i915_dmc_info(struct seq_file *m, void *unused) seq_printf(m, "fw loaded: %s\n", yesno(intel_dmc_has_payload(dev_priv))); seq_printf(m, "path: %s\n", dmc->fw_path); + seq_printf(m, "Pipe A fw support: %s\n", + yesno(GRAPHICS_VER(dev_priv) >= 12)); + seq_printf(m, "Pipe A fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEA].payload)); + seq_printf(m, "Pipe B fw support: %s\n", yesno(IS_ALDERLAKE_P(dev_priv))); + seq_printf(m, "Pipe B fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEB].payload)); if (!intel_dmc_has_payload(dev_priv)) goto out; @@ -582,7 +587,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused) out: seq_printf(m, "program base: 0x%08x\n", - intel_de_read(dev_priv, DMC_PROGRAM(0))); + intel_de_read(dev_priv, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0))); seq_printf(m, "ssp base: 0x%08x\n", intel_de_read(dev_priv, DMC_SSP_BASE)); seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, DMC_HTP_SKL)); @@ -1225,7 +1230,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused) static void drrs_status_per_crtc(struct seq_file *m, struct drm_device *dev, - struct intel_crtc *intel_crtc) + struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(dev); struct i915_drrs *drrs = &dev_priv->drrs; @@ -1237,7 +1242,7 @@ static void drrs_status_per_crtc(struct seq_file *m, drm_for_each_connector_iter(connector, &conn_iter) { bool supported = false; - if (connector->state->crtc != &intel_crtc->base) + if (connector->state->crtc != &crtc->base) continue; seq_printf(m, "%s:\n", connector->name); @@ -1252,7 +1257,7 @@ static void drrs_status_per_crtc(struct seq_file *m, seq_puts(m, "\n"); - if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) { + if (to_intel_crtc_state(crtc->base.state)->has_drrs) { struct intel_panel *panel; mutex_lock(&drrs->mutex); @@ -1298,16 +1303,16 @@ static int i915_drrs_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = 
node_to_i915(m->private); struct drm_device *dev = &dev_priv->drm; - struct intel_crtc *intel_crtc; + struct intel_crtc *crtc; int active_crtc_cnt = 0; drm_modeset_lock_all(dev); - for_each_intel_crtc(dev, intel_crtc) { - if (intel_crtc->base.state->active) { + for_each_intel_crtc(dev, crtc) { + if (crtc->base.state->active) { active_crtc_cnt++; seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); - drrs_status_per_crtc(m, dev, intel_crtc); + drrs_status_per_crtc(m, dev, crtc); } } drm_modeset_unlock_all(dev); @@ -2064,7 +2069,7 @@ i915_fifo_underrun_reset_write(struct file *filp, size_t cnt, loff_t *ppos) { struct drm_i915_private *dev_priv = filp->private_data; - struct intel_crtc *intel_crtc; + struct intel_crtc *crtc; struct drm_device *dev = &dev_priv->drm; int ret; bool reset; @@ -2076,15 +2081,15 @@ i915_fifo_underrun_reset_write(struct file *filp, if (!reset) return cnt; - for_each_intel_crtc(dev, intel_crtc) { + for_each_intel_crtc(dev, crtc) { struct drm_crtc_commit *commit; struct intel_crtc_state *crtc_state; - ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex); + ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex); if (ret) return ret; - crtc_state = to_intel_crtc_state(intel_crtc->base.state); + crtc_state = to_intel_crtc_state(crtc->base.state); commit = crtc_state->uapi.commit; if (commit) { ret = wait_for_completion_interruptible(&commit->hw_done); @@ -2095,12 +2100,12 @@ i915_fifo_underrun_reset_write(struct file *filp, if (!ret && crtc_state->hw.active) { drm_dbg_kms(&dev_priv->drm, "Re-arming FIFO underruns on pipe %c\n", - pipe_name(intel_crtc->pipe)); + pipe_name(crtc->pipe)); - intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state); + intel_crtc_arm_fifo_underrun(crtc, crtc_state); } - drm_modeset_unlock(&intel_crtc->base.mutex); + drm_modeset_unlock(&crtc->base.mutex); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 4298ae684d7d..285380079aab 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -961,8 +961,9 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv) static void assert_dmc_loaded(struct drm_i915_private *dev_priv) { drm_WARN_ONCE(&dev_priv->drm, - !intel_de_read(dev_priv, DMC_PROGRAM(0)), - "DMC program storage start is NULL\n"); + !intel_de_read(dev_priv, + DMC_PROGRAM(dev_priv->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), + "DMC program storage start is NULL\n"); drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE), "DMC SSP Base Not fine\n"); drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL), diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 04613864cbe8..d94f361b548b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -48,6 +48,7 @@ struct drm_printer; struct __intel_global_objs_state; +struct intel_ddi_buf_trans; /* * Display related stuff @@ -263,6 +264,9 @@ struct intel_encoder { * Returns whether the port clock is enabled or not. 
*/ bool (*is_clock_enabled)(struct intel_encoder *encoder); + const struct intel_ddi_buf_trans *(*get_buf_trans)(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries); enum hpd_pin hpd_pin; enum intel_display_power_domain power_domain; /* for communication with audio component; protected by av_mutex */ @@ -310,7 +314,7 @@ struct intel_panel { /* DPCD backlight */ union { struct { - u8 pwmgen_bit_count; + struct drm_edp_backlight_info info; } vesa; struct { bool sdr_uses_aux; @@ -1040,7 +1044,9 @@ struct intel_crtc_state { bool has_psr; bool has_psr2; bool enable_psr2_sel_fetch; + bool req_psr2_sdp_prior_scanline; u32 dc3co_exitline; + u16 su_y_granularity; /* * Frequence the dpll for the port should run at. Differs from the @@ -1493,12 +1499,14 @@ struct intel_psr { bool colorimetry_support; bool psr2_enabled; bool psr2_sel_fetch_enabled; + bool req_psr2_sdp_prior_scanline; u8 sink_sync_latency; ktime_t last_entry_attempt; ktime_t last_exit; bool sink_not_reliable; bool irq_aux_error; - u16 su_x_granularity; + u16 su_w_granularity; + u16 su_y_granularity; u32 dc3co_exitline; u32 dc3co_exit_delay; struct delayed_work dc3co_work; diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 97308da28059..f8789d4543bf 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -45,6 +45,10 @@ #define GEN12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE +#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 10) +#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 10) +MODULE_FIRMWARE(ADLP_DMC_PATH); + #define ADLS_DMC_PATH DMC_PATH(adls, 2, 01) #define ADLS_DMC_VERSION_REQUIRED DMC_VERSION(2, 1) MODULE_FIRMWARE(ADLS_DMC_PATH); @@ -96,6 +100,7 @@ MODULE_FIRMWARE(BXT_DMC_PATH); #define PACKAGE_V2_MAX_FW_INFO_ENTRIES 32 #define DMC_V1_MAX_MMIO_COUNT 8 #define DMC_V3_MAX_MMIO_COUNT 20 +#define DMC_V1_MMIO_START_RANGE 0x80000 struct intel_css_header { /* 0x09 for DMC */ @@ -239,7 +244,7 @@ struct stepping_info { bool intel_dmc_has_payload(struct drm_i915_private *i915) { - return i915->dmc.dmc_payload; + return i915->dmc.dmc_info[DMC_FW_MAIN].payload; } static const struct stepping_info skl_stepping_info[] = { @@ -316,8 +321,8 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) */ void intel_dmc_load_program(struct drm_i915_private *dev_priv) { - u32 *payload = dev_priv->dmc.dmc_payload; - u32 i, fw_size; + struct intel_dmc *dmc = &dev_priv->dmc; + u32 id, i; if (!HAS_DMC(dev_priv)) { drm_err(&dev_priv->drm, @@ -325,26 +330,31 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv) return; } - if (!intel_dmc_has_payload(dev_priv)) { + if (!dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload) { drm_err(&dev_priv->drm, "Tried to program CSR with empty payload\n"); return; } - fw_size = dev_priv->dmc.dmc_fw_size; assert_rpm_wakelock_held(&dev_priv->runtime_pm); preempt_disable(); - for (i = 0; i < fw_size; i++) - intel_uncore_write_fw(&dev_priv->uncore, DMC_PROGRAM(i), - payload[i]); + for (id = 0; id < DMC_FW_MAX; id++) { + for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) { + intel_uncore_write_fw(&dev_priv->uncore, + DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i), + dmc->dmc_info[id].payload[i]); + } + } preempt_enable(); - for (i = 0; i < dev_priv->dmc.mmio_count; i++) { - intel_de_write(dev_priv, dev_priv->dmc.mmioaddr[i], - dev_priv->dmc.mmiodata[i]); + for (id = 0; id < DMC_FW_MAX; id++) { + for (i = 0; i < dmc->dmc_info[id].mmio_count; i++) { + 
intel_de_write(dev_priv, dmc->dmc_info[id].mmioaddr[i], + dmc->dmc_info[id].mmiodata[i]); + } } dev_priv->dmc.dc_state = 0; @@ -352,62 +362,72 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv) gen9_set_dc_state_debugmask(dev_priv); } +static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info, + const struct stepping_info *si) +{ + if ((fw_info->substepping == '*' && si->stepping == fw_info->stepping) || + (si->stepping == fw_info->stepping && si->substepping == fw_info->substepping) || + /* + * If we don't find a more specific one from above two checks, we + * then check for the generic one to be sure to work even with + * "broken firmware" + */ + (si->stepping == '*' && si->substepping == fw_info->substepping) || + (fw_info->stepping == '*' && fw_info->substepping == '*')) + return true; + + return false; +} + /* * Search fw_info table for dmc_offset to find firmware binary: num_entries is * already sanitized. */ -static u32 find_dmc_fw_offset(const struct intel_fw_info *fw_info, +static void dmc_set_fw_offset(struct intel_dmc *dmc, + const struct intel_fw_info *fw_info, unsigned int num_entries, const struct stepping_info *si, u8 package_ver) { - u32 dmc_offset = DMC_DEFAULT_FW_OFFSET; - unsigned int i; + unsigned int i, id; + + struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc); for (i = 0; i < num_entries; i++) { - if (package_ver > 1 && fw_info[i].dmc_id != 0) - continue; + id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id; - if (fw_info[i].substepping == '*' && - si->stepping == fw_info[i].stepping) { - dmc_offset = fw_info[i].offset; - break; + if (id >= DMC_FW_MAX) { + drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", id); + continue; } - if (si->stepping == fw_info[i].stepping && - si->substepping == fw_info[i].substepping) { - dmc_offset = fw_info[i].offset; - break; - } + /* More specific versions come first, so we don't even have to + * check for the stepping since we already found a previous FW + * for this id. + */ + if (dmc->dmc_info[id].present) + continue; - if (fw_info[i].stepping == '*' && - fw_info[i].substepping == '*') { - /* - * In theory we should stop the search as generic - * entries should always come after the more specific - * ones, but let's continue to make sure to work even - * with "broken" firmwares. 
If we don't find a more - * specific one, then we use this entry - */ - dmc_offset = fw_info[i].offset; + if (fw_info_matches_stepping(&fw_info[i], si)) { + dmc->dmc_info[id].present = true; + dmc->dmc_info[id].dmc_offset = fw_info[i].offset; } } - - return dmc_offset; } static u32 parse_dmc_fw_header(struct intel_dmc *dmc, const struct intel_dmc_header_base *dmc_header, - size_t rem_size) + size_t rem_size, u8 dmc_id) { struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc); + struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id]; unsigned int header_len_bytes, dmc_header_size, payload_size, i; const u32 *mmioaddr, *mmiodata; - u32 mmio_count, mmio_count_max; + u32 mmio_count, mmio_count_max, start_mmioaddr; u8 *payload; - BUILD_BUG_ON(ARRAY_SIZE(dmc->mmioaddr) < DMC_V3_MAX_MMIO_COUNT || - ARRAY_SIZE(dmc->mmioaddr) < DMC_V1_MAX_MMIO_COUNT); + BUILD_BUG_ON(ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V3_MAX_MMIO_COUNT || + ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V1_MAX_MMIO_COUNT); /* * Check if we can access common fields, we will checkc again below @@ -430,6 +450,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, mmio_count_max = DMC_V3_MAX_MMIO_COUNT; /* header_len is in dwords */ header_len_bytes = dmc_header->header_len * 4; + start_mmioaddr = v3->start_mmioaddr; dmc_header_size = sizeof(*v3); } else if (dmc_header->header_ver == 1) { const struct intel_dmc_header_v1 *v1 = @@ -443,6 +464,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, mmio_count = v1->mmio_count; mmio_count_max = DMC_V1_MAX_MMIO_COUNT; header_len_bytes = dmc_header->header_len; + start_mmioaddr = DMC_V1_MMIO_START_RANGE; dmc_header_size = sizeof(*v1); } else { drm_err(&i915->drm, "Unknown DMC fw header version: %u\n", @@ -463,16 +485,11 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, } for (i = 0; i < mmio_count; i++) { - if (mmioaddr[i] < DMC_MMIO_START_RANGE || - mmioaddr[i] > DMC_MMIO_END_RANGE) { - drm_err(&i915->drm, "DMC firmware has wrong mmio address 0x%x\n", - mmioaddr[i]); - return 0; - } - dmc->mmioaddr[i] = _MMIO(mmioaddr[i]); - dmc->mmiodata[i] = mmiodata[i]; + dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]); + dmc_info->mmiodata[i] = mmiodata[i]; } - dmc->mmio_count = mmio_count; + dmc_info->mmio_count = mmio_count; + dmc_info->start_mmioaddr = start_mmioaddr; rem_size -= header_len_bytes; @@ -485,14 +502,14 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size); return 0; } - dmc->dmc_fw_size = dmc_header->fw_size; + dmc_info->dmc_fw_size = dmc_header->fw_size; - dmc->dmc_payload = kmalloc(payload_size, GFP_KERNEL); - if (!dmc->dmc_payload) + dmc_info->payload = kmalloc(payload_size, GFP_KERNEL); + if (!dmc_info->payload) return 0; payload = (u8 *)(dmc_header) + header_len_bytes; - memcpy(dmc->dmc_payload, payload, payload_size); + memcpy(dmc_info->payload, payload, payload_size); return header_len_bytes + payload_size; @@ -509,7 +526,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, { struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc); u32 package_size = sizeof(struct intel_package_header); - u32 num_entries, max_entries, dmc_offset; + u32 num_entries, max_entries; const struct intel_fw_info *fw_info; if (rem_size < package_size) @@ -545,16 +562,11 @@ parse_dmc_fw_package(struct intel_dmc *dmc, fw_info = (const struct intel_fw_info *) ((u8 *)package_header + sizeof(*package_header)); - dmc_offset = find_dmc_fw_offset(fw_info, num_entries, si, - package_header->header_ver); - if (dmc_offset == 
DMC_DEFAULT_FW_OFFSET) { - drm_err(&i915->drm, "DMC firmware not supported for %c stepping\n", - si->stepping); - return 0; - } + dmc_set_fw_offset(dmc, fw_info, num_entries, si, + package_header->header_ver); /* dmc_offset is in dwords */ - return package_size + dmc_offset * 4; + return package_size; error_truncated: drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); @@ -606,7 +618,8 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv, struct intel_dmc *dmc = &dev_priv->dmc; const struct stepping_info *si = intel_get_stepping_info(dev_priv); u32 readcount = 0; - u32 r; + u32 r, offset; + int id; if (!fw) return; @@ -627,9 +640,19 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv, readcount += r; - /* Extract dmc_header information */ - dmc_header = (struct intel_dmc_header_base *)&fw->data[readcount]; - parse_dmc_fw_header(dmc, dmc_header, fw->size - readcount); + for (id = 0; id < DMC_FW_MAX; id++) { + if (!dev_priv->dmc.dmc_info[id].present) + continue; + + offset = readcount + dmc->dmc_info[id].dmc_offset * 4; + if (fw->size - offset < 0) { + drm_err(&dev_priv->drm, "Reading beyond the fw_size\n"); + continue; + } + + dmc_header = (struct intel_dmc_header_base *)&fw->data[offset]; + parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, id); + } } static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv) @@ -705,7 +728,11 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv) */ intel_dmc_runtime_pm_get(dev_priv); - if (IS_ALDERLAKE_S(dev_priv)) { + if (IS_ALDERLAKE_P(dev_priv)) { + dmc->fw_path = ADLP_DMC_PATH; + dmc->required_version = ADLP_DMC_VERSION_REQUIRED; + dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE; + } else if (IS_ALDERLAKE_S(dev_priv)) { dmc->fw_path = ADLS_DMC_PATH; dmc->required_version = ADLS_DMC_VERSION_REQUIRED; dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE; @@ -827,5 +854,5 @@ void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv) intel_dmc_ucode_suspend(dev_priv); drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref); - kfree(dev_priv->dmc.dmc_payload); + kfree(dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload); } diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h index 4c22f567b61b..c3c00ff03869 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.h +++ b/drivers/gpu/drm/i915/display/intel_dmc.h @@ -16,17 +16,30 @@ struct drm_i915_private; #define DMC_VERSION_MAJOR(version) ((version) >> 16) #define DMC_VERSION_MINOR(version) ((version) & 0xffff) +enum { + DMC_FW_MAIN = 0, + DMC_FW_PIPEA, + DMC_FW_PIPEB, + DMC_FW_MAX +}; + struct intel_dmc { struct work_struct work; const char *fw_path; u32 required_version; u32 max_fw_size; /* bytes */ - u32 *dmc_payload; - u32 dmc_fw_size; /* dwords */ u32 version; - u32 mmio_count; - i915_reg_t mmioaddr[20]; - u32 mmiodata[20]; + struct dmc_fw_info { + u32 mmio_count; + i915_reg_t mmioaddr[20]; + u32 mmiodata[20]; + u32 dmc_offset; + u32 start_mmioaddr; + u32 dmc_fw_size; /*dwords */ + u32 *payload; + bool present; + } dmc_info[DMC_FW_MAX]; + u32 dc_state; u32 target_dc_state; u32 allowed_dc_mask; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 6cc03b9e4321..5b52beaddada 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3031,9 +3031,6 @@ void intel_read_dp_sdp(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, unsigned int type) { - if (encoder->type != INTEL_OUTPUT_DDI) - return; - switch (type) { case DP_SDP_VSC: 
intel_read_dp_vsc_sdp(encoder, crtc_state, @@ -4741,7 +4738,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, int refresh_rate) { struct intel_dp *intel_dp = dev_priv->drrs.dp; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum drrs_refresh_rate_type index = DRRS_HIGH_RR; if (refresh_rate <= 0) { @@ -4755,7 +4752,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, return; } - if (!intel_crtc) { + if (!crtc) { drm_dbg_kms(&dev_priv->drm, "DRRS: intel_crtc not initialized\n"); return; @@ -5238,7 +5235,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, } intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); - intel_connector->panel.backlight.power = intel_pps_backlight_power; + if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK)) + intel_connector->panel.backlight.power = intel_pps_backlight_power; intel_panel_setup_backlight(connector, pipe); if (fixed_mode) { diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 8e9ac9ba1d38..6ac568617ef3 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -107,7 +107,7 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) u8 tcon_cap[4]; ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap)); - if (ret < 0) + if (ret != sizeof(tcon_cap)) return false; if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP)) @@ -137,7 +137,7 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe u8 tmp; u8 buf[2] = { 0 }; - if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) < 0) { + if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) != 1) { drm_err(&i915->drm, "Failed to read current backlight mode from DPCD\n"); return 0; } @@ -153,7 +153,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe return panel->backlight.max; } - if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, sizeof(buf)) < 0) { + if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, + sizeof(buf)) != sizeof(buf)) { drm_err(&i915->drm, "Failed to read brightness from DPCD\n"); return 0; } @@ -172,7 +173,8 @@ intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state, buf[0] = level & 0xFF; buf[1] = (level & 0xFF00) >> 8; - if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, 4) < 0) + if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, + sizeof(buf)) != sizeof(buf)) drm_err(dev, "Failed to write brightness level to DPCD\n"); } @@ -203,7 +205,7 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, u8 old_ctrl, ctrl; ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl); - if (ret < 0) { + if (ret != 1) { drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret); return; } @@ -221,7 +223,7 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, } if (ctrl != old_ctrl) - if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) < 0) + if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1) drm_err(&i915->drm, "Failed to configure DPCD brightness controls\n"); } @@ 
-268,153 +270,19 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi } /* VESA backlight callbacks */ -static void set_vesa_backlight_enable(struct intel_dp *intel_dp, bool enable) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 reg_val = 0; - - /* Early return when display use other mechanism to enable backlight. */ - if (!(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP)) - return; - - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, - &reg_val) < 0) { - drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n", - DP_EDP_DISPLAY_CONTROL_REGISTER); - return; - } - if (enable) - reg_val |= DP_EDP_BACKLIGHT_ENABLE; - else - reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE); - - if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, - reg_val) != 1) { - drm_dbg_kms(&i915->drm, "Failed to %s aux backlight\n", - enabledisable(enable)); - } -} - -static bool intel_dp_aux_vesa_backlight_dpcd_mode(struct intel_connector *connector) -{ - struct intel_dp *intel_dp = intel_attached_dp(connector); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 mode_reg; - - if (drm_dp_dpcd_readb(&intel_dp->aux, - DP_EDP_BACKLIGHT_MODE_SET_REGISTER, - &mode_reg) != 1) { - drm_dbg_kms(&i915->drm, - "Failed to read the DPCD register 0x%x\n", - DP_EDP_BACKLIGHT_MODE_SET_REGISTER); - return false; - } - - return (mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) == - DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; -} - -/* - * Read the current backlight value from DPCD register(s) based - * on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported - */ static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, enum pipe unused) { - struct intel_dp *intel_dp = intel_attached_dp(connector); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 read_val[2] = { 0x0 }; - u16 level = 0; - - /* - * If we're not in DPCD control mode yet, the programmed brightness - * value is meaningless and we should assume max brightness - */ - if (!intel_dp_aux_vesa_backlight_dpcd_mode(connector)) - return connector->panel.backlight.max; - - if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, - &read_val, sizeof(read_val)) < 0) { - drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n", - DP_EDP_BACKLIGHT_BRIGHTNESS_MSB); - return 0; - } - level = read_val[0]; - if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) - level = (read_val[0] << 8 | read_val[1]); - - return level; + return connector->panel.backlight.level; } -/* - * Sends the current backlight level over the aux channel, checking if its using - * 8-bit or 16 bit value (MSB and LSB) - */ static void -intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, - u32 level) +intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct intel_dp *intel_dp = intel_attached_dp(connector); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 vals[2] = { 0x0 }; - - vals[0] = level; - - /* Write the MSB and/or LSB */ - if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) { - vals[0] = (level & 0xFF00) >> 8; - vals[1] = (level & 0xFF); - } - if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, - vals, sizeof(vals)) < 0) { - drm_dbg_kms(&i915->drm, - "Failed to write aux backlight level\n"); - return; - } -} - -/* - * Set PWM Frequency divider to match desired frequency
in vbt. - * The PWM Frequency is calculated as 27Mhz / (F x P). - * - Where F = PWM Frequency Pre-Divider value programmed by field 7:0 of the - * EDP_BACKLIGHT_FREQ_SET register (DPCD Address 00728h) - * - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the - * EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h) - */ -static bool intel_dp_aux_vesa_set_pwm_freq(struct intel_connector *connector) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_dp *intel_dp = intel_attached_dp(connector); - const u8 pn = connector->panel.backlight.edp.vesa.pwmgen_bit_count; - int freq, fxp, f, fxp_actual, fxp_min, fxp_max; - - freq = dev_priv->vbt.backlight.pwm_freq_hz; - if (!freq) { - drm_dbg_kms(&dev_priv->drm, - "Use panel default backlight frequency\n"); - return false; - } - - fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq); - f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255); - fxp_actual = f << pn; - - /* Ensure frequency is within 25% of desired value */ - fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4); - fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4); - - if (fxp_min > fxp_actual || fxp_actual > fxp_max) { - drm_dbg_kms(&dev_priv->drm, "Actual frequency out of range\n"); - return false; - } + struct intel_panel *panel = &connector->panel; + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); - if (drm_dp_dpcd_writeb(&intel_dp->aux, - DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) { - drm_dbg_kms(&dev_priv->drm, - "Failed to write aux backlight freq\n"); - return false; - } - return true; + drm_edp_backlight_set_level(&intel_dp->aux, &panel->backlight.edp.vesa.info, level); } static void @@ -422,159 +290,46 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct intel_dp *intel_dp = intel_attached_dp(connector); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_panel *panel = &connector->panel; - u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode; - u8 pwmgen_bit_count = panel->backlight.edp.vesa.pwmgen_bit_count; - - if (drm_dp_dpcd_readb(&intel_dp->aux, - DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) { - drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n", - DP_EDP_BACKLIGHT_MODE_SET_REGISTER); - return; - } - - new_dpcd_buf = dpcd_buf; - edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; - - switch (edp_backlight_mode) { - case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM: - case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET: - case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT: - new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; - new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; - - if (drm_dp_dpcd_writeb(&intel_dp->aux, - DP_EDP_PWMGEN_BIT_COUNT, - pwmgen_bit_count) < 0) - drm_dbg_kms(&i915->drm, - "Failed to write aux pwmgen bit count\n"); - - break; - - /* Do nothing when it is already DPCD mode */ - case DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD: - default: - break; - } - - if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP) - if (intel_dp_aux_vesa_set_pwm_freq(connector)) - new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE; - - if (new_dpcd_buf != dpcd_buf) { - if (drm_dp_dpcd_writeb(&intel_dp->aux, - DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) { - drm_dbg_kms(&i915->drm, - "Failed to write aux backlight mode\n"); - } - } + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); - 
intel_dp_aux_vesa_set_backlight(conn_state, level); - set_vesa_backlight_enable(intel_dp, true); + drm_edp_backlight_enable(&intel_dp->aux, &panel->backlight.edp.vesa.info, level); } static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) { - set_vesa_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)), - false); -} - -static u32 intel_dp_aux_vesa_calc_max_backlight(struct intel_connector *connector) -{ - struct drm_i915_private *i915 = to_i915(connector->base.dev); - struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct intel_panel *panel = &connector->panel; - u32 max_backlight = 0; - int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1; - u8 pn, pn_min, pn_max; - - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_PWMGEN_BIT_COUNT, &pn) == 1) { - pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK; - max_backlight = (1 << pn) - 1; - } - - /* Find desired value of (F x P) - * Note that, if F x P is out of supported range, the maximum value or - * minimum value will applied automatically. So no need to check that. - */ - freq = i915->vbt.backlight.pwm_freq_hz; - drm_dbg_kms(&i915->drm, "VBT defined backlight frequency %u Hz\n", - freq); - if (!freq) { - drm_dbg_kms(&i915->drm, - "Use panel default backlight frequency\n"); - return max_backlight; - } - - fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq); - - /* Use highest possible value of Pn for more granularity of brightness - * adjustment while satifying the conditions below. - * - Pn is in the range of Pn_min and Pn_max - * - F is in the range of 1 and 255 - * - FxP is within 25% of desired value. - * Note: 25% is arbitrary value and may need some tweak. 
- */ - if (drm_dp_dpcd_readb(&intel_dp->aux, - DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) { - drm_dbg_kms(&i915->drm, - "Failed to read pwmgen bit count cap min\n"); - return max_backlight; - } - if (drm_dp_dpcd_readb(&intel_dp->aux, - DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) { - drm_dbg_kms(&i915->drm, - "Failed to read pwmgen bit count cap max\n"); - return max_backlight; - } - pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; - pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK; - - fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4); - fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4); - if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) { - drm_dbg_kms(&i915->drm, - "VBT defined backlight frequency out of range\n"); - return max_backlight; - } - - for (pn = pn_max; pn >= pn_min; pn--) { - f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255); - fxp_actual = f << pn; - if (fxp_min <= fxp_actual && fxp_actual <= fxp_max) - break; - } - - drm_dbg_kms(&i915->drm, "Using eDP pwmgen bit count of %d\n", pn); - if (drm_dp_dpcd_writeb(&intel_dp->aux, - DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) { - drm_dbg_kms(&i915->drm, - "Failed to write aux pwmgen bit count\n"); - return max_backlight; - } - panel->backlight.edp.vesa.pwmgen_bit_count = pn; - - max_backlight = (1 << pn) - 1; + struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); - return max_backlight; + drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info); } -static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, - enum pipe pipe) +static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe) { + struct intel_dp *intel_dp = intel_attached_dp(connector); struct intel_panel *panel = &connector->panel; + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + u16 current_level; + u8 current_mode; + int ret; - panel->backlight.max = intel_dp_aux_vesa_calc_max_backlight(connector); - if (!panel->backlight.max) - return -ENODEV; + ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info, + i915->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd, + &current_level, &current_mode); + if (ret < 0) + return ret; + panel->backlight.max = panel->backlight.edp.vesa.info.max; panel->backlight.min = 0; - panel->backlight.level = intel_dp_aux_vesa_get_backlight(connector, pipe); - panel->backlight.enabled = intel_dp_aux_vesa_backlight_dpcd_mode(connector) && - panel->backlight.level != 0; + if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { + panel->backlight.level = current_level; + panel->backlight.enabled = panel->backlight.level != 0; + } else { + panel->backlight.level = panel->backlight.max; + panel->backlight.enabled = false; + } return 0; } @@ -585,16 +340,12 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector) struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_i915_private *i915 = dp_to_i915(intel_dp); - /* Check the eDP Display control capabilities registers to determine if - * the panel can support backlight control over the aux channel. - * - * TODO: We currently only support AUX only backlight configurations, not backlights which * require a mix of PWM and AUX controls to work. In the mean time, these machines typically * work just fine using normal PWM controls anyway.
*/ - if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && - (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) && - (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) { + if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) && + drm_edp_backlight_supported(intel_dp->edp_dpcd)) { drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n"); return true; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index b170e272bdee..3661cd19ce48 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -308,9 +308,9 @@ intel_dp_mst_atomic_check(struct drm_connector *connector, * connector */ if (new_crtc) { - struct intel_crtc *intel_crtc = to_intel_crtc(new_crtc); + struct intel_crtc *crtc = to_intel_crtc(new_crtc); struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, intel_crtc); + intel_atomic_get_new_crtc_state(state, crtc); if (!crtc_state || !drm_atomic_crtc_needs_modeset(&crtc_state->uapi) || @@ -835,13 +835,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); - if (DISPLAY_VER(dev_priv) <= 12) { - ret = intel_dp_hdcp_init(dig_port, intel_connector); - if (ret) - drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n", - connector->name, connector->base.id); - } - + ret = intel_dp_hdcp_init(dig_port, intel_connector); + if (ret) + drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n", + connector->name, connector->base.id); /* * Reuse the prop from the SST connector because we're * not allowed to create new props after device registration. 
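The intel_dp_aux_backlight.c hunks above drop the driver's private VESA eDP backlight plumbing (PWMGEN bit-count selection, frequency-divider math, DPCD control-mode switching) in favor of the shared drm_edp_backlight_*() helpers. As a rough orientation, the sketch below shows the helper call sequence implied by those hunks; the my_panel structure, the function names and the header path are placeholders for illustration, not part of this patch.

#include <linux/types.h>
#include <drm/drm_dp_helper.h>	/* header location assumed for this kernel series */

struct my_panel {
	struct drm_dp_aux *aux;
	struct drm_edp_backlight_info bl;	/* filled by drm_edp_backlight_init() */
	u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];	/* cached eDP display-control DPCD */
};

static int my_panel_backlight_setup(struct my_panel *p, u32 vbt_pwm_freq_hz)
{
	u16 current_level;
	u8 current_mode;
	int ret;

	if (!drm_edp_backlight_supported(p->edp_dpcd))
		return -ENODEV;

	/* Reads the sink caps and records the max level / frequency choice in p->bl */
	ret = drm_edp_backlight_init(p->aux, &p->bl, vbt_pwm_freq_hz,
				     p->edp_dpcd, &current_level, &current_mode);
	if (ret < 0)
		return ret;

	/* The readback is only meaningful if the sink is already in DPCD mode */
	if (current_mode != DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD)
		current_level = p->bl.max;

	/* Switch the sink to DPCD brightness control and program an initial level */
	return drm_edp_backlight_enable(p->aux, &p->bl, current_level);
}

static void my_panel_backlight_set(struct my_panel *p, u16 level)
{
	drm_edp_backlight_set_level(p->aux, &p->bl, level);
}

static void my_panel_backlight_off(struct my_panel *p)
{
	drm_edp_backlight_disable(p->aux, &p->bl);
}

The net effect visible in the diff is that the PWMGEN and frequency handling previously open-coded in i915 now sits behind drm_edp_backlight_init(), and the driver only keeps a drm_edp_backlight_info instance in panel->backlight.edp.vesa.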
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 1847a161cb37..82effb64a3b9 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -104,7 +104,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) int i; u32 fbc_ctl; - /* Note: fbc.threshold == 1 for i8xx */ + /* Note: fbc.limit == 1 for i8xx */ cfb_pitch = params->cfb_size / FBC_LL_SIZE; if (params->fb.stride < cfb_pitch) cfb_pitch = params->fb.stride; @@ -148,16 +148,35 @@ static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN; } +static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915) +{ + const struct intel_fbc_reg_params *params = &i915->fbc.params; + int limit = i915->fbc.limit; + + if (params->fb.format->cpp[0] == 2) + limit <<= 1; + + switch (limit) { + default: + MISSING_CASE(limit); + fallthrough; + case 1: + return DPFC_CTL_LIMIT_1X; + case 2: + return DPFC_CTL_LIMIT_2X; + case 4: + return DPFC_CTL_LIMIT_4X; + } +} + static void g4x_fbc_activate(struct drm_i915_private *dev_priv) { struct intel_fbc_reg_params *params = &dev_priv->fbc.params; u32 dpfc_ctl; dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN; - if (params->fb.format->cpp[0] == 2) - dpfc_ctl |= DPFC_CTL_LIMIT_2X; - else - dpfc_ctl |= DPFC_CTL_LIMIT_1X; + + dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); if (params->fence_id >= 0) { dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id; @@ -235,24 +254,10 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) { struct intel_fbc_reg_params *params = &dev_priv->fbc.params; u32 dpfc_ctl; - int threshold = dev_priv->fbc.threshold; dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane); - if (params->fb.format->cpp[0] == 2) - threshold++; - switch (threshold) { - case 4: - case 3: - dpfc_ctl |= DPFC_CTL_LIMIT_4X; - break; - case 2: - dpfc_ctl |= DPFC_CTL_LIMIT_2X; - break; - case 1: - dpfc_ctl |= DPFC_CTL_LIMIT_1X; - break; - } + dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); if (params->fence_id >= 0) { dpfc_ctl |= DPFC_CTL_FENCE_EN; @@ -300,7 +305,6 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) { struct intel_fbc_reg_params *params = &dev_priv->fbc.params; u32 dpfc_ctl; - int threshold = dev_priv->fbc.threshold; /* Display WA #0529: skl, kbl, bxt. 
*/ if (DISPLAY_VER(dev_priv) == 9) { @@ -318,21 +322,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) if (IS_IVYBRIDGE(dev_priv)) dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane); - if (params->fb.format->cpp[0] == 2) - threshold++; - - switch (threshold) { - case 4: - case 3: - dpfc_ctl |= DPFC_CTL_LIMIT_4X; - break; - case 2: - dpfc_ctl |= DPFC_CTL_LIMIT_2X; - break; - case 1: - dpfc_ctl |= DPFC_CTL_LIMIT_1X; - break; - } + dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); if (params->fence_id >= 0) { dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; @@ -433,13 +423,8 @@ static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915) return BIT_ULL(32); } -static int find_compression_threshold(struct drm_i915_private *dev_priv, - struct drm_mm_node *node, - unsigned int size, - unsigned int fb_cpp) +static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv) { - int compression_threshold = 1; - int ret; u64 end; /* The FBC hardware for BDW/SKL doesn't have access to the stolen @@ -452,51 +437,69 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv, else end = U64_MAX; - end = min(end, intel_fbc_cfb_base_max(dev_priv)); + return min(end, intel_fbc_cfb_base_max(dev_priv)); +} - /* HACK: This code depends on what we will do in *_enable_fbc. If that - * code changes, this code needs to change as well. - * - * The enable_fbc code will attempt to use one of our 2 compression - * thresholds, therefore, in that case, we only have 1 resort. +static int intel_fbc_max_limit(struct drm_i915_private *dev_priv, int fb_cpp) +{ + /* + * FIXME: FBC1 can have arbitrary cfb stride, + * so we could support different compression ratios. */ + if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) + return 1; + + /* WaFbcOnly1to1Ratio:ctg */ + if (IS_G4X(dev_priv)) + return 1; + + /* FBC2 can only do 1:1, 1:2, 1:4 */ + return fb_cpp == 2 ? 2 : 4; +} + +static int find_compression_limit(struct drm_i915_private *dev_priv, + unsigned int size, + unsigned int fb_cpp) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + u64 end = intel_fbc_stolen_end(dev_priv); + int ret, limit = 1; /* Try to over-allocate to reduce reallocations and fragmentation. 
*/ - ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1, - 4096, 0, end); + ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb, + size <<= 1, 4096, 0, end); if (ret == 0) - return compression_threshold; + return limit; -again: - /* HW's ability to limit the CFB is 1:4 */ - if (compression_threshold > 4 || - (fb_cpp == 2 && compression_threshold == 2)) - return 0; - - ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1, - 4096, 0, end); - if (ret && DISPLAY_VER(dev_priv) <= 4) { - return 0; - } else if (ret) { - compression_threshold <<= 1; - goto again; - } else { - return compression_threshold; + for (; limit <= intel_fbc_max_limit(dev_priv, fb_cpp); limit <<= 1) { + ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb, + size >>= 1, 4096, 0, end); + if (ret == 0) + return limit; } + + return 0; } static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, unsigned int size, unsigned int fb_cpp) { struct intel_fbc *fbc = &dev_priv->fbc; - struct drm_mm_node *compressed_llb; int ret; drm_WARN_ON(&dev_priv->drm, drm_mm_node_allocated(&fbc->compressed_fb)); + drm_WARN_ON(&dev_priv->drm, + drm_mm_node_allocated(&fbc->compressed_llb)); - ret = find_compression_threshold(dev_priv, &fbc->compressed_fb, - size, fb_cpp); + if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) { + ret = i915_gem_stolen_insert_node(dev_priv, &fbc->compressed_llb, + 4096, 4096); + if (ret) + goto err; + } + + ret = find_compression_limit(dev_priv, size, fb_cpp); if (!ret) goto err_llb; else if (ret > 1) { @@ -504,51 +507,46 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); } - fbc->threshold = ret; + fbc->limit = ret; - if (DISPLAY_VER(dev_priv) >= 5) + drm_dbg_kms(&dev_priv->drm, + "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n", + fbc->compressed_fb.size, fbc->limit); + + return 0; + +err_llb: + if (drm_mm_node_allocated(&fbc->compressed_llb)) + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb); +err: + if (drm_mm_initialized(&dev_priv->mm.stolen)) + drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. 
Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); + return -ENOSPC; +} + +static void intel_fbc_program_cfb(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (DISPLAY_VER(dev_priv) >= 5) { intel_de_write(dev_priv, ILK_DPFC_CB_BASE, fbc->compressed_fb.start); - else if (IS_GM45(dev_priv)) { + } else if (IS_GM45(dev_priv)) { intel_de_write(dev_priv, DPFC_CB_BASE, fbc->compressed_fb.start); } else { - compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); - if (!compressed_llb) - goto err_fb; - - ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb, - 4096, 4096); - if (ret) - goto err_fb; - - fbc->compressed_llb = compressed_llb; - GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, fbc->compressed_fb.start, U32_MAX)); GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, - fbc->compressed_llb->start, + fbc->compressed_llb.start, U32_MAX)); + intel_de_write(dev_priv, FBC_CFB_BASE, dev_priv->dsm.start + fbc->compressed_fb.start); intel_de_write(dev_priv, FBC_LL_BASE, - dev_priv->dsm.start + compressed_llb->start); + dev_priv->dsm.start + fbc->compressed_llb.start); } - - drm_dbg_kms(&dev_priv->drm, - "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n", - fbc->compressed_fb.size, fbc->threshold); - - return 0; - -err_fb: - kfree(compressed_llb); - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); -err_llb: - if (drm_mm_initialized(&dev_priv->mm.stolen)) - drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); - return -ENOSPC; } static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) @@ -558,15 +556,10 @@ static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) if (WARN_ON(intel_fbc_hw_is_active(dev_priv))) return; - if (!drm_mm_node_allocated(&fbc->compressed_fb)) - return; - - if (fbc->compressed_llb) { - i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb); - kfree(fbc->compressed_llb); - } - - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); + if (drm_mm_node_allocated(&fbc->compressed_llb)) + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb); + if (drm_mm_node_allocated(&fbc->compressed_fb)) + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); } void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) @@ -753,7 +746,7 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv) struct intel_fbc *fbc = &dev_priv->fbc; return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) > - fbc->compressed_fb.size * fbc->threshold; + fbc->compressed_fb.size * fbc->limit; } static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv) @@ -763,7 +756,7 @@ static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv) if ((DISPLAY_VER(dev_priv) == 9) && cache->fb.modifier != I915_FORMAT_MOD_X_TILED) - return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; + return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->limit) * 8; else return 0; } @@ -1302,6 +1295,8 @@ void intel_fbc_enable(struct intel_atomic_state *state, fbc->no_fbc_reason = "FBC enabled but not active yet\n"; fbc->crtc = crtc; + + intel_fbc_program_cfb(dev_priv); out: mutex_unlock(&fbc->lock); } diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 4af40229f5ec..df05d285f0bd 100644 --- 
a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -335,32 +335,43 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev) * fbcon), so we just find the biggest and use that. */ static bool intel_fbdev_init_bios(struct drm_device *dev, - struct intel_fbdev *ifbdev) + struct intel_fbdev *ifbdev) { struct drm_i915_private *i915 = to_i915(dev); struct intel_framebuffer *fb = NULL; - struct drm_crtc *crtc; - struct intel_crtc *intel_crtc; + struct intel_crtc *crtc; unsigned int max_size = 0; /* Find the largest fb */ - for_each_crtc(dev, crtc) { + for_each_intel_crtc(dev, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane *plane = + to_intel_plane(crtc->base.primary); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); struct drm_i915_gem_object *obj = - intel_fb_obj(crtc->primary->state->fb); - intel_crtc = to_intel_crtc(crtc); + intel_fb_obj(plane_state->uapi.fb); - if (!crtc->state->active || !obj) { + if (!crtc_state->uapi.active) { drm_dbg_kms(&i915->drm, - "pipe %c not active or no fb, skipping\n", - pipe_name(intel_crtc->pipe)); + "[CRTC:%d:%s] not active, skipping\n", + crtc->base.base.id, crtc->base.name); + continue; + } + + if (!obj) { + drm_dbg_kms(&i915->drm, + "[PLANE:%d:%s] no fb, skipping\n", + plane->base.base.id, plane->base.name); continue; } if (obj->base.size > max_size) { drm_dbg_kms(&i915->drm, - "found possible fb from plane %c\n", - pipe_name(intel_crtc->pipe)); - fb = to_intel_framebuffer(crtc->primary->state->fb); + "found possible fb from [PLANE:%d:%s]\n", + plane->base.base.id, plane->base.name); + fb = to_intel_framebuffer(plane_state->uapi.fb); max_size = obj->base.size; } } @@ -372,60 +383,62 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, } /* Now make sure all the pipes will fit into it */ - for_each_crtc(dev, crtc) { + for_each_intel_crtc(dev, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane *plane = + to_intel_plane(crtc->base.primary); unsigned int cur_size; - intel_crtc = to_intel_crtc(crtc); - - if (!crtc->state->active) { + if (!crtc_state->uapi.active) { drm_dbg_kms(&i915->drm, - "pipe %c not active, skipping\n", - pipe_name(intel_crtc->pipe)); + "[CRTC:%d:%s] not active, skipping\n", + crtc->base.base.id, crtc->base.name); continue; } - drm_dbg_kms(&i915->drm, "checking plane %c for BIOS fb\n", - pipe_name(intel_crtc->pipe)); + drm_dbg_kms(&i915->drm, "checking [PLANE:%d:%s] for BIOS fb\n", + plane->base.base.id, plane->base.name); /* * See if the plane fb we found above will fit on this * pipe. Note we need to use the selected fb's pitch and bpp * rather than the current pipe's, since they differ. 
*/ - cur_size = crtc->state->adjusted_mode.crtc_hdisplay; + cur_size = crtc_state->uapi.adjusted_mode.crtc_hdisplay; cur_size = cur_size * fb->base.format->cpp[0]; if (fb->base.pitches[0] < cur_size) { drm_dbg_kms(&i915->drm, - "fb not wide enough for plane %c (%d vs %d)\n", - pipe_name(intel_crtc->pipe), + "fb not wide enough for [PLANE:%d:%s] (%d vs %d)\n", + plane->base.base.id, plane->base.name, cur_size, fb->base.pitches[0]); fb = NULL; break; } - cur_size = crtc->state->adjusted_mode.crtc_vdisplay; + cur_size = crtc_state->uapi.adjusted_mode.crtc_vdisplay; cur_size = intel_fb_align_height(&fb->base, 0, cur_size); cur_size *= fb->base.pitches[0]; drm_dbg_kms(&i915->drm, - "pipe %c area: %dx%d, bpp: %d, size: %d\n", - pipe_name(intel_crtc->pipe), - crtc->state->adjusted_mode.crtc_hdisplay, - crtc->state->adjusted_mode.crtc_vdisplay, + "[CRTC:%d:%s] area: %dx%d, bpp: %d, size: %d\n", + crtc->base.base.id, crtc->base.name, + crtc_state->uapi.adjusted_mode.crtc_hdisplay, + crtc_state->uapi.adjusted_mode.crtc_vdisplay, fb->base.format->cpp[0] * 8, cur_size); if (cur_size > max_size) { drm_dbg_kms(&i915->drm, - "fb not big enough for plane %c (%d vs %d)\n", - pipe_name(intel_crtc->pipe), + "fb not big enough for [PLANE:%d:%s] (%d vs %d)\n", + plane->base.base.id, plane->base.name, cur_size, max_size); fb = NULL; break; } drm_dbg_kms(&i915->drm, - "fb big enough for plane %c (%d >= %d)\n", - pipe_name(intel_crtc->pipe), + "fb big enough [PLANE:%d:%s] (%d >= %d)\n", + plane->base.base.id, plane->base.name, max_size, cur_size); } @@ -441,15 +454,20 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, drm_framebuffer_get(&ifbdev->fb->base); /* Final pass to check if any active pipes don't have fbs */ - for_each_crtc(dev, crtc) { - intel_crtc = to_intel_crtc(crtc); - - if (!crtc->state->active) + for_each_intel_crtc(dev, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane *plane = + to_intel_plane(crtc->base.primary); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + + if (!crtc_state->uapi.active) continue; - drm_WARN(dev, !crtc->primary->state->fb, - "re-used BIOS config but lost an fb on crtc %d\n", - crtc->base.id); + drm_WARN(dev, !plane_state->uapi.fb, + "re-used BIOS config but lost an fb on [PLANE:%d:%s]\n", + plane->base.base.id, plane->base.name); } diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index cef1061fd6cb..e10b9cd8e86e 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -4,7 +4,6 @@ */ #include "intel_atomic.h" #include "intel_ddi.h" -#include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_fdi.h" @@ -96,10 +95,10 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, } } -int ilk_fdi_compute_config(struct intel_crtc *intel_crtc, - struct intel_crtc_state *pipe_config) +int ilk_fdi_compute_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { - struct drm_device *dev = intel_crtc->base.dev; + struct drm_device *dev = crtc->base.dev; struct drm_i915_private *i915 = to_i915(dev); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int lane, link_bw, fdi_dotclock, ret; @@ -125,7 +124,7 @@ retry: intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, link_bw, &pipe_config->fdi_m_n, false, false); - ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, 
pipe_config); + ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config); if (ret == -EDEADLK) return ret; @@ -569,9 +568,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, u32 temp, i, rx_ctl_val; int n_entries; - intel_ddi_get_buf_trans_fdi(dev_priv, &n_entries); + encoder->get_buf_trans(encoder, crtc_state, &n_entries); - intel_prepare_dp_ddi_buffers(encoder, crtc_state); + hsw_prepare_dp_ddi_buffers(encoder, crtc_state); /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the * mode set "sequence for CRT port" document: @@ -691,9 +690,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - enum pipe pipe = intel_crtc->pipe; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp; @@ -726,11 +725,11 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) } } -void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc) +void ilk_fdi_pll_disable(struct intel_crtc *crtc) { - struct drm_device *dev = intel_crtc->base.dev; + struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe = intel_crtc->pipe; + enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp; diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 7e51c98c475e..852af2b23540 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -270,8 +270,8 @@ static void ibx_write_infoframe(struct intel_encoder *encoder, { const u32 *data = frame; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); - i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe); u32 val = intel_de_read(dev_priv, reg); int i; @@ -286,13 +286,13 @@ static void ibx_write_infoframe(struct intel_encoder *encoder, intel_de_write(dev_priv, reg, val); for (i = 0; i < len; i += 4) { - intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), + intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), *data); data++; } /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0); + intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; @@ -349,8 +349,8 @@ static void cpt_write_infoframe(struct intel_encoder *encoder, { const u32 *data = frame; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); - i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe); u32 val = intel_de_read(dev_priv, reg); int i; @@ -368,13 +368,13 @@ static void cpt_write_infoframe(struct intel_encoder *encoder, intel_de_write(dev_priv, reg, val); for (i = 0; i < len; i += 4) { - intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), + intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), *data); data++; } /* Write every possible data byte to force correct ECC calculation. */ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0); + intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; @@ -427,8 +427,8 @@ static void vlv_write_infoframe(struct intel_encoder *encoder, { const u32 *data = frame; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); - i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe); u32 val = intel_de_read(dev_priv, reg); int i; @@ -444,13 +444,13 @@ static void vlv_write_infoframe(struct intel_encoder *encoder, for (i = 0; i < len; i += 4) { intel_de_write(dev_priv, - VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); + VLV_TVIDEO_DIP_DATA(crtc->pipe), *data); data++; } /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) intel_de_write(dev_priv, - VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0); + VLV_TVIDEO_DIP_DATA(crtc->pipe), 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; @@ -1040,10 +1040,10 @@ static void ibx_set_infoframes(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; - i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe); u32 val = intel_de_read(dev_priv, reg); u32 port = VIDEO_DIP_PORT(encoder->port); @@ -1099,9 +1099,9 @@ static void cpt_set_infoframes(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe); u32 val = intel_de_read(dev_priv, reg); assert_hdmi_port_disabled(intel_hdmi); @@ -1148,9 +1148,9 @@ static void vlv_set_infoframes(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); + i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe); u32 val = intel_de_read(dev_priv, reg); u32 port = VIDEO_DIP_PORT(encoder->port); @@ -1465,14 +1465,12 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector, { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_crtc *crtc = connector->base.state->crtc; - struct intel_crtc *intel_crtc = container_of(crtc, - struct intel_crtc, base); + struct intel_crtc *crtc = to_intel_crtc(connector->base.state->crtc); u32 scanline; int ret; for (;;) { - scanline = intel_de_read(dev_priv, PIPEDSL(intel_crtc->pipe)); + scanline = intel_de_read(dev_priv, PIPEDSL(crtc->pipe)); if (scanline > 100 && scanline < 200) break; usleep_range(25, 50); diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 7f40e9f60bc2..e0381b0fce91 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -411,12 +411,12 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, struct intel_connector *intel_connector = lvds_encoder->attached_connector; struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; - struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); unsigned int lvds_bpp; int ret; /* Should never happen!! 
*/ - if (DISPLAY_VER(dev_priv) < 4 && intel_crtc->pipe == 0) { + if (DISPLAY_VER(dev_priv) < 4 && crtc->pipe == 0) { drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 77865cf6641f..9643624fe160 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -265,32 +265,44 @@ static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp) return val; } -static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp) +static void intel_dp_get_su_granularity(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u16 val; ssize_t r; + u16 w; + u8 y; + + /* If sink don't have specific granularity requirements set legacy ones */ + if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) { + /* As PSR2 HW sends full lines, we do not care about x granularity */ + w = 4; + y = 4; + goto exit; + } - /* - * Returning the default X granularity if granularity not required or - * if DPCD read fails - */ - if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) - return 4; - - r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2); + r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2); if (r != 2) drm_dbg_kms(&i915->drm, "Unable to read DP_PSR2_SU_X_GRANULARITY\n"); - /* * Spec says that if the value read is 0 the default granularity should * be used instead. */ - if (r != 2 || val == 0) - val = 4; + if (r != 2 || w == 0) + w = 4; - return val; + r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1); + if (r != 1) { + drm_dbg_kms(&i915->drm, + "Unable to read DP_PSR2_SU_Y_GRANULARITY\n"); + y = 4; + } + if (y == 0) + y = 1; + +exit: + intel_dp->psr.su_w_granularity = w; + intel_dp->psr.su_y_granularity = y; } void intel_psr_init_dpcd(struct intel_dp *intel_dp) @@ -346,8 +358,7 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) if (intel_dp->psr.sink_psr2_support) { intel_dp->psr.colorimetry_support = intel_dp_get_colorimetry_status(intel_dp); - intel_dp->psr.su_x_granularity = - intel_dp_get_su_x_granulartiy(intel_dp); + intel_dp_get_su_granularity(intel_dp); } } } @@ -407,6 +418,9 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp) dpcd_val |= DP_PSR_CRC_VERIFICATION; } + if (intel_dp->psr.req_psr2_sdp_prior_scanline) + dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE; + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); @@ -531,7 +545,34 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1); val |= intel_psr2_get_tp_time(intel_dp); - if (DISPLAY_VER(dev_priv) >= 12) { + /* Wa_22012278275:adlp */ + if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_D1)) { + static const u8 map[] = { + 2, /* 5 lines */ + 1, /* 6 lines */ + 0, /* 7 lines */ + 3, /* 8 lines */ + 6, /* 9 lines */ + 5, /* 10 lines */ + 4, /* 11 lines */ + 7, /* 12 lines */ + }; + /* + * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see + * comments bellow for more information + */ + u32 tmp, lines = 7; + + val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; + + tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; + tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT; + val |= tmp; + + tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; + tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT; + val |= tmp; + } else if 
(DISPLAY_VER(dev_priv) >= 12) { /* * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default * values from BSpec. In order to setting an optimal power @@ -547,6 +588,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) val |= EDP_PSR2_FAST_WAKE(7); } + if (intel_dp->psr.req_psr2_sdp_prior_scanline) + val |= EDP_PSR2_SU_SDP_SCANLINE; + if (intel_dp->psr.psr2_sel_fetch_enabled) { /* WA 1408330847 */ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) || @@ -689,6 +733,10 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state)) return; + /* Wa_16011303918:adlp */ + if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0)) + return; + /* * DC3CO Exit time 200us B.Spec 49196 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1 @@ -742,6 +790,63 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, return crtc_state->enable_psr2_sel_fetch = true; } +static bool psr2_granularity_check(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state) +{ + const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay; + const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; + u16 y_granularity = 0; + + /* PSR2 HW only send full lines so we only need to validate the width */ + if (crtc_hdisplay % intel_dp->psr.su_w_granularity) + return false; + + if (crtc_vdisplay % intel_dp->psr.su_y_granularity) + return false; + + /* HW tracking is only aligned to 4 lines */ + if (!crtc_state->enable_psr2_sel_fetch) + return intel_dp->psr.su_y_granularity == 4; + + /* + * For SW tracking we can adjust the y to match sink requirement if + * multiple of 4 + */ + if (intel_dp->psr.su_y_granularity <= 2) + y_granularity = 4; + else if ((intel_dp->psr.su_y_granularity % 4) == 0) + y_granularity = intel_dp->psr.su_y_granularity; + + if (y_granularity == 0 || crtc_vdisplay % y_granularity) + return false; + + crtc_state->su_y_granularity = y_granularity; + return true; +} + +static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state) +{ + const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode; + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u32 hblank_total, hblank_ns, req_ns; + + hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; + hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock); + + /* From spec: (72 / number of lanes) * 1000 / symbol clock frequency MHz */ + req_ns = (72 / crtc_state->lane_count) * 1000 / (crtc_state->port_clock / 1000); + + if ((hblank_ns - req_ns) > 100) + return true; + + if (DISPLAY_VER(dev_priv) < 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b) + return false; + + crtc_state->req_psr2_sdp_prior_scanline = true; + return true; +} + static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { @@ -824,19 +929,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } - /* - * HW sends SU blocks of size four scan lines, which means the starting - * X coordinate and Y granularity requirements will always be met. We - * only need to validate the SU block width is a multiple of - * x granularity. 
- */ - if (crtc_hdisplay % intel_dp->psr.su_x_granularity) { - drm_dbg_kms(&dev_priv->drm, - "PSR2 not enabled, hdisplay(%d) not multiple of %d\n", - crtc_hdisplay, intel_dp->psr.su_x_granularity); - return false; - } - if (HAS_PSR2_SEL_FETCH(dev_priv)) { if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && !HAS_PSR_HW_TRACKING(dev_priv)) { @@ -853,6 +945,11 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } + if (!psr2_granularity_check(intel_dp, crtc_state)) { + drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n"); + return false; + } + if (!crtc_state->enable_psr2_sel_fetch && (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) { drm_dbg_kms(&dev_priv->drm, @@ -862,6 +959,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } + if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n"); + return false; + } + + /* Wa_16011303918:adlp */ + if (crtc_state->vrr.enable && + IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0)) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, not compatible with HW stepping + VRR\n"); + return false; + } + tgl_dc3co_exitline_compute_config(intel_dp, crtc_state); return true; } @@ -1048,6 +1159,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING, intel_dp->psr.psr2_sel_fetch_enabled ? IGNORE_PSR2_HW_TRACKING : 0); + + /* Wa_16011168373:adlp */ + if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) && + intel_dp->psr.psr2_enabled) + intel_de_rmw(dev_priv, + TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder), + TRANS_SET_CONTEXT_LATENCY_MASK, + TRANS_SET_CONTEXT_LATENCY_VALUE(1)); } static bool psr_interrupt_error_check(struct intel_dp *intel_dp) @@ -1101,6 +1220,8 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, intel_dp->psr.dc3co_exit_delay = val; intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline; intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch; + intel_dp->psr.req_psr2_sdp_prior_scanline = + crtc_state->req_psr2_sdp_prior_scanline; if (!psr_interrupt_error_check(intel_dp)) return; @@ -1225,6 +1346,13 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0); + /* Wa_16011168373:adlp */ + if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) && + intel_dp->psr.psr2_enabled) + intel_de_rmw(dev_priv, + TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder), + TRANS_SET_CONTEXT_LATENCY_MASK, 0); + /* Disable PSR on Sink */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); @@ -1432,6 +1560,16 @@ static void clip_area_update(struct drm_rect *overlap_damage_area, overlap_damage_area->y2 = damage_area->y2; } +static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state, + struct drm_rect *pipe_clip) +{ + const u16 y_alignment = crtc_state->su_y_granularity; + + pipe_clip->y1 -= pipe_clip->y1 % y_alignment; + if (pipe_clip->y2 % y_alignment) + pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment; +} + int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -1540,10 +1678,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, if (full_update) goto skip_sel_fetch_set_loop; - /* It must be aligned to 4 lines */ - pipe_clip.y1 -= pipe_clip.y1 % 4; - if 
(pipe_clip.y2 % 4) - pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4; + intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip); /* * Now that we have the pipe damaged area check if it intersect with diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index 98dd787b00e3..8a52b7a16774 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -53,6 +53,12 @@ static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915) drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n"); } +static void quirk_no_pps_backlight_power_hook(struct drm_i915_private *i915) +{ + i915->quirks |= QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK; + drm_info(&i915->drm, "Applying no pps backlight power quirk\n"); +} + struct intel_quirk { int device; int subsystem_vendor; @@ -72,6 +78,12 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) return 1; } +static int intel_dmi_no_pps_backlight(const struct dmi_system_id *id) +{ + DRM_INFO("No pps backlight support on %s\n", id->ident); + return 1; +} + static const struct intel_dmi_quirk intel_dmi_quirks[] = { { .dmi_id_list = &(const struct dmi_system_id[]) { @@ -96,6 +108,28 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = { }, .hook = quirk_invert_brightness, }, + { + .dmi_id_list = &(const struct dmi_system_id[]) { + { + .callback = intel_dmi_no_pps_backlight, + .ident = "Google Lillipup sku524294", + .matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Google"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Lindar"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "sku524294"), + }, + }, + { + .callback = intel_dmi_no_pps_backlight, + .ident = "Google Lillipup sku524295", + .matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Google"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Lindar"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "sku524295"), + }, + }, + { } + }, + .hook = quirk_no_pps_backlight_power_hook, + }, }; static struct intel_quirk intel_quirks[] = { diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index e4f91d7a5c60..6cb27599ea03 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -1824,7 +1824,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); - struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); u32 temp; bool input1, input2; int i; @@ -1835,7 +1835,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state, intel_sdvo_write_sdvox(intel_sdvo, temp); for (i = 0; i < 2; i++) - intel_wait_for_vblank(dev_priv, intel_crtc->pipe); + intel_wait_for_vblank(dev_priv, crtc->pipe); success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); /* diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index c23c210a55f5..3ffece568ed9 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -556,7 +556,7 @@ intel_tc_port_get_target_mode(struct intel_digital_port *dig_port) } static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port, - int required_lanes) + int required_lanes, bool force_disconnect) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum tc_port_mode old_tc_mode = dig_port->tc_mode; 
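Stepping back to the PSR2 selective-fetch change above: the damaged area is no longer padded to a fixed four lines but to the sink's reported SU Y granularity. A minimal, standalone sketch of that rounding rule follows; the struct name and demo values are hypothetical, only the arithmetic mirrors intel_psr2_sel_fetch_pipe_alignment().

#include <stdio.h>

struct clip { int y1, y2; };

/* Expand [y1, y2) so both edges land on a multiple of y_align. */
static void align_clip(struct clip *c, int y_align)
{
	c->y1 -= c->y1 % y_align;			/* round the start down */
	if (c->y2 % y_align)
		c->y2 = (c->y2 / y_align + 1) * y_align;	/* round the end up */
}

int main(void)
{
	struct clip c = { .y1 = 13, .y2 = 118 };

	align_clip(&c, 8);	/* e.g. a sink reporting an 8-line SU granularity */
	printf("aligned clip: %d..%d\n", c.y1, c.y2);	/* prints 8..120 */
	return 0;
}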
@@ -572,7 +572,8 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port, } icl_tc_phy_disconnect(dig_port); - icl_tc_phy_connect(dig_port, required_lanes); + if (!force_disconnect) + icl_tc_phy_connect(dig_port, required_lanes); drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n", dig_port->tc_port_name, @@ -662,7 +663,7 @@ bool intel_tc_port_connected(struct intel_encoder *encoder) } static void __intel_tc_port_lock(struct intel_digital_port *dig_port, - int required_lanes) + int required_lanes, bool force_disconnect) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); intel_wakeref_t wakeref; @@ -676,8 +677,9 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port, tc_cold_wref = tc_cold_block(dig_port); - if (intel_tc_port_needs_reset(dig_port)) - intel_tc_port_reset_mode(dig_port, required_lanes); + if (force_disconnect || intel_tc_port_needs_reset(dig_port)) + intel_tc_port_reset_mode(dig_port, required_lanes, + force_disconnect); tc_cold_unblock(dig_port, tc_cold_wref); } @@ -688,7 +690,7 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port, void intel_tc_port_lock(struct intel_digital_port *dig_port) { - __intel_tc_port_lock(dig_port, 1); + __intel_tc_port_lock(dig_port, 1, false); } void intel_tc_port_unlock(struct intel_digital_port *dig_port) @@ -702,6 +704,24 @@ void intel_tc_port_unlock(struct intel_digital_port *dig_port) wakeref); } +/** + * intel_tc_port_disconnect_phy: disconnect TypeC PHY from display port + * @dig_port: digital port + * + * Disconnect the given digital port from its TypeC PHY (handing back the + * control of the PHY to the TypeC subsystem). The only purpose of this + * function is to force the disconnect even with a TypeC display output still + * plugged to the TypeC connector, which is required by the TypeC firmwares + * during system suspend and shutdown. Otherwise - during the unplug event + * handling - the PHY ownership is released automatically by + * intel_tc_port_reset_mode(), when calling this function is not required. 
+ */ +void intel_tc_port_disconnect_phy(struct intel_digital_port *dig_port) +{ + __intel_tc_port_lock(dig_port, 1, true); + intel_tc_port_unlock(dig_port); +} + bool intel_tc_port_ref_held(struct intel_digital_port *dig_port) { return mutex_is_locked(&dig_port->tc_lock) || @@ -711,7 +731,7 @@ bool intel_tc_port_ref_held(struct intel_digital_port *dig_port) void intel_tc_port_get_link(struct intel_digital_port *dig_port, int required_lanes) { - __intel_tc_port_lock(dig_port, required_lanes); + __intel_tc_port_lock(dig_port, required_lanes, false); dig_port->tc_link_refcount++; intel_tc_port_unlock(dig_port); } diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index 0eacbd76ec15..0c881f645e27 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -13,6 +13,8 @@ struct intel_digital_port; struct intel_encoder; bool intel_tc_port_connected(struct intel_encoder *encoder); +void intel_tc_port_disconnect_phy(struct intel_digital_port *dig_port); + u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port); u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port); int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port); diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index aa52af7891f0..d02f09f7e750 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -1420,7 +1420,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_tv *intel_tv = enc_to_tv(encoder); const struct intel_tv_connector_state *tv_conn_state = to_intel_tv_connector_state(conn_state); @@ -1466,7 +1466,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state, break; } - tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe); + tv_ctl |= TV_ENC_PIPE_SEL(crtc->pipe); switch (tv_mode->oversample) { case 8: @@ -1571,8 +1571,7 @@ static int intel_tv_detect_type(struct intel_tv *intel_tv, struct drm_connector *connector) { - struct drm_crtc *crtc = connector->state->crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc *crtc = to_intel_crtc(connector->state->crtc); struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 tv_ctl, save_tv_ctl; @@ -1594,7 +1593,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, /* Poll for TV detection */ tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK); tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; - tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe); + tv_ctl |= TV_ENC_PIPE_SEL(crtc->pipe); tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK); tv_dac |= (TVDAC_STATE_CHG_EN | @@ -1619,7 +1618,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, intel_de_write(dev_priv, TV_DAC, tv_dac); intel_de_posting_read(dev_priv, TV_DAC); - intel_wait_for_vblank(dev_priv, intel_crtc->pipe); + intel_wait_for_vblank(dev_priv, crtc->pipe); type = -1; tv_dac = intel_de_read(dev_priv, TV_DAC); @@ -1652,7 +1651,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, intel_de_posting_read(dev_priv, TV_CTL); /* For unknown reasons the hw barfs if we don't do this vblank wait. 
*/ - intel_wait_for_vblank(dev_priv, intel_crtc->pipe); + intel_wait_for_vblank(dev_priv, crtc->pipe); /* Restore interrupt config */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c index f002b82ba9c0..fa779f7ea415 100644 --- a/drivers/gpu/drm/i915/display/intel_vga.c +++ b/drivers/gpu/drm/i915/display/intel_vga.c @@ -29,6 +29,9 @@ void intel_vga_disable(struct drm_i915_private *dev_priv) i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv); u8 sr1; + if (intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE) + return; + /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); outb(SR01, VGA_SR_INDEX); @@ -121,9 +124,9 @@ intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode) } static unsigned int -intel_vga_set_decode(void *cookie, bool enable_decode) +intel_vga_set_decode(struct pci_dev *pdev, bool enable_decode) { - struct drm_i915_private *i915 = cookie; + struct drm_i915_private *i915 = pdev_to_i915(pdev); intel_vga_set_state(i915, enable_decode); @@ -136,6 +139,7 @@ intel_vga_set_decode(void *cookie, bool enable_decode) int intel_vga_register(struct drm_i915_private *i915) { + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); int ret; @@ -147,7 +151,7 @@ int intel_vga_register(struct drm_i915_private *i915) * then we do not take part in VGA arbitration and the * vga_client_register() fails with -ENODEV. */ - ret = vga_client_register(pdev, i915, NULL, intel_vga_set_decode); + ret = vga_client_register(pdev, intel_vga_set_decode); if (ret && ret != -ENODEV) return ret; @@ -158,5 +162,5 @@ void intel_vga_unregister(struct drm_i915_private *i915) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); - vga_client_register(pdev, NULL, NULL, NULL); + vga_client_unregister(pdev); } diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c index 394b7bbf48d8..911a113ee006 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.c +++ b/drivers/gpu/drm/i915/display/skl_scaler.c @@ -96,9 +96,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, { struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; - struct intel_crtc *intel_crtc = - to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -141,7 +140,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: " "Staged freeing scaler id %d scaler_users = 0x%x\n", - intel_crtc->pipe, scaler_user, *scaler_id, + crtc->pipe, scaler_user, *scaler_id, scaler_state->scaler_users); *scaler_id = -1; } @@ -167,7 +166,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: src %ux%u dst %ux%u " "size is out of scaler range\n", - intel_crtc->pipe, scaler_user, src_w, src_h, + crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h); return -EINVAL; } @@ -176,7 +175,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, scaler_state->scaler_users |= (1 << scaler_user); drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: " "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", 
- intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, + crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, scaler_state->scaler_users); return 0; @@ -515,17 +514,17 @@ skl_program_plane_scaler(struct intel_plane *plane, (crtc_w << 16) | crtc_h); } -static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) +static void skl_detach_scaler(struct intel_crtc *crtc, int id) { - struct drm_device *dev = intel_crtc->base.dev; + struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0); - intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); - intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); + intel_de_write_fw(dev_priv, SKL_PS_CTRL(crtc->pipe, id), 0); + intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(crtc->pipe, id), 0); + intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, id), 0); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -535,15 +534,15 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) */ void skl_detach_scalers(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; int i; /* loop through and disable scalers that aren't in use */ - for (i = 0; i < intel_crtc->num_scalers; i++) { + for (i = 0; i < crtc->num_scalers; i++) { if (!scaler_state->scalers[i].in_use) - skl_detach_scaler(intel_crtc, i); + skl_detach_scaler(crtc, i); } } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 084c9c43b2ed..0ee4ff341e25 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -780,10 +780,9 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, const struct drm_connector_state *conn_state) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - struct drm_crtc *crtc = pipe_config->uapi.crtc; - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; enum port port; u32 val; bool glk_cold_boot = false; @@ -1389,7 +1388,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder)); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; enum port port; @@ -1397,7 +1396,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, u32 val, tmp; u16 mode_hdisplay; - drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(intel_crtc->pipe)); + drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(crtc->pipe)); mode_hdisplay = adjusted_mode->crtc_hdisplay; @@ -1424,7 +1423,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, intel_de_write(dev_priv, MIPI_CTRL(port), tmp | 
READ_REQUEST_PRIORITY_HIGH); } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - enum pipe pipe = intel_crtc->pipe; + enum pipe pipe = crtc->pipe; tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); tmp &= ~BXT_PIPE_SELECT_MASK; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c index 548ddf39d853..93bf63bbaff1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_create.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c @@ -85,13 +85,10 @@ i915_gem_setup(struct drm_i915_gem_object *obj, u64 size) return -E2BIG; /* - * For now resort to CPU based clearing for device local-memory, in the - * near future this will use the blitter engine for accelerated, GPU - * based clearing. + * I915_BO_ALLOC_USER will make sure the object is cleared before + * any user access. */ - flags = 0; - if (mr->type == INTEL_MEMORY_LOCAL) - flags = I915_BO_ALLOC_CPU_CLEAR; + flags = I915_BO_ALLOC_USER; ret = mr->ops->init_object(mr, obj, size, flags); if (ret) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 4a6419d7be93..989ff064cf3a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2782,7 +2782,7 @@ __free_fence_array(struct eb_fence *fences, unsigned int n) while (n--) { drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2)); dma_fence_put(fences[n].dma_fence); - kfree(fences[n].chain_fence); + dma_fence_chain_free(fences[n].chain_fence); } kvfree(fences); } @@ -2896,9 +2896,7 @@ add_timeline_fence_array(struct i915_execbuffer *eb, return -EINVAL; } - f->chain_fence = - kmalloc(sizeof(*f->chain_fence), - GFP_KERNEL); + f->chain_fence = dma_fence_chain_alloc(); if (!f->chain_fence) { drm_syncobj_put(syncobj); dma_fence_put(fence); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c index 3b4aa28a076d..d539dffa1554 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -4,74 +4,10 @@ */ #include "intel_memory_region.h" -#include "intel_region_ttm.h" #include "gem/i915_gem_region.h" #include "gem/i915_gem_lmem.h" #include "i915_drv.h" -static void lmem_put_pages(struct drm_i915_gem_object *obj, - struct sg_table *pages) -{ - intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node); - obj->mm.dirty = false; - sg_free_table(pages); - kfree(pages); -} - -static int lmem_get_pages(struct drm_i915_gem_object *obj) -{ - unsigned int flags; - struct sg_table *pages; - - flags = I915_ALLOC_MIN_PAGE_SIZE; - if (obj->flags & I915_BO_ALLOC_CONTIGUOUS) - flags |= I915_ALLOC_CONTIGUOUS; - - obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region, - obj->base.size, - flags); - if (IS_ERR(obj->mm.st_mm_node)) - return PTR_ERR(obj->mm.st_mm_node); - - /* Range manager is always contigous */ - if (obj->mm.region->is_range_manager) - obj->flags |= I915_BO_ALLOC_CONTIGUOUS; - pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node); - if (IS_ERR(pages)) { - intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node); - return PTR_ERR(pages); - } - - __i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl)); - - if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) { - void __iomem *vaddr = - i915_gem_object_lmem_io_map(obj, 0, obj->base.size); - - if (!vaddr) { - struct sg_table *pages = - __i915_gem_object_unset_pages(obj); - - if (!IS_ERR_OR_NULL(pages)) - lmem_put_pages(obj, pages); - } - - memset_io(vaddr, 0, obj->base.size); 
- io_mapping_unmap(vaddr); - } - - return 0; -} - -const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = { - .name = "i915_gem_object_lmem", - .flags = I915_GEM_OBJECT_HAS_IOMEM, - - .get_pages = lmem_get_pages, - .put_pages = lmem_put_pages, - .release = i915_gem_object_release_memory_region, -}; - void __iomem * i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, unsigned long n, @@ -87,10 +23,50 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, return io_mapping_map_wc(&obj->mm.region->iomap, offset, size); } +/** + * i915_gem_object_validates_to_lmem - Whether the object is resident in + * lmem when pages are present. + * @obj: The object to check. + * + * Migratable objects residency may change from under us if the object is + * not pinned or locked. This function is intended to be used to check whether + * the object can only reside in lmem when pages are present. + * + * Return: Whether the object is always resident in lmem when pages are + * present. + */ +bool i915_gem_object_validates_to_lmem(struct drm_i915_gem_object *obj) +{ + struct intel_memory_region *mr = READ_ONCE(obj->mm.region); + + return !i915_gem_object_migratable(obj) && + mr && (mr->type == INTEL_MEMORY_LOCAL || + mr->type == INTEL_MEMORY_STOLEN_LOCAL); +} + +/** + * i915_gem_object_is_lmem - Whether the object is resident in + * lmem + * @obj: The object to check. + * + * Even if an object is allowed to migrate and change memory region, + * this function checks whether it will always be present in lmem when + * valid *or* if that's not the case, whether it's currently resident in lmem. + * For migratable and evictable objects, the latter only makes sense when + * the object is locked. + * + * Return: Whether the object migratable but resident in lmem, or not + * migratable and will be present in lmem when valid. 
+ */ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) { - struct intel_memory_region *mr = obj->mm.region; + struct intel_memory_region *mr = READ_ONCE(obj->mm.region); +#ifdef CONFIG_LOCKDEP + if (i915_gem_object_migratable(obj) && + i915_gem_object_evictable(obj)) + assert_object_held(obj); +#endif return mr && (mr->type == INTEL_MEMORY_LOCAL || mr->type == INTEL_MEMORY_STOLEN_LOCAL); } @@ -103,23 +79,3 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915, return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM], size, flags); } - -int __i915_gem_lmem_object_init(struct intel_memory_region *mem, - struct drm_i915_gem_object *obj, - resource_size_t size, - unsigned int flags) -{ - static struct lock_class_key lock_class; - struct drm_i915_private *i915 = mem->i915; - - drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class, flags); - - obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT; - - i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); - - i915_gem_object_init_memory_region(obj, mem); - - return 0; -} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h index fac6bc5a5ebb..ea76fd11ccb0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h @@ -26,9 +26,4 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915, resource_size_t size, unsigned int flags); -int __i915_gem_lmem_object_init(struct intel_memory_region *mem, - struct drm_i915_gem_object *obj, - resource_size_t size, - unsigned int flags); - #endif /* !__I915_GEM_LMEM_H */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 215326764606..4f50a508c7a0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -19,6 +19,7 @@ #include "i915_gem_mman.h" #include "i915_trace.h" #include "i915_user_extensions.h" +#include "i915_gem_ttm.h" #include "i915_vma.h" static inline bool @@ -624,6 +625,8 @@ mmap_offset_attach(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo; int err; + GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops); + mmo = lookup_mmo(obj, mmap_type); if (mmo) goto out; @@ -666,40 +669,47 @@ err: } static int -__assign_mmap_offset(struct drm_file *file, - u32 handle, +__assign_mmap_offset(struct drm_i915_gem_object *obj, enum i915_mmap_type mmap_type, - u64 *offset) + u64 *offset, struct drm_file *file) { - struct drm_i915_gem_object *obj; struct i915_mmap_offset *mmo; - int err; - obj = i915_gem_object_lookup(file, handle); - if (!obj) - return -ENOENT; + if (i915_gem_object_never_mmap(obj)) + return -ENODEV; - if (i915_gem_object_never_mmap(obj)) { - err = -ENODEV; - goto out; + if (obj->ops->mmap_offset) { + *offset = obj->ops->mmap_offset(obj); + return 0; } if (mmap_type != I915_MMAP_TYPE_GTT && !i915_gem_object_has_struct_page(obj) && - !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) { - err = -ENODEV; - goto out; - } + !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) + return -ENODEV; mmo = mmap_offset_attach(obj, mmap_type, file); - if (IS_ERR(mmo)) { - err = PTR_ERR(mmo); - goto out; - } + if (IS_ERR(mmo)) + return PTR_ERR(mmo); *offset = drm_vma_node_offset_addr(&mmo->vma_node); - err = 0; -out: + return 0; +} + +static int +__assign_mmap_offset_handle(struct drm_file *file, + u32 handle, + enum i915_mmap_type mmap_type, + u64 *offset) +{ + struct 
drm_i915_gem_object *obj; + int err; + + obj = i915_gem_object_lookup(file, handle); + if (!obj) + return -ENOENT; + + err = __assign_mmap_offset(obj, mmap_type, offset, file); i915_gem_object_put(obj); return err; } @@ -719,7 +729,7 @@ i915_gem_dumb_mmap_offset(struct drm_file *file, else mmap_type = I915_MMAP_TYPE_GTT; - return __assign_mmap_offset(file, handle, mmap_type, offset); + return __assign_mmap_offset_handle(file, handle, mmap_type, offset); } /** @@ -787,7 +797,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - return __assign_mmap_offset(file, args->handle, type, &args->offset); + return __assign_mmap_offset_handle(file, args->handle, type, &args->offset); } static void vm_open(struct vm_area_struct *vma) @@ -891,8 +901,18 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) * destroyed and will be invalid when the vma manager lock * is released. */ - mmo = container_of(node, struct i915_mmap_offset, vma_node); - obj = i915_gem_object_get_rcu(mmo->obj); + if (!node->driver_private) { + mmo = container_of(node, struct i915_mmap_offset, vma_node); + obj = i915_gem_object_get_rcu(mmo->obj); + + GEM_BUG_ON(obj && obj->ops->mmap_ops); + } else { + obj = i915_gem_object_get_rcu + (container_of(node, struct drm_i915_gem_object, + base.vma_node)); + + GEM_BUG_ON(obj && !obj->ops->mmap_ops); + } } drm_vma_offset_unlock_lookup(dev->vma_offset_manager); rcu_read_unlock(); @@ -914,7 +934,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) } vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; - vma->vm_private_data = mmo; + + if (i915_gem_object_has_iomem(obj)) + vma->vm_flags |= VM_IO; /* * We keep the ref on mmo->obj, not vm_file, but we require @@ -928,6 +950,15 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) /* Drop the initial creation reference, the vma is now holding one. */ fput(anon); + if (obj->ops->mmap_ops) { + vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags)); + vma->vm_ops = obj->ops->mmap_ops; + vma->vm_private_data = node->driver_private; + return 0; + } + + vma->vm_private_data = mmo; + switch (mmo->mmap_type) { case I915_MMAP_TYPE_WC: vma->vm_page_prot = diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 5706d471692d..cf18c430d51f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -172,7 +172,7 @@ static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *f } } -static void __i915_gem_free_object_rcu(struct rcu_head *head) +void __i915_gem_free_object_rcu(struct rcu_head *head) { struct drm_i915_gem_object *obj = container_of(head, typeof(*obj), rcu); @@ -208,59 +208,69 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj) } } -static void __i915_gem_free_objects(struct drm_i915_private *i915, - struct llist_node *freed) +void __i915_gem_free_object(struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj, *on; + trace_i915_gem_object_destroy(obj); - llist_for_each_entry_safe(obj, on, freed, freed) { - trace_i915_gem_object_destroy(obj); + if (!list_empty(&obj->vma.list)) { + struct i915_vma *vma; - if (!list_empty(&obj->vma.list)) { - struct i915_vma *vma; + /* + * Note that the vma keeps an object reference while + * it is active, so it *should* not sleep while we + * destroy it. Our debug code errs insits it *might*. + * For the moment, play along. 
+ */ + spin_lock(&obj->vma.lock); + while ((vma = list_first_entry_or_null(&obj->vma.list, + struct i915_vma, + obj_link))) { + GEM_BUG_ON(vma->obj != obj); + spin_unlock(&obj->vma.lock); + + __i915_vma_put(vma); - /* - * Note that the vma keeps an object reference while - * it is active, so it *should* not sleep while we - * destroy it. Our debug code errs insits it *might*. - * For the moment, play along. - */ spin_lock(&obj->vma.lock); - while ((vma = list_first_entry_or_null(&obj->vma.list, - struct i915_vma, - obj_link))) { - GEM_BUG_ON(vma->obj != obj); - spin_unlock(&obj->vma.lock); + } + spin_unlock(&obj->vma.lock); + } - __i915_vma_put(vma); + __i915_gem_object_free_mmaps(obj); - spin_lock(&obj->vma.lock); - } - spin_unlock(&obj->vma.lock); - } + GEM_BUG_ON(!list_empty(&obj->lut_list)); - __i915_gem_object_free_mmaps(obj); + atomic_set(&obj->mm.pages_pin_count, 0); + __i915_gem_object_put_pages(obj); + GEM_BUG_ON(i915_gem_object_has_pages(obj)); + bitmap_free(obj->bit_17); - GEM_BUG_ON(!list_empty(&obj->lut_list)); + if (obj->base.import_attach) + drm_prime_gem_destroy(&obj->base, NULL); - atomic_set(&obj->mm.pages_pin_count, 0); - __i915_gem_object_put_pages(obj); - GEM_BUG_ON(i915_gem_object_has_pages(obj)); - bitmap_free(obj->bit_17); + drm_gem_free_mmap_offset(&obj->base); - if (obj->base.import_attach) - drm_prime_gem_destroy(&obj->base, NULL); + if (obj->ops->release) + obj->ops->release(obj); - drm_gem_free_mmap_offset(&obj->base); + if (obj->mm.n_placements > 1) + kfree(obj->mm.placements); - if (obj->ops->release) - obj->ops->release(obj); + if (obj->shares_resv_from) + i915_vm_resv_put(obj->shares_resv_from); +} - if (obj->mm.n_placements > 1) - kfree(obj->mm.placements); +static void __i915_gem_free_objects(struct drm_i915_private *i915, + struct llist_node *freed) +{ + struct drm_i915_gem_object *obj, *on; - if (obj->shares_resv_from) - i915_vm_resv_put(obj->shares_resv_from); + llist_for_each_entry_safe(obj, on, freed, freed) { + might_sleep(); + if (obj->ops->delayed_free) { + obj->ops->delayed_free(obj); + continue; + } + __i915_gem_free_object(obj); /* But keep the pointer alive for RCU-protected lookups */ call_rcu(&obj->rcu, __i915_gem_free_object_rcu); @@ -318,6 +328,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj) * worker and performing frees directly from subsequent allocations for * crude but effective memory throttling. */ + if (llist_add(&obj->freed, &i915->mm.free_list)) queue_work(i915->wq, &i915->mm.free_work); } @@ -410,6 +421,60 @@ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, return 0; } +/** + * i915_gem_object_evictable - Whether object is likely evictable after unbind. + * @obj: The object to check + * + * This function checks whether the object is likely unvictable after unbind. + * If the object is not locked when checking, the result is only advisory. + * If the object is locked when checking, and the function returns true, + * then an eviction should indeed be possible. But since unlocked vma + * unpinning and unbinding is currently possible, the object can actually + * become evictable even if this function returns false. + * + * Return: true if the object may be evictable. False otherwise. 
+ */ +bool i915_gem_object_evictable(struct drm_i915_gem_object *obj) +{ + struct i915_vma *vma; + int pin_count = atomic_read(&obj->mm.pages_pin_count); + + if (!pin_count) + return true; + + spin_lock(&obj->vma.lock); + list_for_each_entry(vma, &obj->vma.list, obj_link) { + if (i915_vma_is_pinned(vma)) { + spin_unlock(&obj->vma.lock); + return false; + } + if (atomic_read(&vma->pages_count)) + pin_count--; + } + spin_unlock(&obj->vma.lock); + GEM_WARN_ON(pin_count < 0); + + return pin_count == 0; +} + +/** + * i915_gem_object_migratable - Whether the object is migratable out of the + * current region. + * @obj: Pointer to the object. + * + * Return: Whether the object is allowed to be resident in other + * regions than the current while pages are present. + */ +bool i915_gem_object_migratable(struct drm_i915_gem_object *obj) +{ + struct intel_memory_region *mr = READ_ONCE(obj->mm.region); + + if (!mr) + return false; + + return obj->mm.n_placements > 1; +} + void i915_gem_init__objects(struct drm_i915_private *i915) { INIT_WORK(&i915->mm.free_work, __i915_gem_free_work); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 7c0eb425cb3b..e9eecebf5c9d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -200,6 +200,9 @@ static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj) static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) { + if (obj->ops->adjust_lru) + obj->ops->adjust_lru(obj); + dma_resv_unlock(obj->base.resv); } @@ -339,14 +342,14 @@ struct scatterlist * __i915_gem_object_get_sg(struct drm_i915_gem_object *obj, struct i915_gem_object_page_iter *iter, unsigned int n, - unsigned int *offset, bool allow_alloc); + unsigned int *offset, bool allow_alloc, bool dma); static inline struct scatterlist * i915_gem_object_get_sg(struct drm_i915_gem_object *obj, unsigned int n, unsigned int *offset, bool allow_alloc) { - return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc); + return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc, false); } static inline struct scatterlist * @@ -354,7 +357,7 @@ i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, unsigned int n, unsigned int *offset, bool allow_alloc) { - return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc); + return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc, true); } struct page * @@ -587,6 +590,16 @@ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj); +void __i915_gem_free_object_rcu(struct rcu_head *head); + +void __i915_gem_free_object(struct drm_i915_gem_object *obj); + +bool i915_gem_object_evictable(struct drm_i915_gem_object *obj); + +bool i915_gem_object_migratable(struct drm_i915_gem_object *obj); + +bool i915_gem_object_validates_to_lmem(struct drm_i915_gem_object *obj); + #ifdef CONFIG_MMU_NOTIFIER static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index d047ea126029..2a23b77424b3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -61,10 +61,26 @@ struct drm_i915_gem_object_ops { const struct drm_i915_gem_pread *arg); int (*pwrite)(struct drm_i915_gem_object 
*obj, const struct drm_i915_gem_pwrite *arg); + u64 (*mmap_offset)(struct drm_i915_gem_object *obj); int (*dmabuf_export)(struct drm_i915_gem_object *obj); + + /** + * adjust_lru - notify that the madvise value was updated + * @obj: The gem object + * + * The madvise value may have been updated, or object was recently + * referenced so act accordingly (Perhaps changing an LRU list etc). + */ + void (*adjust_lru)(struct drm_i915_gem_object *obj); + + /** + * delayed_free - Override the default delayed free implementation + */ + void (*delayed_free)(struct drm_i915_gem_object *obj); void (*release)(struct drm_i915_gem_object *obj); + const struct vm_operations_struct *mmap_ops; const char *name; /* friendly name for debug, e.g. lockdep classes */ }; @@ -187,12 +203,14 @@ struct drm_i915_gem_object { #define I915_BO_ALLOC_VOLATILE BIT(1) #define I915_BO_ALLOC_STRUCT_PAGE BIT(2) #define I915_BO_ALLOC_CPU_CLEAR BIT(3) +#define I915_BO_ALLOC_USER BIT(4) #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \ I915_BO_ALLOC_VOLATILE | \ I915_BO_ALLOC_STRUCT_PAGE | \ - I915_BO_ALLOC_CPU_CLEAR) -#define I915_BO_READONLY BIT(4) -#define I915_TILING_QUIRK_BIT 5 /* unknown swizzling; do not release! */ + I915_BO_ALLOC_CPU_CLEAR | \ + I915_BO_ALLOC_USER) +#define I915_BO_READONLY BIT(5) +#define I915_TILING_QUIRK_BIT 6 /* unknown swizzling; do not release! */ /* * Is the object to be mapped as read-only to the GPU @@ -310,6 +328,12 @@ struct drm_i915_gem_object { bool dirty:1; } mm; + struct { + struct sg_table *cached_io_st; + struct i915_gem_object_page_iter get_io_page; + bool created:1; + } ttm; + /** Record of address bit 17 of each page at last unbind. */ unsigned long *bit_17; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 6444e097016d..086005c1c7ea 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -467,9 +467,8 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj, struct i915_gem_object_page_iter *iter, unsigned int n, unsigned int *offset, - bool allow_alloc) + bool allow_alloc, bool dma) { - const bool dma = iter == &obj->mm.get_dma_page; struct scatterlist *sg; unsigned int idx, count; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c index f25e6646c5b7..d1f1840540dd 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -18,11 +18,7 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj, mutex_lock(&mem->objects.lock); - if (obj->flags & I915_BO_ALLOC_VOLATILE) - list_add(&obj->mm.region_link, &mem->objects.purgeable); - else - list_add(&obj->mm.region_link, &mem->objects.list); - + list_add(&obj->mm.region_link, &mem->objects.list); mutex_unlock(&mem->objects.lock); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c new file mode 100644 index 000000000000..bf33724bed5c --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -0,0 +1,647 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include <drm/ttm/ttm_bo_driver.h> +#include <drm/ttm/ttm_placement.h> + +#include "i915_drv.h" +#include "intel_memory_region.h" +#include "intel_region_ttm.h" + +#include "gem/i915_gem_object.h" +#include "gem/i915_gem_region.h" +#include "gem/i915_gem_ttm.h" +#include "gem/i915_gem_mman.h" + +#define I915_PL_LMEM0 TTM_PL_PRIV +#define I915_PL_SYSTEM TTM_PL_SYSTEM +#define 
I915_PL_STOLEN TTM_PL_VRAM +#define I915_PL_GGTT TTM_PL_TT + +#define I915_TTM_PRIO_PURGE 0 +#define I915_TTM_PRIO_NO_PAGES 1 +#define I915_TTM_PRIO_HAS_PAGES 2 + +/** + * struct i915_ttm_tt - TTM page vector with additional private information + * @ttm: The base TTM page vector. + * @dev: The struct device used for dma mapping and unmapping. + * @cached_st: The cached scatter-gather table. + * + * Note that DMA may be going on right up to the point where the page- + * vector is unpopulated in delayed destroy. Hence keep the + * scatter-gather table mapped and cached up to that point. This is + * different from the cached gem object io scatter-gather table which + * doesn't have an associated dma mapping. + */ +struct i915_ttm_tt { + struct ttm_tt ttm; + struct device *dev; + struct sg_table *cached_st; +}; + +static const struct ttm_place lmem0_sys_placement_flags[] = { + { + .fpfn = 0, + .lpfn = 0, + .mem_type = I915_PL_LMEM0, + .flags = 0, + }, { + .fpfn = 0, + .lpfn = 0, + .mem_type = I915_PL_SYSTEM, + .flags = 0, + } +}; + +static struct ttm_placement i915_lmem0_placement = { + .num_placement = 1, + .placement = &lmem0_sys_placement_flags[0], + .num_busy_placement = 1, + .busy_placement = &lmem0_sys_placement_flags[0], +}; + +static struct ttm_placement i915_sys_placement = { + .num_placement = 1, + .placement = &lmem0_sys_placement_flags[1], + .num_busy_placement = 1, + .busy_placement = &lmem0_sys_placement_flags[1], +}; + +static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj); + +static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo, + uint32_t page_flags) +{ + struct ttm_resource_manager *man = + ttm_manager_type(bo->bdev, bo->resource->mem_type); + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + struct i915_ttm_tt *i915_tt; + int ret; + + i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL); + if (!i915_tt) + return NULL; + + if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && + man->use_tt) + page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC; + + ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, ttm_write_combined); + if (ret) { + kfree(i915_tt); + return NULL; + } + + i915_tt->dev = obj->base.dev->dev; + + return &i915_tt->ttm; +} + +static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) +{ + struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + + if (i915_tt->cached_st) { + dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st, + DMA_BIDIRECTIONAL, 0); + sg_free_table(i915_tt->cached_st); + kfree(i915_tt->cached_st); + i915_tt->cached_st = NULL; + } + ttm_pool_free(&bdev->pool, ttm); +} + +static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) +{ + struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + + ttm_tt_destroy_common(bdev, ttm); + kfree(i915_tt); +} + +static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo, + const struct ttm_place *place) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + + /* Will do for now. 
Our pinned objects are still on TTM's LRU lists */ + if (!i915_gem_object_evictable(obj)) + return false; + + /* This isn't valid with a buddy allocator */ + return ttm_bo_eviction_valuable(bo, place); +} + +static void i915_ttm_evict_flags(struct ttm_buffer_object *bo, + struct ttm_placement *placement) +{ + *placement = i915_sys_placement; +} + +static int i915_ttm_move_notify(struct ttm_buffer_object *bo) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + int ret; + + ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); + if (ret) + return ret; + + ret = __i915_gem_object_put_pages(obj); + if (ret) + return ret; + + return 0; +} + +static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj) +{ + struct radix_tree_iter iter; + void __rcu **slot; + + if (!obj->ttm.cached_io_st) + return; + + rcu_read_lock(); + radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0) + radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index); + rcu_read_unlock(); + + sg_free_table(obj->ttm.cached_io_st); + kfree(obj->ttm.cached_io_st); + obj->ttm.cached_io_st = NULL; +} + +static void i915_ttm_purge(struct drm_i915_gem_object *obj) +{ + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct ttm_operation_ctx ctx = { + .interruptible = true, + .no_wait_gpu = false, + }; + struct ttm_placement place = {}; + int ret; + + if (obj->mm.madv == __I915_MADV_PURGED) + return; + + /* TTM's purge interface. Note that we might be reentering. */ + ret = ttm_bo_validate(bo, &place, &ctx); + + if (!ret) { + i915_ttm_free_cached_io_st(obj); + obj->mm.madv = __I915_MADV_PURGED; + } +} + +static void i915_ttm_swap_notify(struct ttm_buffer_object *bo) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + int ret = i915_ttm_move_notify(bo); + + GEM_WARN_ON(ret); + GEM_WARN_ON(obj->ttm.cached_io_st); + if (!ret && obj->mm.madv != I915_MADV_WILLNEED) + i915_ttm_purge(obj); +} + +static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + + if (likely(obj)) { + /* This releases all gem object bindings to the backend. */ + __i915_gem_free_object(obj); + } +} + +static struct intel_memory_region * +i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type) +{ + struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); + + /* There's some room for optimization here... 
*/ + GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM && + ttm_mem_type < I915_PL_LMEM0); + if (ttm_mem_type == I915_PL_SYSTEM) + return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM, + 0); + + return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL, + ttm_mem_type - I915_PL_LMEM0); +} + +static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm) +{ + struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + struct scatterlist *sg; + struct sg_table *st; + int ret; + + if (i915_tt->cached_st) + return i915_tt->cached_st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + + sg = __sg_alloc_table_from_pages + (st, ttm->pages, ttm->num_pages, 0, + (unsigned long)ttm->num_pages << PAGE_SHIFT, + i915_sg_segment_size(), NULL, 0, GFP_KERNEL); + if (IS_ERR(sg)) { + kfree(st); + return ERR_CAST(sg); + } + + ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0); + if (ret) { + sg_free_table(st); + kfree(st); + return ERR_PTR(ret); + } + + i915_tt->cached_st = st; + return st; +} + +static struct sg_table * +i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, + struct ttm_resource *res) +{ + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct ttm_resource_manager *man = + ttm_manager_type(bo->bdev, res->mem_type); + + if (man->use_tt) + return i915_ttm_tt_get_st(bo->ttm); + + return intel_region_ttm_node_to_st(obj->mm.region, res); +} + +static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *dst_mem, + struct ttm_place *hop) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + struct ttm_resource_manager *dst_man = + ttm_manager_type(bo->bdev, dst_mem->mem_type); + struct ttm_resource_manager *src_man = + ttm_manager_type(bo->bdev, bo->resource->mem_type); + struct intel_memory_region *dst_reg, *src_reg; + union { + struct ttm_kmap_iter_tt tt; + struct ttm_kmap_iter_iomap io; + } _dst_iter, _src_iter; + struct ttm_kmap_iter *dst_iter, *src_iter; + struct sg_table *dst_st; + int ret; + + dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type); + src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type); + GEM_BUG_ON(!dst_reg || !src_reg); + + /* Sync for now. We could do the actual copy async. */ + ret = ttm_bo_wait_ctx(bo, ctx); + if (ret) + return ret; + + ret = i915_ttm_move_notify(bo); + if (ret) + return ret; + + if (obj->mm.madv != I915_MADV_WILLNEED) { + i915_ttm_purge(obj); + ttm_resource_free(bo, &dst_mem); + return 0; + } + + /* Populate ttm with pages if needed. Typically system memory. */ + if (bo->ttm && (dst_man->use_tt || + (bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) { + ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); + if (ret) + return ret; + } + + dst_st = i915_ttm_resource_get_st(obj, dst_mem); + if (IS_ERR(dst_st)) + return PTR_ERR(dst_st); + + /* If we start mapping GGTT, we can no longer use man::use_tt here. */ + dst_iter = dst_man->use_tt ? + ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) : + ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap, + dst_st, dst_reg->region.start); + + src_iter = src_man->use_tt ? 
+ ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) : + ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap, + obj->ttm.cached_io_st, + src_reg->region.start); + + ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter); + ttm_bo_move_sync_cleanup(bo, dst_mem); + i915_ttm_free_cached_io_st(obj); + + if (!dst_man->use_tt) { + obj->ttm.cached_io_st = dst_st; + obj->ttm.get_io_page.sg_pos = dst_st->sgl; + obj->ttm.get_io_page.sg_idx = 0; + } + + return 0; +} + +static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) +{ + if (mem->mem_type < I915_PL_LMEM0) + return 0; + + mem->bus.caching = ttm_write_combined; + mem->bus.is_iomem = true; + + return 0; +} + +static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo, + unsigned long page_offset) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start; + struct scatterlist *sg; + unsigned int ofs; + + GEM_WARN_ON(bo->ttm); + + sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true, true); + + return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs; +} + +static struct ttm_device_funcs i915_ttm_bo_driver = { + .ttm_tt_create = i915_ttm_tt_create, + .ttm_tt_unpopulate = i915_ttm_tt_unpopulate, + .ttm_tt_destroy = i915_ttm_tt_destroy, + .eviction_valuable = i915_ttm_eviction_valuable, + .evict_flags = i915_ttm_evict_flags, + .move = i915_ttm_move, + .swap_notify = i915_ttm_swap_notify, + .delete_mem_notify = i915_ttm_delete_mem_notify, + .io_mem_reserve = i915_ttm_io_mem_reserve, + .io_mem_pfn = i915_ttm_io_mem_pfn, +}; + +/** + * i915_ttm_driver - Return a pointer to the TTM device funcs + * + * Return: Pointer to statically allocated TTM device funcs. + */ +struct ttm_device_funcs *i915_ttm_driver(void) +{ + return &i915_ttm_bo_driver; +} + +static int i915_ttm_get_pages(struct drm_i915_gem_object *obj) +{ + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct ttm_operation_ctx ctx = { + .interruptible = true, + .no_wait_gpu = false, + }; + struct sg_table *st; + int ret; + + /* Move to the requested placement. */ + ret = ttm_bo_validate(bo, &i915_lmem0_placement, &ctx); + if (ret) + return ret == -ENOSPC ? -ENXIO : ret; + + /* Object either has a page vector or is an iomem object */ + st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st; + if (IS_ERR(st)) + return PTR_ERR(st); + + __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl)); + + i915_ttm_adjust_lru(obj); + + return ret; +} + +static void i915_ttm_put_pages(struct drm_i915_gem_object *obj, + struct sg_table *st) +{ + /* + * We're currently not called from a shrinker, so put_pages() + * typically means the object is about to destroyed, or called + * from move_notify(). So just avoid doing much for now. + * If the object is not destroyed next, The TTM eviction logic + * and shrinkers will move it out if needed. + */ + + i915_ttm_adjust_lru(obj); +} + +static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) +{ + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + + /* + * Don't manipulate the TTM LRUs while in TTM bo destruction. + * We're called through i915_ttm_delete_mem_notify(). 
+ */ + if (!kref_read(&bo->kref)) + return; + + /* + * Put on the correct LRU list depending on the MADV status + */ + spin_lock(&bo->bdev->lru_lock); + if (obj->mm.madv != I915_MADV_WILLNEED) { + bo->priority = I915_TTM_PRIO_PURGE; + } else if (!i915_gem_object_has_pages(obj)) { + if (bo->priority < I915_TTM_PRIO_HAS_PAGES) + bo->priority = I915_TTM_PRIO_HAS_PAGES; + } else { + if (bo->priority > I915_TTM_PRIO_NO_PAGES) + bo->priority = I915_TTM_PRIO_NO_PAGES; + } + + ttm_bo_move_to_lru_tail(bo, bo->resource, NULL); + spin_unlock(&bo->bdev->lru_lock); +} + +/* + * TTM-backed gem object destruction requires some clarification. + * Basically we have two possibilities here. We can either rely on the + * i915 delayed destruction and put the TTM object when the object + * is idle. This would be detected by TTM which would bypass the + * TTM delayed destroy handling. The other approach is to put the TTM + * object early and rely on the TTM destroyed handling, and then free + * the leftover parts of the GEM object once TTM's destroyed list handling is + * complete. For now, we rely on the latter for two reasons: + * a) TTM can evict an object even when it's on the delayed destroy list, + * which in theory allows for complete eviction. + * b) There is work going on in TTM to allow freeing an object even when + * it's not idle, and using the TTM destroyed list handling could help us + * benefit from that. + */ +static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj) +{ + if (obj->ttm.created) { + ttm_bo_put(i915_gem_to_ttm(obj)); + } else { + __i915_gem_free_object(obj); + call_rcu(&obj->rcu, __i915_gem_free_object_rcu); + } +} + +static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) +{ + struct vm_area_struct *area = vmf->vma; + struct drm_i915_gem_object *obj = + i915_ttm_to_gem(area->vm_private_data); + + /* Sanity check that we allow writing into this object */ + if (unlikely(i915_gem_object_is_readonly(obj) && + area->vm_flags & VM_WRITE)) + return VM_FAULT_SIGBUS; + + return ttm_bo_vm_fault(vmf); +} + +static int +vm_access_ttm(struct vm_area_struct *area, unsigned long addr, + void *buf, int len, int write) +{ + struct drm_i915_gem_object *obj = + i915_ttm_to_gem(area->vm_private_data); + + if (i915_gem_object_is_readonly(obj) && write) + return -EACCES; + + return ttm_bo_vm_access(area, addr, buf, len, write); +} + +static void ttm_vm_open(struct vm_area_struct *vma) +{ + struct drm_i915_gem_object *obj = + i915_ttm_to_gem(vma->vm_private_data); + + GEM_BUG_ON(!obj); + i915_gem_object_get(obj); +} + +static void ttm_vm_close(struct vm_area_struct *vma) +{ + struct drm_i915_gem_object *obj = + i915_ttm_to_gem(vma->vm_private_data); + + GEM_BUG_ON(!obj); + i915_gem_object_put(obj); +} + +static const struct vm_operations_struct vm_ops_ttm = { + .fault = vm_fault_ttm, + .access = vm_access_ttm, + .open = ttm_vm_open, + .close = ttm_vm_close, +}; + +static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj) +{ + /* The ttm_bo must be allocated with I915_BO_ALLOC_USER */ + GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node)); + + return drm_vma_node_offset_addr(&obj->base.vma_node); +} + +const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { + .name = "i915_gem_object_ttm", + .flags = I915_GEM_OBJECT_HAS_IOMEM, + + .get_pages = i915_ttm_get_pages, + .put_pages = i915_ttm_put_pages, + .truncate = i915_ttm_purge, + .adjust_lru = i915_ttm_adjust_lru, + .delayed_free = i915_ttm_delayed_free, + .mmap_offset = i915_ttm_mmap_offset, + .mmap_ops = &vm_ops_ttm, +}; + 
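
For reference, the bucketing done by i915_ttm_adjust_lru() in the hunk above reduces to a small policy: an object whose madv state is no longer I915_MADV_WILLNEED is always moved to the purge bucket; otherwise the priority is only raised to at least I915_TTM_PRIO_HAS_PAGES when the object currently has no pages, or capped at I915_TTM_PRIO_NO_PAGES when it does, exactly as in the branches shown. The stand-alone C sketch below mirrors only that decision logic; the enum names, the fake_bo structure and the boolean stand-ins for obj->mm.madv and i915_gem_object_has_pages() are assumptions made for illustration, and the real function additionally holds bo->bdev->lru_lock and finishes with ttm_bo_move_to_lru_tail().

/*
 * Minimal user-space sketch of the LRU bucketing policy above.
 * Names and types here are illustrative stand-ins, not driver code.
 */
#include <stdbool.h>
#include <stdio.h>

enum prio { PRIO_PURGE, PRIO_NO_PAGES, PRIO_HAS_PAGES };

struct fake_bo {
	bool willneed;    /* stand-in for obj->mm.madv == I915_MADV_WILLNEED */
	bool has_pages;   /* stand-in for i915_gem_object_has_pages(obj) */
	enum prio priority;
};

static void adjust_lru(struct fake_bo *bo)
{
	if (!bo->willneed) {
		/* Madvised-away objects always land in the purge bucket. */
		bo->priority = PRIO_PURGE;
	} else if (!bo->has_pages) {
		/* No pages: raise priority to at least PRIO_HAS_PAGES, as in the hunk above. */
		if (bo->priority < PRIO_HAS_PAGES)
			bo->priority = PRIO_HAS_PAGES;
	} else {
		/* Has pages: cap priority at PRIO_NO_PAGES, as in the hunk above. */
		if (bo->priority > PRIO_NO_PAGES)
			bo->priority = PRIO_NO_PAGES;
	}
}

int main(void)
{
	struct fake_bo bo = { .willneed = true, .has_pages = false,
			      .priority = PRIO_NO_PAGES };

	adjust_lru(&bo);
	printf("priority after adjust: %d\n", bo.priority);
	return 0;
}

Note that the real function also returns early when kref_read(&bo->kref) is zero, so the LRU lists are never touched while TTM is tearing the buffer object down, as the comment in the hunk explains.
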
+void i915_ttm_bo_destroy(struct ttm_buffer_object *bo) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + + i915_gem_object_release_memory_region(obj); + mutex_destroy(&obj->ttm.get_io_page.lock); + if (obj->ttm.created) + call_rcu(&obj->rcu, __i915_gem_free_object_rcu); +} + +/** + * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object + * @mem: The initial memory region for the object. + * @obj: The gem object. + * @size: Object size in bytes. + * @flags: gem object flags. + * + * Return: 0 on success, negative error code on failure. + */ +int __i915_gem_ttm_object_init(struct intel_memory_region *mem, + struct drm_i915_gem_object *obj, + resource_size_t size, + unsigned int flags) +{ + static struct lock_class_key lock_class; + struct drm_i915_private *i915 = mem->i915; + enum ttm_bo_type bo_type; + size_t alignment = 0; + int ret; + + /* Adjust alignment to GPU- and CPU huge page sizes. */ + + if (mem->is_range_manager) { + if (size >= SZ_1G) + alignment = SZ_1G >> PAGE_SHIFT; + else if (size >= SZ_2M) + alignment = SZ_2M >> PAGE_SHIFT; + else if (size >= SZ_64K) + alignment = SZ_64K >> PAGE_SHIFT; + } + + drm_gem_private_object_init(&i915->drm, &obj->base, size); + i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags); + i915_gem_object_init_memory_region(obj, mem); + i915_gem_object_make_unshrinkable(obj); + obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT; + i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); + INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN); + mutex_init(&obj->ttm.get_io_page.lock); + + bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device : + ttm_bo_type_kernel; + + /* + * If this function fails, it will call the destructor, but + * our caller still owns the object. So no freeing in the + * destructor until obj->ttm.created is true. + * Similarly, in delayed_destroy, we can't call ttm_bo_put() + * until successful initialization. + */ + obj->base.vma_node.driver_private = i915_gem_to_ttm(obj); + ret = ttm_bo_init(&i915->bdev, i915_gem_to_ttm(obj), size, + bo_type, &i915_sys_placement, alignment, + true, NULL, NULL, i915_ttm_bo_destroy); + + if (!ret) + obj->ttm.created = true; + + /* i915 wants -ENXIO when out of memory region space. */ + return (ret == -ENOSPC) ? -ENXIO : ret; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h new file mode 100644 index 000000000000..b8d3dcbb50df --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ +#ifndef _I915_GEM_TTM_H_ +#define _I915_GEM_TTM_H_ + +#include "gem/i915_gem_object_types.h" + +/** + * i915_gem_to_ttm - Convert a struct drm_i915_gem_object to a + * struct ttm_buffer_object. + * @obj: Pointer to the gem object. + * + * Return: Pointer to the embedded struct ttm_buffer_object. + */ +static inline struct ttm_buffer_object * +i915_gem_to_ttm(struct drm_i915_gem_object *obj) +{ + return &obj->__do_not_access; +} + +/* + * i915 ttm gem object destructor. Internal use only. + */ +void i915_ttm_bo_destroy(struct ttm_buffer_object *bo); + +/** + * i915_ttm_to_gem - Convert a struct ttm_buffer_object to an embedding + * struct drm_i915_gem_object. + * + * Return: Pointer to the embedding struct ttm_buffer_object, or NULL + * if the object was not an i915 ttm object. 
+ */ +static inline struct drm_i915_gem_object * +i915_ttm_to_gem(struct ttm_buffer_object *bo) +{ + if (GEM_WARN_ON(bo->destroy != i915_ttm_bo_destroy)) + return NULL; + + return container_of(bo, struct drm_i915_gem_object, __do_not_access); +} + +int __i915_gem_ttm_object_init(struct intel_memory_region *mem, + struct drm_i915_gem_object *obj, + resource_size_t size, + unsigned int flags); +#endif diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 5575172c66f5..bc32622a8c5f 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -578,16 +578,17 @@ static bool assert_mmap_offset(struct drm_i915_private *i915, int expected) { struct drm_i915_gem_object *obj; - struct i915_mmap_offset *mmo; + u64 offset; + int ret; obj = i915_gem_object_create_internal(i915, size); if (IS_ERR(obj)) - return false; + return expected && expected == PTR_ERR(obj); - mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL); + ret = __assign_mmap_offset(obj, I915_MMAP_TYPE_GTT, &offset, NULL); i915_gem_object_put(obj); - return PTR_ERR_OR_ZERO(mmo) == expected; + return ret == expected; } static void disable_retire_worker(struct drm_i915_private *i915) @@ -622,8 +623,8 @@ static int igt_mmap_offset_exhaustion(void *arg) struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm; struct drm_i915_gem_object *obj; struct drm_mm_node *hole, *next; - struct i915_mmap_offset *mmo; int loop, err = 0; + u64 offset; /* Disable background reaper */ disable_retire_worker(i915); @@ -684,13 +685,13 @@ static int igt_mmap_offset_exhaustion(void *arg) obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); + pr_err("Unable to create object for reclaimed hole\n"); goto out; } - mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL); - if (IS_ERR(mmo)) { + err = __assign_mmap_offset(obj, I915_MMAP_TYPE_GTT, &offset, NULL); + if (err) { pr_err("Unable to insert object into reclaimed hole\n"); - err = PTR_ERR(mmo); goto err_obj; } @@ -865,10 +866,10 @@ static int __igt_mmap(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, enum i915_mmap_type type) { - struct i915_mmap_offset *mmo; struct vm_area_struct *area; unsigned long addr; int err, i; + u64 offset; if (!can_mmap(obj, type)) return 0; @@ -879,11 +880,11 @@ static int __igt_mmap(struct drm_i915_private *i915, if (err) return err; - mmo = mmap_offset_attach(obj, type, NULL); - if (IS_ERR(mmo)) - return PTR_ERR(mmo); + err = __assign_mmap_offset(obj, type, &offset, NULL); + if (err) + return err; - addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); + addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) return addr; @@ -897,13 +898,6 @@ static int __igt_mmap(struct drm_i915_private *i915, goto out_unmap; } - if (area->vm_private_data != mmo) { - pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n", - obj->mm.region->name); - err = -EINVAL; - goto out_unmap; - } - for (i = 0; i < obj->base.size / sizeof(u32); i++) { u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux))); u32 x; @@ -961,7 +955,7 @@ static int igt_mmap(void *arg) struct drm_i915_gem_object *obj; int err; - obj = i915_gem_object_create_region(mr, sizes[i], 0); + obj = i915_gem_object_create_region(mr, sizes[i], I915_BO_ALLOC_USER); if (obj == ERR_PTR(-ENODEV)) continue; @@ -1004,12 +998,12 @@ static int 
__igt_mmap_access(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, enum i915_mmap_type type) { - struct i915_mmap_offset *mmo; unsigned long __user *ptr; unsigned long A, B; unsigned long x, y; unsigned long addr; int err; + u64 offset; memset(&A, 0xAA, sizeof(A)); memset(&B, 0xBB, sizeof(B)); @@ -1017,11 +1011,11 @@ static int __igt_mmap_access(struct drm_i915_private *i915, if (!can_mmap(obj, type) || !can_access(obj)) return 0; - mmo = mmap_offset_attach(obj, type, NULL); - if (IS_ERR(mmo)) - return PTR_ERR(mmo); + err = __assign_mmap_offset(obj, type, &offset, NULL); + if (err) + return err; - addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); + addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) return addr; ptr = (unsigned long __user *)addr; @@ -1081,7 +1075,7 @@ static int igt_mmap_access(void *arg) struct drm_i915_gem_object *obj; int err; - obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0); + obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER); if (obj == ERR_PTR(-ENODEV)) continue; @@ -1111,11 +1105,11 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915, enum i915_mmap_type type) { struct intel_engine_cs *engine; - struct i915_mmap_offset *mmo; unsigned long addr; u32 __user *ux; u32 bbe; int err; + u64 offset; /* * Verify that the mmap access into the backing store aligns with @@ -1132,11 +1126,11 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915, if (err) return err; - mmo = mmap_offset_attach(obj, type, NULL); - if (IS_ERR(mmo)) - return PTR_ERR(mmo); + err = __assign_mmap_offset(obj, type, &offset, NULL); + if (err) + return err; - addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); + addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) return addr; @@ -1226,7 +1220,7 @@ static int igt_mmap_gpu(void *arg) struct drm_i915_gem_object *obj; int err; - obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0); + obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER); if (obj == ERR_PTR(-ENODEV)) continue; @@ -1303,18 +1297,18 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, enum i915_mmap_type type) { - struct i915_mmap_offset *mmo; unsigned long addr; int err; + u64 offset; if (!can_mmap(obj, type)) return 0; - mmo = mmap_offset_attach(obj, type, NULL); - if (IS_ERR(mmo)) - return PTR_ERR(mmo); + err = __assign_mmap_offset(obj, type, &offset, NULL); + if (err) + return err; - addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); + addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) return addr; @@ -1350,10 +1344,20 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915, } } - err = check_absent(addr, obj->base.size); - if (err) { - pr_err("%s: was not absent\n", obj->mm.region->name); - goto out_unmap; + if (!obj->ops->mmap_ops) { + err = check_absent(addr, obj->base.size); + if (err) { + pr_err("%s: was not absent\n", obj->mm.region->name); + goto out_unmap; + } + } else { + /* ttm allows access to evicted regions by design */ + + err = check_present(addr, obj->base.size); + if (err) { + pr_err("%s: was not present\n", obj->mm.region->name); + goto out_unmap; + } } out_unmap: @@ -1371,7 +1375,7 @@ static int igt_mmap_revoke(void *arg) struct drm_i915_gem_object *obj; int err; - obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0); + obj = 
i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER); if (obj == ERR_PTR(-ENODEV)) continue; diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index f7366b054f8e..4ae1f717a94c 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -9,6 +9,7 @@ #include "intel_region_ttm.h" #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" +#include "gem/i915_gem_ttm.h" #include "intel_region_lmem.h" static int init_fake_lmem_bar(struct intel_memory_region *mem) @@ -107,7 +108,7 @@ out_no_io: static const struct intel_memory_region_ops intel_region_lmem_ops = { .init = region_lmem_init, .release = region_lmem_release, - .init_object = __i915_gem_lmem_object_init, + .init_object = __i915_gem_ttm_object_init, }; struct intel_memory_region * diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index cc745751ac53..0529576f069c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -636,7 +636,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) intel_uncore_read16(uncore, C0DRB3_BW)); seq_printf(m, "C1DRB3 = 0x%04x\n", intel_uncore_read16(uncore, C1DRB3_BW)); - } else if (INTEL_GEN(dev_priv) >= 6) { + } else if (GRAPHICS_VER(dev_priv) >= 6) { seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", intel_uncore_read(uncore, MAD_DIMM_C0)); seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 73de45472f60..30d8cd8c69b1 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -561,7 +561,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (ret) goto err_perf; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver); if (ret) goto err_ggtt; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b30397b04529..997fbe9532c1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -270,8 +270,10 @@ struct drm_i915_display_funcs { int (*bw_calc_min_cdclk)(struct intel_atomic_state *state); int (*get_fifo_size)(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane); - int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state); - int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state); + int (*compute_pipe_wm)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + int (*compute_intermediate_wm)(struct intel_atomic_state *state, + struct intel_crtc *crtc); void (*initial_watermarks)(struct intel_atomic_state *state, struct intel_crtc *crtc); void (*atomic_update_watermarks)(struct intel_atomic_state *state, @@ -346,13 +348,14 @@ struct intel_fbc { /* This is always the inner lock when overlapping with struct_mutex and * it's the outer lock when overlapping with stolen_lock. 
*/ struct mutex lock; - unsigned threshold; unsigned int possible_framebuffer_bits; unsigned int busy_bits; struct intel_crtc *crtc; struct drm_mm_node compressed_fb; - struct drm_mm_node *compressed_llb; + struct drm_mm_node compressed_llb; + + u8 limit; bool false_color; @@ -467,6 +470,7 @@ struct i915_drrs { #define QUIRK_PIN_SWIZZLED_PAGES (1<<5) #define QUIRK_INCREASE_T12_DELAY (1<<6) #define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7) +#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8) struct intel_fbdev; struct intel_fbc_work; @@ -1134,6 +1138,8 @@ struct drm_i915_private { /* For i915gm/i945gm vblank irq workaround */ u8 vblank_enabled; + bool irq_enabled; + /* perform PHY state sanity checks? */ bool chv_phy_assert[2]; @@ -1237,21 +1243,6 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) #define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id) -/* - * Deprecated: this will be replaced by individual IP checks: - * GRAPHICS_VER(), MEDIA_VER() and DISPLAY_VER() - */ -#define INTEL_GEN(dev_priv) GRAPHICS_VER(dev_priv) -/* - * Deprecated: use IS_GRAPHICS_VER(), IS_MEDIA_VER() and IS_DISPLAY_VER() as - * appropriate. - */ -#define IS_GEN_RANGE(dev_priv, s, e) IS_GRAPHICS_VER(dev_priv, (s), (e)) -/* - * Deprecated: use GRAPHICS_VER(), MEDIA_VER() and DISPLAY_VER() as appropriate. - */ -#define IS_GEN(dev_priv, n) (GRAPHICS_VER(dev_priv) == (n)) - #define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics_ver) #define IS_GRAPHICS_VER(i915, from, until) \ (GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until)) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 589388dec48a..6a0a3f0e36e1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1005,8 +1005,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, } } - if (obj->mm.madv != __I915_MADV_PURGED) + if (obj->mm.madv != __I915_MADV_PURGED) { obj->mm.madv = args->madv; + if (obj->ops->adjust_lru) + obj->ops->adjust_lru(obj); + } if (i915_gem_object_has_pages(obj)) { unsigned long flags; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index c03943198089..1d4c683c9de9 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2880,14 +2880,14 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, return true; } -int bdw_enable_vblank(struct drm_crtc *crtc) +int bdw_enable_vblank(struct drm_crtc *_crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; + struct intel_crtc *crtc = to_intel_crtc(_crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; unsigned long irqflags; - if (gen11_dsi_configure_te(intel_crtc, true)) + if (gen11_dsi_configure_te(crtc, true)) return 0; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -2898,7 +2898,7 @@ int bdw_enable_vblank(struct drm_crtc *crtc) * PSR is active as no frames are generated, so check only for PSR. 
*/ if (HAS_PSR(dev_priv)) - drm_crtc_vblank_restore(crtc); + drm_crtc_vblank_restore(&crtc->base); return 0; } @@ -2952,14 +2952,14 @@ void ilk_disable_vblank(struct drm_crtc *crtc) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -void bdw_disable_vblank(struct drm_crtc *crtc) +void bdw_disable_vblank(struct drm_crtc *_crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; + struct intel_crtc *crtc = to_intel_crtc(_crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; unsigned long irqflags; - if (gen11_dsi_configure_te(intel_crtc, false)) + if (gen11_dsi_configure_te(crtc, false)) return; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -4487,14 +4487,14 @@ int intel_irq_install(struct drm_i915_private *dev_priv) */ dev_priv->runtime_pm.irqs_enabled = true; - dev_priv->drm.irq_enabled = true; + dev_priv->irq_enabled = true; intel_irq_reset(dev_priv); ret = request_irq(irq, intel_irq_handler(dev_priv), IRQF_SHARED, DRIVER_NAME, dev_priv); if (ret < 0) { - dev_priv->drm.irq_enabled = false; + dev_priv->irq_enabled = false; return ret; } @@ -4520,10 +4520,10 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv) * intel_modeset_driver_remove() calling us out of sequence. * Would be nice if it didn't do that... */ - if (!dev_priv->drm.irq_enabled) + if (!dev_priv->irq_enabled) return; - dev_priv->drm.irq_enabled = false; + dev_priv->irq_enabled = false; intel_irq_reset(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 83b500bb170c..7030e563985c 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -845,7 +845,6 @@ static const struct intel_device_info icl_info = { static const struct intel_device_info ehl_info = { GEN11_FEATURES, PLATFORM(INTEL_ELKHARTLAKE), - .require_force_probe = 1, .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), .ppgtt_size = 36, }; @@ -853,7 +852,6 @@ static const struct intel_device_info ehl_info = { static const struct intel_device_info jsl_info = { GEN11_FEATURES, PLATFORM(INTEL_JASPERLAKE), - .require_force_probe = 1, .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), .ppgtt_size = 36, }; @@ -939,15 +937,48 @@ static const struct intel_device_info adl_s_info = { .dma_mask_size = 46, }; +#define XE_LPD_CURSOR_OFFSETS \ + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + [PIPE_B] = IVB_CURSOR_B_OFFSET, \ + [PIPE_C] = IVB_CURSOR_C_OFFSET, \ + [PIPE_D] = TGL_CURSOR_D_OFFSET, \ + } + #define XE_LPD_FEATURES \ - .display.ver = 13, \ - .display.has_psr_hw_tracking = 0, \ - .abox_mask = GENMASK(1, 0), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ - BIT(TRANSCODER_C) | BIT(TRANSCODER_D), \ - .dbuf.size = 4096, \ - .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4) + .abox_mask = GENMASK(1, 0), \ + .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }, \ + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + BIT(TRANSCODER_C) | BIT(TRANSCODER_D), \ + .dbuf.size = 4096, \ + .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \ + BIT(DBUF_S4), \ + .display.has_ddi = 1, \ + .display.has_dmc = 1, \ + .display.has_dp_mst = 1, \ + .display.has_dsb = 1, \ + .display.has_dsc = 1, \ + .display.has_fbc = 1, \ + .display.has_fpga_dbg = 1, \ 
+ .display.has_hdcp = 1, \ + .display.has_hotplug = 1, \ + .display.has_ipc = 1, \ + .display.has_psr = 1, \ + .display.ver = 13, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + [TRANSCODER_C] = PIPE_C_OFFSET, \ + [TRANSCODER_D] = PIPE_D_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ + [TRANSCODER_D] = TRANSCODER_D_OFFSET, \ + }, \ + XE_LPD_CURSOR_OFFSETS static const struct intel_device_info adl_p_info = { GEN12_FEATURES, @@ -956,6 +987,7 @@ static const struct intel_device_info adl_p_info = { .has_cdclk_crawl = 1, .require_force_probe = 1, .display.has_modular_fia = 1, + .display.has_psr_hw_tracking = 0, .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), .ppgtt_size = 48, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 94fde5ca26ae..16a19239d86d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4590,19 +4590,22 @@ enum { #define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 (0 << 28) #define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 (1 << 28) #define EDP_Y_COORDINATE_ENABLE REG_BIT(25) /* display 10, 11 and 12 */ +#define EDP_PSR2_SU_SDP_SCANLINE REG_BIT(25) /* display 13+ */ #define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20) #define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20) #define EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES 8 #define EDP_PSR2_IO_BUFFER_WAKE(lines) ((EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines)) << 13) #define EDP_PSR2_IO_BUFFER_WAKE_MASK (3 << 13) #define TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES 5 -#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << 13) +#define TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT 13 +#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT) #define TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK (7 << 13) #define EDP_PSR2_FAST_WAKE_MAX_LINES 8 #define EDP_PSR2_FAST_WAKE(lines) ((EDP_PSR2_FAST_WAKE_MAX_LINES - (lines)) << 11) #define EDP_PSR2_FAST_WAKE_MASK (3 << 11) #define TGL_EDP_PSR2_FAST_WAKE_MIN_LINES 5 -#define TGL_EDP_PSR2_FAST_WAKE(lines) (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << 10) +#define TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT 10 +#define TGL_EDP_PSR2_FAST_WAKE(lines) (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT) #define TGL_EDP_PSR2_FAST_WAKE_MASK (7 << 10) #define EDP_PSR2_TP2_TIME_500us (0 << 8) #define EDP_PSR2_TP2_TIME_100us (1 << 8) @@ -7751,7 +7754,7 @@ enum { #define GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED (3 << 0) /* icl + */ /* DMC */ -#define DMC_PROGRAM(i) _MMIO(0x80000 + (i) * 4) +#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4) #define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0 #define DMC_HTP_ADDR_SKL 0x00500034 #define DMC_SSP_BASE _MMIO(0x8F074) @@ -8107,6 +8110,7 @@ enum { # define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) #define CHICKEN_PAR1_1 _MMIO(0x42080) +#define IGNORE_KVMR_PIPE_A REG_BIT(23) #define KBL_ARB_FILL_SPARE_22 REG_BIT(22) #define DIS_RAM_BYPASS_PSR2_MAN_TRACK (1 << 16) #define SKL_DE_COMPRESSED_HASH_MODE (1 << 15) @@ -10365,6 +10369,14 @@ enum skl_power_gate { #define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC) /* See DP_MSA_MISC_* for the bit definitions */ +#define _TRANS_A_SET_CONTEXT_LATENCY 0x6007C +#define _TRANS_B_SET_CONTEXT_LATENCY 0x6107C +#define 
_TRANS_C_SET_CONTEXT_LATENCY 0x6207C +#define _TRANS_D_SET_CONTEXT_LATENCY 0x6307C +#define TRANS_SET_CONTEXT_LATENCY(tran) _MMIO_TRANS2(tran, _TRANS_A_SET_CONTEXT_LATENCY) +#define TRANS_SET_CONTEXT_LATENCY_MASK REG_GENMASK(15, 0) +#define TRANS_SET_CONTEXT_LATENCY_VALUE(x) REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x)) + /* LCPLL Control */ #define LCPLL_CTL _MMIO(0x130040) #define LCPLL_PLL_DISABLE (1 << 31) diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c index 50fdea84ba70..879b0f007be3 100644 --- a/drivers/gpu/drm/i915/intel_dram.c +++ b/drivers/gpu/drm/i915/intel_dram.c @@ -484,8 +484,7 @@ static int gen11_get_dram_info(struct drm_i915_private *i915) static int gen12_get_dram_info(struct drm_i915_private *i915) { - /* Always needed for GEN12+ */ - i915->dram_info.wm_lv_0_adjust_needed = true; + i915->dram_info.wm_lv_0_adjust_needed = false; return icl_pcode_read_mem_global_info(i915); } diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index e6024eb7cca4..12fb5423fd5e 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -149,7 +149,6 @@ intel_memory_region_create(struct drm_i915_private *i915, mutex_init(&mem->objects.lock); INIT_LIST_HEAD(&mem->objects.list); - INIT_LIST_HEAD(&mem->objects.purgeable); INIT_LIST_HEAD(&mem->reserved); mutex_init(&mem->mm_lock); diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 1f7dac63abb7..c7e635d62e1a 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -101,7 +101,6 @@ struct intel_memory_region { struct { struct mutex lock; /* Protects access to objects */ struct list_head list; - struct list_head purgeable; } objects; size_t chunk_size; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 45fefa0ed160..74a8863b94c2 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1370,11 +1370,11 @@ static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state, return true; } -static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) +static int g4x_compute_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_atomic_state *state = - to_intel_atomic_state(crtc_state->uapi.state); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; int num_active_planes = hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR)); @@ -1451,20 +1451,21 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) return 0; } -static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state) +static int g4x_compute_intermediate_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate; const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal; - struct intel_atomic_state *intel_state = - 
to_intel_atomic_state(new_crtc_state->uapi.state); - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(intel_state, crtc); const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal; enum plane_id plane_id; - if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) { + if (!new_crtc_state->hw.active || + drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) { *intermediate = *optimal; intermediate->cxsr = false; @@ -1890,12 +1891,12 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); } -static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) +static int vlv_compute_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_atomic_state *state = - to_intel_atomic_state(crtc_state->uapi.state); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; const struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; @@ -2095,19 +2096,20 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, #undef VLV_FIFO -static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state) +static int vlv_compute_intermediate_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate; const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal; - struct intel_atomic_state *intel_state = - to_intel_atomic_state(new_crtc_state->uapi.state); - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(intel_state, crtc); const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal; int level; - if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) { + if (!new_crtc_state->hw.active || + drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) { *intermediate = *optimal; intermediate->cxsr = false; @@ -2906,24 +2908,25 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, if (wm[level] == 0) { for (i = level + 1; i <= max_level; i++) wm[i] = 0; + + max_level = level - 1; + break; } } /* - * WaWmMemoryReadLatency:skl+,glk + * WaWmMemoryReadLatency * * punit doesn't take into account the read latency so we need - * to add 2us to the various latency levels we retrieve from the - * punit when level 0 response data us 0us. + * to add proper adjustement to each valid level we retrieve + * from the punit when level 0 response data is 0us. */ if (wm[0] == 0) { - wm[0] += 2; - for (level = 1; level <= max_level; level++) { - if (wm[level] == 0) - break; - wm[level] += 2; - } + u8 adjust = DISPLAY_VER(dev_priv) >= 12 ? 
3 : 2; + + for (level = 0; level <= max_level; level++) + wm[level] += adjust; } /* @@ -2934,7 +2937,6 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, */ if (dev_priv->dram_info.wm_lv_0_adjust_needed) wm[0] += 1; - } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD); @@ -3144,10 +3146,12 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, } /* Compute new watermarks for the pipe */ -static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) +static int ilk_compute_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_pipe_wm *pipe_wm; struct intel_plane *plane; const struct intel_plane_state *plane_state; @@ -3220,16 +3224,16 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) * state and the new state. These can be programmed to the hardware * immediately. */ -static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate; - struct intel_atomic_state *intel_state = - to_intel_atomic_state(newstate->uapi.state); - const struct intel_crtc_state *oldstate = - intel_atomic_get_old_crtc_state(intel_state, intel_crtc); - const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal; +static int ilk_compute_intermediate_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate; + const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal; int level, max_level = ilk_wm_max_level(dev_priv); /* @@ -3237,9 +3241,10 @@ static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate) * currently active watermarks to get values that are safe both before * and after the vblank. */ - *a = newstate->wm.ilk.optimal; - if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) || - intel_state->skip_intermediate_wm) + *a = new_crtc_state->wm.ilk.optimal; + if (!new_crtc_state->hw.active || + drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) || + state->skip_intermediate_wm) return 0; a->pipe_enabled |= b->pipe_enabled; @@ -3270,8 +3275,8 @@ static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate) * If our intermediate WM are identical to the final WM, then we can * omit the post-vblank programming; only update if it's different. 
*/ - if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0) - newstate->wm.need_postvbl_update = true; + if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0) + new_crtc_state->wm.need_postvbl_update = true; return 0; } @@ -3283,12 +3288,12 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv, int level, struct intel_wm_level *ret_wm) { - const struct intel_crtc *intel_crtc; + const struct intel_crtc *crtc; ret_wm->enable = true; - for_each_intel_crtc(&dev_priv->drm, intel_crtc) { - const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk; + for_each_intel_crtc(&dev_priv->drm, crtc) { + const struct intel_pipe_wm *active = &crtc->wm.active.ilk; const struct intel_wm_level *wm = &active->wm[level]; if (!active->pipe_enabled) @@ -3388,7 +3393,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, enum intel_ddb_partitioning partitioning, struct ilk_wm_values *results) { - struct intel_crtc *intel_crtc; + struct intel_crtc *crtc; int level, wm_lp; results->enable_fbc_wm = merged->fbc_wm_enabled; @@ -3433,9 +3438,9 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, } /* LP0 register values */ - for_each_intel_crtc(&dev_priv->drm, intel_crtc) { - enum pipe pipe = intel_crtc->pipe; - const struct intel_pipe_wm *pipe_wm = &intel_crtc->wm.active.ilk; + for_each_intel_crtc(&dev_priv->drm, crtc) { + enum pipe pipe = crtc->pipe; + const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk; const struct intel_wm_level *r = &pipe_wm->wm[0]; if (drm_WARN_ON(&dev_priv->drm, !r->enable)) diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c index 82a6727ede46..27fe0668d094 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.c +++ b/drivers/gpu/drm/i915/intel_region_ttm.c @@ -11,6 +11,7 @@ #include "intel_region_ttm.h" +#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */ /** * DOC: TTM support structure * @@ -20,9 +21,6 @@ * i915 GEM regions to TTM memory types and resource managers. */ -/* A Zero-initialized driver for now. We don't have a TTM backend yet. */ -static struct ttm_device_funcs i915_ttm_bo_driver; - /** * intel_region_ttm_device_init - Initialize a TTM device * @dev_priv: Pointer to an i915 device private structure. @@ -33,7 +31,7 @@ int intel_region_ttm_device_init(struct drm_i915_private *dev_priv) { struct drm_device *drm = &dev_priv->drm; - return ttm_device_init(&dev_priv->bdev, &i915_ttm_bo_driver, + return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(), drm->dev, drm->anon_inode->i_mapping, drm->vma_offset_manager, false, false); } @@ -177,6 +175,7 @@ struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem, mem->region.start); } +#ifdef CONFIG_DRM_I915_SELFTEST /** * intel_region_ttm_node_alloc - Allocate memory resources from a region * @mem: The memory region, @@ -224,3 +223,4 @@ intel_region_ttm_node_alloc(struct intel_memory_region *mem, ret = -ENXIO; return ret ? 
ERR_PTR(ret) : res; } +#endif diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h index 11b0574ab791..e8cf830fda6f 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.h +++ b/drivers/gpu/drm/i915/intel_region_ttm.h @@ -12,6 +12,7 @@ struct drm_i915_private; struct intel_memory_region; struct ttm_resource; +struct ttm_device_funcs; int intel_region_ttm_device_init(struct drm_i915_private *dev_priv); @@ -24,11 +25,15 @@ void intel_region_ttm_fini(struct intel_memory_region *mem); struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem, struct ttm_resource *res); +void intel_region_ttm_node_free(struct intel_memory_region *mem, + struct ttm_resource *node); + +struct ttm_device_funcs *i915_ttm_driver(void); + +#ifdef CONFIG_DRM_I915_SELFTEST struct ttm_resource * intel_region_ttm_node_alloc(struct intel_memory_region *mem, resource_size_t size, unsigned int flags); - -void intel_region_ttm_node_free(struct intel_memory_region *mem, - struct ttm_resource *node); +#endif #endif /* _INTEL_REGION_TTM_H_ */ diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 1bed8f666048..7178bc6f8556 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1929,7 +1929,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) return -ENODEV; } - if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915)) + if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915)) uncore->flags |= UNCORE_HAS_FORCEWAKE; if (!intel_uncore_has_forcewake(uncore)) { diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.c b/drivers/gpu/drm/i915/selftests/igt_mmap.c index 583a4ff8b8c9..e920a461bd36 100644 --- a/drivers/gpu/drm/i915/selftests/igt_mmap.c +++ b/drivers/gpu/drm/i915/selftests/igt_mmap.c @@ -9,15 +9,28 @@ #include "i915_drv.h" #include "igt_mmap.h" -unsigned long igt_mmap_node(struct drm_i915_private *i915, - struct drm_vma_offset_node *node, - unsigned long addr, - unsigned long prot, - unsigned long flags) +unsigned long igt_mmap_offset(struct drm_i915_private *i915, + u64 offset, + unsigned long size, + unsigned long prot, + unsigned long flags) { + struct drm_vma_offset_node *node; struct file *file; + unsigned long addr; int err; + /* no need to refcount, we own this object */ + drm_vma_offset_lock_lookup(i915->drm.vma_offset_manager); + node = drm_vma_offset_exact_lookup_locked(i915->drm.vma_offset_manager, + offset / PAGE_SIZE, size / PAGE_SIZE); + drm_vma_offset_unlock_lookup(i915->drm.vma_offset_manager); + + if (GEM_WARN_ON(!node)) { + pr_info("Failed to lookup %llx\n", offset); + return -ENOENT; + } + /* Pretend to open("/dev/dri/card0") */ file = mock_drm_getfile(i915->drm.primary, O_RDWR); if (IS_ERR(file)) @@ -29,7 +42,7 @@ unsigned long igt_mmap_node(struct drm_i915_private *i915, goto out_file; } - addr = vm_mmap(file, addr, drm_vma_node_size(node) << PAGE_SHIFT, + addr = vm_mmap(file, 0, drm_vma_node_size(node) << PAGE_SHIFT, prot, flags, drm_vma_node_offset_addr(node)); drm_vma_node_revoke(node, file->private_data); diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.h b/drivers/gpu/drm/i915/selftests/igt_mmap.h index 6e716cb59d7e..acbe34d81a6d 100644 --- a/drivers/gpu/drm/i915/selftests/igt_mmap.h +++ b/drivers/gpu/drm/i915/selftests/igt_mmap.h @@ -7,13 +7,15 @@ #ifndef IGT_MMAP_H #define IGT_MMAP_H +#include <linux/types.h> + struct drm_i915_private; struct drm_vma_offset_node; -unsigned long igt_mmap_node(struct drm_i915_private *i915, - struct drm_vma_offset_node *node, 
- unsigned long addr, - unsigned long prot, - unsigned long flags); +unsigned long igt_mmap_offset(struct drm_i915_private *i915, + u64 offset, + unsigned long size, + unsigned long prot, + unsigned long flags); #endif /* IGT_MMAP_H */ diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c index 37ae68a7fba5..9b84df34a6a1 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-kms.c +++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c @@ -93,11 +93,8 @@ static int dcss_kms_bridge_connector_init(struct dcss_kms_dev *kms) ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); - if (ret < 0) { - dev_err(ddev->dev, "Unable to attach bridge %pOF\n", - bridge->of_node); + if (ret < 0) return ret; - } kms->connector = drm_bridge_connector_init(ddev, encoder); if (IS_ERR(kms->connector)) { @@ -133,8 +130,6 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss) if (ret) goto cleanup_mode_config; - drm->irq_enabled = true; - ret = dcss_kms_bridge_connector_init(kms); if (ret) goto cleanup_mode_config; @@ -178,7 +173,6 @@ void dcss_kms_detach(struct dcss_kms_dev *kms) drm_kms_helper_poll_fini(drm); drm_atomic_helper_shutdown(drm); drm_crtc_vblank_off(&kms->crtc.base); - drm->irq_enabled = false; drm_mode_config_cleanup(drm); dcss_crtc_deinit(&kms->crtc, drm); drm->dev_private = NULL; diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c index 044d3bdf313c..ac45d54acd4e 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-plane.c +++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c @@ -361,7 +361,6 @@ static void dcss_plane_atomic_disable(struct drm_plane *plane, } static const struct drm_plane_helper_funcs dcss_plane_helper_funcs = { - .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = dcss_plane_atomic_check, .atomic_update = dcss_plane_atomic_update, .atomic_disable = dcss_plane_atomic_disable, diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 76819a8ac37f..9558e9e1b431 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -208,17 +208,6 @@ static int imx_drm_bind(struct device *dev) return PTR_ERR(drm); /* - * enable drm irq mode. - * - with irq_enabled = true, we can use the vblank feature. - * - * P.S. note that we wouldn't use drm irq handler but - * just specific driver own one instead because - * drm framework supports only one irq handler and - * drivers can well take care of their interrupts - */ - drm->irq_enabled = true; - - /* * set max width and height as default value(4096x4096). * this value would be used to check framebuffer size limitation * at drm_mode_addfb(). 
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 53132ddf9587..e5078d03020d 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -465,10 +465,8 @@ static int imx_ldb_register(struct drm_device *drm, if (imx_ldb_ch->bridge) { ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL, 0); - if (ret) { - DRM_ERROR("Failed to initialize bridge with drm\n"); + if (ret) return ret; - } } else { /* * We want to add the connector whenever there is no bridge diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 8710f55d2579..ef114b6aa691 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -772,7 +772,6 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = { - .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = ipu_plane_atomic_check, .atomic_disable = ipu_plane_atomic_disable, .atomic_update = ipu_plane_atomic_update, diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index e0412e694fd9..a8aba0141ce7 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -294,11 +294,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) if (imxpd->next_bridge) { ret = drm_bridge_attach(encoder, imxpd->next_bridge, bridge, 0); - if (ret < 0) { - dev_err(imxpd->dev, "failed to attach bridge: %d\n", - ret); + if (ret < 0) return ret; - } } else { drm_connector_helper_add(connector, &imx_pd_connector_helper_funcs); diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index 5244f4763477..d261f7a03b18 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -33,7 +33,6 @@ #include <drm/drm_fourcc.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> -#include <drm/drm_irq.h> #include <drm/drm_managed.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> @@ -799,8 +798,6 @@ static const struct drm_driver ingenic_drm_driver_data = { .fops = &ingenic_drm_fops, .gem_create_object = ingenic_drm_gem_create_object, DRM_GEM_CMA_DRIVER_OPS, - - .irq_handler = ingenic_drm_irq_handler, }; static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = { @@ -830,7 +827,6 @@ static const struct drm_plane_helper_funcs ingenic_drm_plane_helper_funcs = { .atomic_update = ingenic_drm_plane_atomic_update, .atomic_check = ingenic_drm_plane_atomic_check, .atomic_disable = ingenic_drm_plane_atomic_disable, - .prepare_fb = drm_gem_plane_helper_prepare_fb, }; static const struct drm_crtc_helper_funcs ingenic_drm_crtc_helper_funcs = { @@ -1085,10 +1081,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) drm_encoder_helper_add(encoder, &ingenic_drm_encoder_helper_funcs); ret = drm_bridge_attach(encoder, bridge, NULL, 0); - if (ret) { - dev_err(dev, "Unable to attach bridge\n"); + if (ret) return ret; - } } drm_for_each_encoder(encoder, drm) { @@ -1099,7 +1093,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) encoder->possible_clones = clone_mask; } - ret = drm_irq_install(drm, irq); + ret = devm_request_irq(dev, irq, ingenic_drm_irq_handler, 0, drm->driver->name, drm); if (ret) { dev_err(dev, "Unable to install IRQ handler\n"); return ret; diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c index 
61b6d9fdbba1..aeb8a757d213 100644 --- a/drivers/gpu/drm/ingenic/ingenic-ipu.c +++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c @@ -625,7 +625,6 @@ static const struct drm_plane_helper_funcs ingenic_ipu_plane_helper_funcs = { .atomic_update = ingenic_ipu_plane_atomic_update, .atomic_check = ingenic_ipu_plane_atomic_check, .atomic_disable = ingenic_ipu_plane_atomic_disable, - .prepare_fb = drm_gem_plane_helper_prepare_fb, }; static int diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c index 231041b269f5..1793cd31b117 100644 --- a/drivers/gpu/drm/kmb/kmb_dsi.c +++ b/drivers/gpu/drm/kmb/kmb_dsi.c @@ -1441,7 +1441,6 @@ int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi) ret = drm_bridge_attach(encoder, adv_bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret) { - DRM_ERROR("failed to attach bridge to MIPI\n"); drm_encoder_cleanup(encoder); return ret; } diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index ecf3267334ff..dba8329937a3 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -508,7 +508,8 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name) INIT_WORK(&pipe->recover_work, lima_sched_recover_work); return drm_sched_init(&pipe->base, &lima_sched_ops, 1, - lima_job_hang_limit, msecs_to_jiffies(timeout), + lima_job_hang_limit, + msecs_to_jiffies(timeout), NULL, NULL, name); } diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c index 4ddc55d58f38..ce12a36e2db4 100644 --- a/drivers/gpu/drm/mcde/mcde_display.c +++ b/drivers/gpu/drm/mcde/mcde_display.c @@ -1479,7 +1479,6 @@ static struct drm_simple_display_pipe_funcs mcde_display_funcs = { .update = mcde_display_update, .enable_vblank = mcde_display_enable_vblank, .disable_vblank = mcde_display_disable_vblank, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; int mcde_display_init(struct drm_device *drm) diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c index 34a00d7e9c38..180ebbccbeda 100644 --- a/drivers/gpu/drm/mcde/mcde_dsi.c +++ b/drivers/gpu/drm/mcde/mcde_dsi.c @@ -760,7 +760,7 @@ static void mcde_dsi_start(struct mcde_dsi *d) DSI_MCTL_MAIN_DATA_CTL_BTA_EN | DSI_MCTL_MAIN_DATA_CTL_READ_EN | DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN; - if (!(d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)) + if (!(d->mdsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)) val |= DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN; writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL); @@ -1052,7 +1052,6 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge, { struct mcde_dsi *d = bridge_to_mcde_dsi(bridge); struct drm_device *drm = bridge->dev; - int ret; if (!drm_core_check_feature(drm, DRIVER_ATOMIC)) { dev_err(d->dev, "we need atomic updates\n"); @@ -1060,13 +1059,7 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge, } /* Attach the DSI bridge to the output (panel etc) bridge */ - ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags); - if (ret) { - dev_err(d->dev, "failed to attach the DSI bridge\n"); - return ret; - } - - return 0; + return drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags); } static const struct drm_bridge_funcs mcde_dsi_bridge_funcs = { diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index bced555648b0..96d35770c844 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -714,10 +714,8 @@ static int mtk_dpi_bind(struct device *dev, struct 
device *master, void *data) ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); - if (ret) { - dev_err(dev, "Failed to attach bridge: %d\n", ret); + if (ret) goto err_cleanup; - } dpi->connector = drm_bridge_connector_init(drm_dev, &dpi->encoder); if (IS_ERR(dpi->connector)) { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index b46bdb8985da..9b60bec33d3b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -270,12 +270,6 @@ static int mtk_drm_kms_init(struct drm_device *drm) goto err_component_unbind; } - /* - * We don't use the drm_irq_install() helpers provided by the DRM - * core, so we need to set this manually in order to allow the - * DRM_IOCTL_WAIT_VBLANK to operate correctly. - */ - drm->irq_enabled = true; ret = drm_vblank_init(drm, MAX_CRTC); if (ret < 0) goto err_component_unbind; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index b5582dcf564c..1667a7e7de38 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -227,7 +227,6 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { - .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = mtk_plane_atomic_check, .atomic_update = mtk_plane_atomic_update, .atomic_disable = mtk_plane_atomic_disable, diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index ae403c67cbd9..93b40c245f00 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -404,7 +404,7 @@ static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi) if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) tmp_reg |= HSTX_CKLP_EN; - if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)) + if (!(dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)) tmp_reg |= DIS_EOT; writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL); @@ -481,7 +481,7 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) timing->da_hs_zero + timing->da_hs_exit + 3; delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12; - delta += dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET ? 2 : 0; + delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 
2 : 0; horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp; horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte; diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index c1651a83700d..5838c44cbf6f 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1293,11 +1293,8 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge, if (hdmi->next_bridge) { ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge, bridge, flags); - if (ret) { - dev_err(hdmi->dev, - "Failed to attach external bridge: %d\n", ret); + if (ret) return ret; - } } mtk_cec_set_hpd_event(hdmi->cec_dev, mtk_hdmi_hpd_event, hdmi->dev); diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index a7388bf7c838..bc0d60df04ae 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -21,7 +21,6 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> -#include <drm/drm_irq.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -94,9 +93,6 @@ DEFINE_DRM_GEM_CMA_FOPS(fops); static const struct drm_driver meson_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, - /* IRQ */ - .irq_handler = meson_irq, - /* CMA Ops */ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(meson_dumb_create), @@ -285,7 +281,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) * Remove early framebuffers (ie. simplefb). The framebuffer can be * located anywhere in RAM */ - ret = drm_aperture_remove_framebuffers(false, "meson-drm-fb"); + ret = drm_aperture_remove_framebuffers(false, &meson_driver); if (ret) goto free_drm; @@ -335,7 +331,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) if (ret) goto free_drm; - ret = drm_irq_install(drm, priv->vsync_irq); + ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm); if (ret) goto free_drm; @@ -354,7 +350,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) return 0; uninstall_irq: - drm_irq_uninstall(drm); + free_irq(priv->vsync_irq, drm); free_drm: drm_dev_put(drm); @@ -382,7 +378,7 @@ static void meson_drv_unbind(struct device *dev) drm_kms_helper_poll_fini(drm); drm_atomic_helper_shutdown(drm); component_unbind_all(dev, drm); - drm_irq_uninstall(drm); + free_irq(priv->vsync_irq, drm); drm_dev_put(drm); if (priv->afbcd.ops) { diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c index ed063152aecd..dfef8afcc245 100644 --- a/drivers/gpu/drm/meson/meson_overlay.c +++ b/drivers/gpu/drm/meson/meson_overlay.c @@ -747,7 +747,6 @@ static const struct drm_plane_helper_funcs meson_overlay_helper_funcs = { .atomic_check = meson_overlay_atomic_check, .atomic_disable = meson_overlay_atomic_disable, .atomic_update = meson_overlay_atomic_update, - .prepare_fb = drm_gem_plane_helper_prepare_fb, }; static bool meson_overlay_format_mod_supported(struct drm_plane *plane, diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index a18510dae4c8..8640a8a8a469 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -422,7 +422,6 @@ static const struct drm_plane_helper_funcs meson_plane_helper_funcs = { .atomic_check = meson_plane_atomic_check, .atomic_disable = meson_plane_atomic_disable, .atomic_update = 
meson_plane_atomic_update, - .prepare_fb = drm_gem_plane_helper_prepare_fb, }; static bool meson_plane_format_mod_supported(struct drm_plane *plane, diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index a701d9563257..6b9243713b3c 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -262,17 +262,26 @@ static void mgag200_g200se_init_unique_id(struct mga_device *mdev) mdev->model.g200se.unique_rev_id); } -static int mgag200_device_init(struct mga_device *mdev, unsigned long flags) +static struct mga_device * +mgag200_device_create(struct pci_dev *pdev, enum mga_type type, unsigned long flags) { - struct drm_device *dev = &mdev->base; + struct mga_device *mdev; + struct drm_device *dev; int ret; - mdev->flags = mgag200_flags_from_driver_data(flags); - mdev->type = mgag200_type_from_driver_data(flags); + mdev = devm_drm_dev_alloc(&pdev->dev, &mgag200_driver, struct mga_device, base); + if (IS_ERR(mdev)) + return mdev; + dev = &mdev->base; + + pci_set_drvdata(pdev, dev); + + mdev->flags = flags; + mdev->type = type; ret = mgag200_regs_init(mdev); if (ret) - return ret; + return ERR_PTR(ret); if (mdev->type == G200_PCI || mdev->type == G200_AGP) mgag200_g200_init_refclk(mdev); @@ -281,33 +290,9 @@ static int mgag200_device_init(struct mga_device *mdev, unsigned long flags) ret = mgag200_mm_init(mdev); if (ret) - return ret; + return ERR_PTR(ret); ret = mgag200_modeset_init(mdev); - if (ret) { - drm_err(dev, "Fatal error during modeset init: %d\n", ret); - return ret; - } - - return 0; -} - -static struct mga_device * -mgag200_device_create(struct pci_dev *pdev, unsigned long flags) -{ - struct drm_device *dev; - struct mga_device *mdev; - int ret; - - mdev = devm_drm_dev_alloc(&pdev->dev, &mgag200_driver, - struct mga_device, base); - if (IS_ERR(mdev)) - return mdev; - dev = &mdev->base; - - pci_set_drvdata(pdev, dev); - - ret = mgag200_device_init(mdev, flags); if (ret) return ERR_PTR(ret); @@ -335,14 +320,27 @@ static const struct pci_device_id mgag200_pciidlist[] = { MODULE_DEVICE_TABLE(pci, mgag200_pciidlist); +static enum mga_type mgag200_type_from_driver_data(kernel_ulong_t driver_data) +{ + return (enum mga_type)(driver_data & MGAG200_TYPE_MASK); +} + +static unsigned long mgag200_flags_from_driver_data(kernel_ulong_t driver_data) +{ + return driver_data & MGAG200_FLAG_MASK; +} + static int mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + kernel_ulong_t driver_data = ent->driver_data; + enum mga_type type = mgag200_type_from_driver_data(driver_data); + unsigned long flags = mgag200_flags_from_driver_data(driver_data); struct mga_device *mdev; struct drm_device *dev; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "mgag200drmfb"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &mgag200_driver); if (ret) return ret; @@ -350,12 +348,12 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - mdev = mgag200_device_create(pdev, ent->driver_data); + mdev = mgag200_device_create(pdev, type, flags); if (IS_ERR(mdev)) return PTR_ERR(mdev); dev = &mdev->base; - ret = drm_dev_register(dev, ent->driver_data); + ret = drm_dev_register(dev, 0); if (ret) return ret; diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 749a075fe9e4..f7a0537c0d0a 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -166,8 +166,6 @@ struct 
mga_device { enum mga_type type; - int bpp_shifts[4]; - int fb_mtrr; union { @@ -192,18 +190,6 @@ static inline struct mga_device *to_mga_device(struct drm_device *dev) return container_of(dev, struct mga_device, base); } -static inline enum mga_type -mgag200_type_from_driver_data(kernel_ulong_t driver_data) -{ - return (enum mga_type)(driver_data & MGAG200_TYPE_MASK); -} - -static inline unsigned long -mgag200_flags_from_driver_data(kernel_ulong_t driver_data) -{ - return driver_data & MGAG200_FLAG_MASK; -} - /* mgag200_mode.c */ int mgag200_modeset_init(struct mga_device *mdev); diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 9d576240faed..3b3059f471c2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1137,10 +1137,11 @@ static void mgag200_set_mode_regs(struct mga_device *mdev, WREG8(MGA_MISC_OUT, misc); } -static u8 mgag200_get_bpp_shift(struct mga_device *mdev, - const struct drm_format_info *format) +static u8 mgag200_get_bpp_shift(const struct drm_format_info *format) { - return mdev->bpp_shifts[format->cpp[0] - 1]; + static const u8 bpp_shift[] = {0, 1, 0, 2}; + + return bpp_shift[format->cpp[0] - 1]; } /* @@ -1152,7 +1153,7 @@ static u32 mgag200_calculate_offset(struct mga_device *mdev, const struct drm_framebuffer *fb) { u32 offset = fb->pitches[0] / fb->format->cpp[0]; - u8 bppshift = mgag200_get_bpp_shift(mdev, fb->format); + u8 bppshift = mgag200_get_bpp_shift(fb->format); if (fb->format->cpp[0] * 8 == 24) offset = (offset * 3) >> (4 - bppshift); @@ -1189,7 +1190,7 @@ static void mgag200_set_format_regs(struct mga_device *mdev, bpp = format->cpp[0] * 8; - bppshift = mgag200_get_bpp_shift(mdev, format); + bppshift = mgag200_get_bpp_shift(format); switch (bpp) { case 24: scale = ((1 << bppshift) * 3) - 1; @@ -1699,11 +1700,6 @@ int mgag200_modeset_init(struct mga_device *mdev) size_t format_count = ARRAY_SIZE(mgag200_simple_display_pipe_formats); int ret; - mdev->bpp_shifts[0] = 0; - mdev->bpp_shifts[1] = 1; - mdev->bpp_shifts[2] = 0; - mdev->bpp_shifts[3] = 2; - mgag200_init_regs(mdev); ret = drmm_mode_config_init(dev); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index ed504fe5074f..b466a4af7c3e 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -849,11 +849,11 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, if (flags & MIPI_DSI_MODE_VIDEO) { if (flags & MIPI_DSI_MODE_VIDEO_HSE) data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE; - if (flags & MIPI_DSI_MODE_VIDEO_HFP) + if (flags & MIPI_DSI_MODE_VIDEO_NO_HFP) data |= DSI_VID_CFG0_HFP_POWER_STOP; - if (flags & MIPI_DSI_MODE_VIDEO_HBP) + if (flags & MIPI_DSI_MODE_VIDEO_NO_HBP) data |= DSI_VID_CFG0_HBP_POWER_STOP; - if (flags & MIPI_DSI_MODE_VIDEO_HSA) + if (flags & MIPI_DSI_MODE_VIDEO_NO_HSA) data |= DSI_VID_CFG0_HSA_POWER_STOP; /* Always set low power stop mode for BLLP * to let command engine send packets @@ -908,7 +908,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK); data = 0; - if (!(flags & MIPI_DSI_MODE_EOT_PACKET)) + if (!(flags & MIPI_DSI_MODE_NO_EOT_PACKET)) data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND; dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data); diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 227404077e39..67fae60f2fa5 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -169,7 +169,7 
@@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev) } /* the fw fb could be anywhere in memory */ - ret = drm_aperture_remove_framebuffers(false, "msm"); + ret = drm_aperture_remove_framebuffers(false, dev->driver); if (ret) goto fini; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index fc25a85eb1ca..fdc5367aecaa 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -665,9 +665,7 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev, break; } - post_deps[i].chain = - kmalloc(sizeof(*post_deps[i].chain), - GFP_KERNEL); + post_deps[i].chain = dma_fence_chain_alloc(); if (!post_deps[i].chain) { ret = -ENOMEM; break; @@ -684,7 +682,7 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev, if (ret) { for (j = 0; j <= i; ++j) { - kfree(post_deps[j].chain); + dma_fence_chain_free(post_deps[j].chain); if (post_deps[j].syncobj) drm_syncobj_put(post_deps[j].syncobj); } diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 793dd853b799..bd54c1412649 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -101,7 +101,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, ret = drm_sched_init(&ring->sched, &msm_sched_ops, num_hw_submissions, 0, sched_timeout, - NULL, to_msm_bo(ring->bo)->name); + NULL, NULL, to_msm_bo(ring->bo)->name); if (ret) { goto fail; } diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 6da93551e2e5..c277d3f61a5e 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -51,6 +51,7 @@ static const struct mxsfb_devdata mxsfb_devdata[] = { .hs_wdth_mask = 0xff, .hs_wdth_shift = 24, .has_overlay = false, + .has_ctrl2 = false, }, [MXSFB_V4] = { .transfer_count = LCDC_V4_TRANSFER_COUNT, @@ -59,6 +60,7 @@ static const struct mxsfb_devdata mxsfb_devdata[] = { .hs_wdth_mask = 0x3fff, .hs_wdth_shift = 18, .has_overlay = false, + .has_ctrl2 = true, }, [MXSFB_V6] = { .transfer_count = LCDC_V4_TRANSFER_COUNT, @@ -67,6 +69,7 @@ static const struct mxsfb_devdata mxsfb_devdata[] = { .hs_wdth_mask = 0x3fff, .hs_wdth_shift = 18, .has_overlay = true, + .has_ctrl2 = true, }, }; diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.h b/drivers/gpu/drm/mxsfb/mxsfb_drv.h index 399d23e91ed1..7c720e226fdf 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.h +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.h @@ -22,6 +22,7 @@ struct mxsfb_devdata { unsigned int hs_wdth_mask; unsigned int hs_wdth_shift; bool has_overlay; + bool has_ctrl2; }; struct mxsfb_drm_private { diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index 300e7bab0f43..89dd618d78f3 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -47,16 +47,13 @@ static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val) * Setup the MXSFB registers for decoding the pixels out of the framebuffer and * outputting them on the bus. 
*/ -static void mxsfb_set_formats(struct mxsfb_drm_private *mxsfb) +static void mxsfb_set_formats(struct mxsfb_drm_private *mxsfb, + const u32 bus_format) { struct drm_device *drm = mxsfb->drm; const u32 format = mxsfb->crtc.primary->state->fb->format->format; - u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; u32 ctrl, ctrl1; - if (mxsfb->connector->display_info.num_bus_formats) - bus_format = mxsfb->connector->display_info.bus_formats[0]; - DRM_DEV_DEBUG_DRIVER(drm->dev, "Using bus_format: 0x%08X\n", bus_format); @@ -107,6 +104,14 @@ static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb) clk_prepare_enable(mxsfb->clk_disp_axi); clk_prepare_enable(mxsfb->clk); + /* Increase number of outstanding requests on all supported IPs */ + if (mxsfb->devdata->has_ctrl2) { + reg = readl(mxsfb->base + LCDC_V4_CTRL2); + reg &= ~CTRL2_SET_OUTSTANDING_REQS_MASK; + reg |= CTRL2_SET_OUTSTANDING_REQS_16; + writel(reg, mxsfb->base + LCDC_V4_CTRL2); + } + /* If it was disabled, re-enable the mode again */ writel(CTRL_DOTCLK_MODE, mxsfb->base + LCDC_CTRL + REG_SET); @@ -115,6 +120,35 @@ static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb) reg |= VDCTRL4_SYNC_SIGNALS_ON; writel(reg, mxsfb->base + LCDC_VDCTRL4); + /* + * Enable recovery on underflow. + * + * There is some sort of corner case behavior of the controller, + * which could rarely be triggered at least on i.MX6SX connected + * to 800x480 DPI panel and i.MX8MM connected to DPI->DSI->LVDS + * bridged 1920x1080 panel (and likely on other setups too), where + * the image on the panel shifts to the right and wraps around. + * This happens either when the controller is enabled on boot or + * even later during run time. The condition does not correct + * itself automatically, i.e. the display image remains shifted. + * + * It seems this problem is known and is due to sporadic underflows + * of the LCDIF FIFO. While the LCDIF IP does have underflow/overflow + * IRQs, neither of the IRQs trigger and neither IRQ status bit is + * asserted when this condition occurs. + * + * All known revisions of the LCDIF IP have CTRL1 RECOVER_ON_UNDERFLOW + * bit, which is described in the reference manual since i.MX23 as + * " + * Set this bit to enable the LCDIF block to recover in the next + * field/frame if there was an underflow in the current field/frame. + * " + * Enable this bit to mitigate the sporadic underflows. 
+ */ + reg = readl(mxsfb->base + LCDC_CTRL1); + reg |= CTRL1_RECOVER_ON_UNDERFLOW; + writel(reg, mxsfb->base + LCDC_CTRL1); + writel(CTRL_RUN, mxsfb->base + LCDC_CTRL + REG_SET); } @@ -185,7 +219,8 @@ static dma_addr_t mxsfb_get_fb_paddr(struct drm_plane *plane) return gem->paddr; } -static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) +static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb, + const u32 bus_format) { struct drm_device *drm = mxsfb->crtc.dev; struct drm_display_mode *m = &mxsfb->crtc.state->adjusted_mode; @@ -206,11 +241,14 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) /* Clear the FIFOs */ writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET); + readl(mxsfb->base + LCDC_CTRL1); + writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_CLR); + readl(mxsfb->base + LCDC_CTRL1); if (mxsfb->devdata->has_overlay) writel(0, mxsfb->base + LCDC_AS_CTRL); - mxsfb_set_formats(mxsfb); + mxsfb_set_formats(mxsfb, bus_format); clk_set_rate(mxsfb->clk, m->crtc_clock * 1000); @@ -308,7 +346,9 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev); + struct drm_bridge_state *bridge_state; struct drm_device *drm = mxsfb->drm; + u32 bus_format = 0; dma_addr_t paddr; pm_runtime_get_sync(drm->dev); @@ -316,7 +356,23 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc, drm_crtc_vblank_on(crtc); - mxsfb_crtc_mode_set_nofb(mxsfb); + /* If there is a bridge attached to the LCDIF, use its bus format */ + if (mxsfb->bridge) { + bridge_state = + drm_atomic_get_new_bridge_state(state, + mxsfb->bridge); + bus_format = bridge_state->input_bus_cfg.format; + } + + /* If there is no bridge, use bus format from connector */ + if (!bus_format && mxsfb->connector->display_info.num_bus_formats) + bus_format = mxsfb->connector->display_info.bus_formats[0]; + + /* If all else fails, default to RGB888_1X24 */ + if (!bus_format) + bus_format = MEDIA_BUS_FMT_RGB888_1X24; + + mxsfb_crtc_mode_set_nofb(mxsfb, bus_format); /* Write cur_buf as well to avoid an initial corrupt frame */ paddr = mxsfb_get_fb_paddr(crtc->primary); @@ -500,13 +556,11 @@ static bool mxsfb_format_mod_supported(struct drm_plane *plane, } static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = { - .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = mxsfb_plane_atomic_check, .atomic_update = mxsfb_plane_primary_atomic_update, }; static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = { - .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = mxsfb_plane_atomic_check, .atomic_update = mxsfb_plane_overlay_atomic_update, }; diff --git a/drivers/gpu/drm/mxsfb/mxsfb_regs.h b/drivers/gpu/drm/mxsfb/mxsfb_regs.h index 55d28a27f912..694fea13e893 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_regs.h +++ b/drivers/gpu/drm/mxsfb/mxsfb_regs.h @@ -15,6 +15,7 @@ #define LCDC_CTRL 0x00 #define LCDC_CTRL1 0x10 #define LCDC_V3_TRANSFER_COUNT 0x20 +#define LCDC_V4_CTRL2 0x20 #define LCDC_V4_TRANSFER_COUNT 0x30 #define LCDC_V4_CUR_BUF 0x40 #define LCDC_V4_NEXT_BUF 0x50 @@ -54,12 +55,20 @@ #define CTRL_DF24 BIT(1) #define CTRL_RUN BIT(0) +#define CTRL1_RECOVER_ON_UNDERFLOW BIT(24) #define CTRL1_FIFO_CLEAR BIT(21) #define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16) #define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf) #define CTRL1_CUR_FRAME_DONE_IRQ_EN BIT(13) #define CTRL1_CUR_FRAME_DONE_IRQ BIT(9) +#define 
CTRL2_SET_OUTSTANDING_REQS_1 0 +#define CTRL2_SET_OUTSTANDING_REQS_2 (0x1 << 21) +#define CTRL2_SET_OUTSTANDING_REQS_4 (0x2 << 21) +#define CTRL2_SET_OUTSTANDING_REQS_8 (0x3 << 21) +#define CTRL2_SET_OUTSTANDING_REQS_16 (0x4 << 21) +#define CTRL2_SET_OUTSTANDING_REQS_MASK (0x7 << 21) + #define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16) #define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff) #define TRANSFER_COUNT_SET_HCOUNT(x) ((x) & 0xffff) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index f949767698fc..093e1f7163b3 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -31,6 +31,7 @@ #include <linux/dma-mapping.h> #include <linux/hdmi.h> #include <linux/component.h> +#include <linux/iopoll.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -1649,15 +1650,30 @@ nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head, core->func->sor->ctrl(core, nv_encoder->or, nv_encoder->ctrl, asyh); } +/* TODO: Should we extend this to PWM-only backlights? + * As well, should we add a DRM helper for waiting for the backlight to acknowledge + * the panel backlight has been shut off? Intel doesn't seem to do this, and uses a + * fixed time delay from the vbios… + */ static void nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc); struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder); + struct nouveau_backlight *backlight = nv_connector->backlight; struct drm_dp_aux *aux = &nv_connector->aux; + int ret; u8 pwr; + if (backlight && backlight->uses_dpcd) { + ret = drm_edp_backlight_disable(aux, &backlight->edp_info); + if (ret < 0) + NV_ERROR(drm, "Failed to disable backlight on [CONNECTOR:%d:%s]: %d\n", + nv_connector->base.base.id, nv_connector->base.name, ret); + } + if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { int ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr); @@ -1696,6 +1712,9 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta struct drm_device *dev = encoder->dev; struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_connector *nv_connector; +#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT + struct nouveau_backlight *backlight; +#endif struct nvbios *bios = &drm->vbios; bool hda = false; u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM; @@ -1770,6 +1789,14 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B; nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode); + +#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT + backlight = nv_connector->backlight; + if (backlight && backlight->uses_dpcd) + drm_edp_backlight_enable(&nv_connector->aux, &backlight->edp_info, + (u16)backlight->dev->props.brightness); +#endif + break; default: BUG(); @@ -2295,6 +2322,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) nv50_crc_atomic_start_reporting(state); if (!flushed) nv50_crc_atomic_release_notifier_contexts(state); + drm_atomic_helper_commit_hw_done(state); drm_atomic_helper_cleanup_planes(dev, state); drm_atomic_helper_commit_cleanup_done(state); diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 72f35a2babcb..1cbd71abc80a 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -42,11 +42,6 @@ static struct ida bl_ida; #define BL_NAME_SIZE 15 // 12 for name + 2 for digits + 1 for '\0' -struct nouveau_backlight { - struct backlight_device *dev; - int id; -}; - static bool nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE], struct nouveau_backlight *bl) @@ -148,6 +143,98 @@ static const struct backlight_ops nv50_bl_ops = { .update_status = nv50_set_intensity, }; +/* + * eDP brightness callbacks need to happen under lock, since we need to + * enable/disable the backlight ourselves for modesets + */ +static int +nv50_edp_get_brightness(struct backlight_device *bd) +{ + struct drm_connector *connector = dev_get_drvdata(bd->dev.parent); + struct drm_device *dev = connector->dev; + struct drm_crtc *crtc; + struct drm_modeset_acquire_ctx ctx; + int ret = 0; + + drm_modeset_acquire_init(&ctx, 0); + +retry: + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); + if (ret == -EDEADLK) + goto deadlock; + else if (ret < 0) + goto out; + + crtc = connector->state->crtc; + if (!crtc) + goto out; + + ret = drm_modeset_lock(&crtc->mutex, &ctx); + if (ret == -EDEADLK) + goto deadlock; + else if (ret < 0) + goto out; + + if (!crtc->state->active) + goto out; + + ret = bd->props.brightness; +out: + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + return ret; +deadlock: + drm_modeset_backoff(&ctx); + goto retry; +} + +static int +nv50_edp_set_brightness(struct backlight_device *bd) +{ + struct drm_connector *connector = dev_get_drvdata(bd->dev.parent); + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct drm_device *dev = connector->dev; + struct drm_crtc *crtc; + struct drm_dp_aux *aux = &nv_connector->aux; + struct nouveau_backlight *nv_bl = nv_connector->backlight; + struct drm_modeset_acquire_ctx ctx; + int ret = 0; + + drm_modeset_acquire_init(&ctx, 0); +retry: + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); + if (ret == -EDEADLK) + goto deadlock; + else if (ret < 0) + goto out; + + crtc = connector->state->crtc; + if (!crtc) + goto out; + + ret = drm_modeset_lock(&crtc->mutex, &ctx); + if (ret == -EDEADLK) + goto deadlock; + else if (ret < 0) + goto out; + + if (crtc->state->active) + ret = drm_edp_backlight_set_level(aux, &nv_bl->edp_info, bd->props.brightness); + +out: + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + return ret; +deadlock: + drm_modeset_backoff(&ctx); + goto retry; +} + +static const struct backlight_ops nv50_edp_bl_ops = { + .get_brightness = nv50_edp_get_brightness, + .update_status = nv50_edp_set_brightness, +}; + static int nva3_get_intensity(struct backlight_device *bd) { @@ -194,8 +281,13 @@ static const struct backlight_ops nva3_bl_ops = { .update_status = nva3_set_intensity, }; +/* FIXME: perform backlight probing for eDP _before_ this, this only gets called after connector + * registration which happens after the initial modeset + */ static int -nv50_backlight_init(struct nouveau_encoder *nv_encoder, +nv50_backlight_init(struct nouveau_backlight *bl, + struct nouveau_connector *nv_conn, + struct nouveau_encoder *nv_encoder, struct backlight_properties *props, const struct backlight_ops **ops) { @@ -205,6 +297,41 @@ nv50_backlight_init(struct nouveau_encoder *nv_encoder, if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1))) return -ENODEV; + if (nv_conn->type == DCB_CONNECTOR_eDP) { + int ret; + u16 current_level; + u8 
edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; + u8 current_mode; + + ret = drm_dp_dpcd_read(&nv_conn->aux, DP_EDP_DPCD_REV, edp_dpcd, + EDP_DISPLAY_CTL_CAP_SIZE); + if (ret < 0) + return ret; + + if (drm_edp_backlight_supported(edp_dpcd)) { + NV_DEBUG(drm, "DPCD backlight controls supported on %s\n", + nv_conn->base.name); + + ret = drm_edp_backlight_init(&nv_conn->aux, &bl->edp_info, 0, edp_dpcd, + &current_level, &current_mode); + if (ret < 0) + return ret; + + ret = drm_edp_backlight_enable(&nv_conn->aux, &bl->edp_info, current_level); + if (ret < 0) { + NV_ERROR(drm, "Failed to enable backlight on %s: %d\n", + nv_conn->base.name, ret); + return ret; + } + + *ops = &nv50_edp_bl_ops; + props->brightness = current_level; + props->max_brightness = bl->edp_info.max; + bl->uses_dpcd = true; + return 0; + } + } + if (drm->client.device.info.chipset <= 0xa0 || drm->client.device.info.chipset == 0xaa || drm->client.device.info.chipset == 0xac) @@ -245,6 +372,10 @@ nouveau_backlight_init(struct drm_connector *connector) if (!nv_encoder) return 0; + bl = kzalloc(sizeof(*bl), GFP_KERNEL); + if (!bl) + return -ENOMEM; + switch (device->info.family) { case NV_DEVICE_INFO_V0_CURIE: ret = nv40_backlight_init(nv_encoder, &props, &ops); @@ -257,20 +388,19 @@ nouveau_backlight_init(struct drm_connector *connector) case NV_DEVICE_INFO_V0_VOLTA: case NV_DEVICE_INFO_V0_TURING: case NV_DEVICE_INFO_V0_AMPERE: //XXX: not confirmed - ret = nv50_backlight_init(nv_encoder, &props, &ops); + ret = nv50_backlight_init(bl, nouveau_connector(connector), + nv_encoder, &props, &ops); break; default: - return 0; + ret = 0; + goto fail_alloc; } - if (ret == -ENODEV) - return 0; - else if (ret) - return ret; - - bl = kzalloc(sizeof(*bl), GFP_KERNEL); - if (!bl) - return -ENOMEM; + if (ret) { + if (ret == -ENODEV) + ret = 0; + goto fail_alloc; + } if (!nouveau_get_backlight_name(backlight_name, bl)) { NV_ERROR(drm, "Failed to retrieve a unique name for the backlight interface\n"); @@ -287,7 +417,9 @@ nouveau_backlight_init(struct drm_connector *connector) } nouveau_connector(connector)->backlight = bl; - bl->dev->props.brightness = bl->dev->ops->get_brightness(bl->dev); + if (!bl->dev->props.brightness) + bl->dev->props.brightness = + bl->dev->ops->get_brightness(bl->dev); backlight_update_status(bl->dev); return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index d0b859c4a80e..40f90e353540 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -46,7 +46,14 @@ struct nvkm_i2c_port; struct dcb_output; #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT -struct nouveau_backlight; +struct nouveau_backlight { + struct backlight_device *dev; + + struct drm_edp_backlight_info edp_info; + bool uses_dpcd : 1; + + int id; +}; #endif #define nouveau_conn_atom(p) \ diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index a616cf4573b8..5e1ff870823b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -553,8 +553,6 @@ nouveau_drm_device_init(struct drm_device *dev) if (ret) goto fail_master; - dev->irq_enabled = true; - nvxx_client(&drm->client.base)->debug = nvkm_dbgopt(nouveau_debug, "DRM"); @@ -738,7 +736,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev, nvkm_device_del(&device); /* Remove conflicting drivers (vesafb, efifb etc.
*/ - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "nouveaufb"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver_pci); if (ret) return ret; @@ -795,7 +793,6 @@ nouveau_drm_device_remove(struct drm_device *dev) drm_dev_unregister(dev); - dev->irq_enabled = false; client = nvxx_client(&drm->client.base); device = nvkm_device_find(client->device); diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h index 1ffcc0a491fd..77c2fed76e8b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_encoder.h +++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h @@ -30,6 +30,7 @@ #include <subdev/bios/dcb.h> #include <drm/drm_encoder_slave.h> +#include <drm/drm_dp_helper.h> #include <drm/drm_dp_mst_helper.h> #include "dispnv04/disp.h" struct nv50_head_atom; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 6b43918035df..05d0b3eb3690 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -358,7 +358,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e fobj = dma_resv_shared_list(resv); fence = dma_resv_excl_fence(resv); - if (fence && (!exclusive || !fobj || !fobj->shared_count)) { + if (fence) { struct nouveau_channel *prev = NULL; bool must_wait = true; diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index 7c4b374b3eca..60cd8c0463df 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -11,9 +11,9 @@ #include "nouveau_vga.h" static unsigned int -nouveau_vga_set_decode(void *priv, bool state) +nouveau_vga_set_decode(struct pci_dev *pdev, bool state) { - struct nouveau_drm *drm = nouveau_drm(priv); + struct nouveau_drm *drm = nouveau_drm(pci_get_drvdata(pdev)); struct nvif_object *device = &drm->client.device.object; if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE && @@ -94,7 +94,7 @@ nouveau_vga_init(struct nouveau_drm *drm) return; pdev = to_pci_dev(dev->dev); - vga_client_register(pdev, dev, NULL, nouveau_vga_set_decode); + vga_client_register(pdev, nouveau_vga_set_decode); /* don't register Thunderbolt eGPU with vga_switcheroo */ if (pci_is_thunderbolt_attached(pdev)) @@ -118,7 +118,7 @@ nouveau_vga_fini(struct nouveau_drm *drm) return; pdev = to_pci_dev(dev->dev); - vga_client_register(pdev, NULL, NULL, NULL); + vga_client_unregister(pdev); if (pci_is_thunderbolt_attached(pdev)) return; diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 8632139e0f01..f86e20578143 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -290,12 +290,8 @@ static int omap_modeset_init(struct drm_device *dev) ret = drm_bridge_attach(pipe->encoder, pipe->output->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); - if (ret < 0) { - dev_err(priv->dev, - "unable to attach bridge %pOF\n", - pipe->output->bridge->of_node); + if (ret < 0) return ret; - } } id = omap_display_id(pipe->output); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index d6f136984da9..591d4c273f02 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -48,6 +48,8 @@ struct omap_drm_private { struct dss_device *dss; struct dispc_device *dispc; + bool irq_enabled; + unsigned int num_pipes; struct omap_drm_pipeline pipes[8]; struct omap_drm_pipeline *channels[8]; diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c 
b/drivers/gpu/drm/omapdrm/omap_irq.c index 15148d4b35b5..4aca14dab927 100644 --- a/drivers/gpu/drm/omapdrm/omap_irq.c +++ b/drivers/gpu/drm/omapdrm/omap_irq.c @@ -253,13 +253,6 @@ static const u32 omap_underflow_irqs[] = { [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW, }; -/* - * We need a special version, instead of just using drm_irq_install(), - * because we need to register the irq via omapdss. Once omapdss and - * omapdrm are merged together we can assign the dispc hwmod data to - * ourselves and drop these and just use drm_irq_{install,uninstall}() - */ - int omap_drm_irq_install(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; @@ -291,7 +284,7 @@ int omap_drm_irq_install(struct drm_device *dev) if (ret < 0) return ret; - dev->irq_enabled = true; + priv->irq_enabled = true; return 0; } @@ -300,10 +293,10 @@ void omap_drm_irq_uninstall(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; - if (!dev->irq_enabled) + if (!priv->irq_enabled) return; - dev->irq_enabled = false; + priv->irq_enabled = false; dispc_free_irq(priv->dispc, dev); } diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 801da917507d..512af976b7e9 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -6,6 +6,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_plane_helper.h> #include "omap_dmm_tiler.h" @@ -29,6 +30,8 @@ static int omap_plane_prepare_fb(struct drm_plane *plane, if (!new_state->fb) return 0; + drm_gem_plane_helper_prepare_fb(plane, new_state); + return omap_framebuffer_pin(new_state->fb); } diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index ef87d92cdf49..6b3eb041182c 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -82,6 +82,7 @@ config DRM_PANEL_SIMPLE depends on BACKLIGHT_CLASS_DEVICE depends on PM select VIDEOMODE_HELPERS + select DRM_DP_AUX_BUS help DRM panel driver for dumb panels that need at most a regulator and a GPIO to be powered up. Optionally a backlight can be attached so @@ -133,6 +134,15 @@ config DRM_PANEL_ILITEK_ILI9881C Say Y if you want to enable support for panels based on the Ilitek ILI9881c controller. +config DRM_PANEL_INNOLUX_EJ030NA + tristate "Innolux EJ030NA 320x480 LCD panel" + depends on OF && SPI + select REGMAP_SPI + help + Say Y here to enable support for the Innolux/Chimei EJ030NA + 320x480 3.0" panel as found in the RS97 V2.1, RG300(non-ips) + and LDK handheld gaming consoles. + config DRM_PANEL_INNOLUX_P079ZCA tristate "Innolux P079ZCA panel" depends on OF @@ -343,6 +353,16 @@ config DRM_PANEL_RONBO_RB070D30 Say Y here if you want to enable support for Ronbo Electronics RB070D30 1024x600 DSI panel. +config DRM_PANEL_SAMSUNG_DB7430 + tristate "Samsung DB7430-based DPI panels" + depends on OF && SPI && GPIOLIB + depends on BACKLIGHT_CLASS_DEVICE + select DRM_MIPI_DBI + help + Say Y here if you want to enable support for the Samsung + DB7430 DPI display controller used in such devices as the + LMS397KF04 480x800 DPI panel. + config DRM_PANEL_SAMSUNG_S6D16D0 tristate "Samsung S6D16D0 DSI video mode panel" depends on OF @@ -377,6 +397,7 @@ config DRM_PANEL_SAMSUNG_S6E63M0_SPI depends on SPI depends on DRM_PANEL_SAMSUNG_S6E63M0 default DRM_PANEL_SAMSUNG_S6E63M0 + select DRM_MIPI_DBI help Say Y here if you want to be able to access the Samsung S6E63M0 panel using SPI. 
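Several of the plane-helper changes above drop an explicit .prepare_fb = drm_gem_plane_helper_prepare_fb hook, while omap_plane keeps its own prepare_fb and now calls the GEM helper before pinning. The snippet below is a minimal sketch of that chaining pattern, assuming only the drm_gem_plane_helper_prepare_fb() call visible in the omap_plane hunk; the foo_* names are hypothetical and belong to no driver in this series.

#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

/*
 * Hypothetical driver hook: let the GEM atomic helper attach implicit
 * fences to the new plane state, then do the driver-specific pinning.
 */
static int foo_plane_prepare_fb(struct drm_plane *plane,
				struct drm_plane_state *new_state)
{
	int ret;

	if (!new_state->fb)
		return 0;

	ret = drm_gem_plane_helper_prepare_fb(plane, new_state);
	if (ret)
		return ret;

	return foo_framebuffer_pin(new_state->fb); /* hypothetical pin step */
}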
@@ -553,6 +574,16 @@ config DRM_PANEL_VISIONOX_RM69299 Say Y here if you want to enable support for Visionox RM69299 DSI Video Mode panel. +config DRM_PANEL_WIDECHIPS_WS2401 + tristate "Widechips WS2401 DPI panel driver" + depends on SPI && GPIOLIB + depends on BACKLIGHT_CLASS_DEVICE + select DRM_MIPI_DBI + help + Say Y here if you want to enable support for the Widechips WS2401 DPI + 480x800 display controller used in panels such as Samsung LMS380KF01. + This display is used in the Samsung Galaxy Ace 2 GT-I8160 (Codina). + config DRM_PANEL_XINPENG_XPP055C272 tristate "Xinpeng XPP055C272 panel driver" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index cae4d976c069..08debae9b314 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o +obj-$(CONFIG_DRM_PANEL_INNOLUX_EJ030NA) += panel-innolux-ej030na.o obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o obj-$(CONFIG_DRM_PANEL_KHADAS_TS050) += panel-khadas-ts050.o @@ -33,6 +34,7 @@ obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67191) += panel-raydium-rm67191.o obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o @@ -58,4 +60,5 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o +obj-$(CONFIG_DRM_PANEL_WIDECHIPS_WS2401) += panel-widechips-ws2401.o obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c index e95bc9f60b3f..44674ebedf59 100644 --- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c +++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c @@ -302,7 +302,7 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi) dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | - MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_EOT_PACKET | + MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; drm_panel_init(&ctx->panel, dev, &tm5p5_nt35596_panel_funcs, diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c index 5fbfb71ca3d9..da4a69067e18 100644 --- a/drivers/gpu/drm/panel/panel-dsi-cm.c +++ b/drivers/gpu/drm/panel/panel-dsi-cm.c @@ -574,7 +574,7 @@ static int dsicm_probe(struct mipi_dsi_device *dsi) dsi->lanes = 2; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS | - MIPI_DSI_MODE_EOT_PACKET; + MIPI_DSI_MODE_NO_EOT_PACKET; dsi->hs_rate = 
ddata->panel_data->max_hs_rate; dsi->lp_rate = ddata->panel_data->max_lp_rate; diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c index 4787f0833264..80227617a4d6 100644 --- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c +++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c @@ -273,7 +273,7 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi) dsi->lanes = 1; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | - MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET | + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_CLOCK_NON_CONTINUOUS; drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs, diff --git a/drivers/gpu/drm/panel/panel-innolux-ej030na.c b/drivers/gpu/drm/panel/panel-innolux-ej030na.c new file mode 100644 index 000000000000..34b98f70bd22 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-innolux-ej030na.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Innolux/Chimei EJ030NA TFT LCD panel driver + * + * Copyright (C) 2020, Paul Cercueil <paul@crapouillou.net> + * Copyright (C) 2020, Christophe Branchereau <cbranchereau@gmail.com> + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/gpio/consumer.h> +#include <linux/media-bus-format.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> + +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +struct ej030na_info { + const struct drm_display_mode *display_modes; + unsigned int num_modes; + u16 width_mm, height_mm; + u32 bus_format, bus_flags; +}; + +struct ej030na { + struct drm_panel panel; + struct spi_device *spi; + struct regmap *map; + + const struct ej030na_info *panel_info; + + struct regulator *supply; + struct gpio_desc *reset_gpio; +}; + +static inline struct ej030na *to_ej030na(struct drm_panel *panel) +{ + return container_of(panel, struct ej030na, panel); +} + +static const struct reg_sequence ej030na_init_sequence[] = { + { 0x05, 0x1e }, + { 0x05, 0x5c }, + { 0x02, 0x14 }, + { 0x03, 0x40 }, + { 0x04, 0x07 }, + { 0x06, 0x12 }, + { 0x07, 0xd2 }, + { 0x0c, 0x06 }, + { 0x0d, 0x40 }, + { 0x0e, 0x40 }, + { 0x0f, 0x40 }, + { 0x10, 0x40 }, + { 0x11, 0x40 }, + { 0x2f, 0x40 }, + { 0x5a, 0x02 }, + + { 0x30, 0x07 }, + { 0x31, 0x57 }, + { 0x32, 0x53 }, + { 0x33, 0x77 }, + { 0x34, 0xb8 }, + { 0x35, 0xbd }, + { 0x36, 0xb8 }, + { 0x37, 0xe7 }, + { 0x38, 0x04 }, + { 0x39, 0xff }, + + { 0x40, 0x0b }, + { 0x41, 0xb8 }, + { 0x42, 0xab }, + { 0x43, 0xb9 }, + { 0x44, 0x6a }, + { 0x45, 0x56 }, + { 0x46, 0x61 }, + { 0x47, 0x08 }, + { 0x48, 0x0f }, + { 0x49, 0x0f }, + + { 0x2b, 0x01 }, +}; + +static int ej030na_prepare(struct drm_panel *panel) +{ + struct ej030na *priv = to_ej030na(panel); + struct device *dev = &priv->spi->dev; + int err; + + err = regulator_enable(priv->supply); + if (err) { + dev_err(dev, "Failed to enable power supply: %d\n", err); + return err; + } + + /* Reset the chip */ + gpiod_set_value_cansleep(priv->reset_gpio, 1); + usleep_range(50, 150); + gpiod_set_value_cansleep(priv->reset_gpio, 0); + usleep_range(50, 150); + + err = regmap_multi_reg_write(priv->map, ej030na_init_sequence, + ARRAY_SIZE(ej030na_init_sequence)); + if (err) { + dev_err(dev, "Failed to init registers: %d\n", err); + goto err_disable_regulator; + } + + msleep(120); + + return 0; + +err_disable_regulator: + regulator_disable(priv->supply); + return err; +} + +static int ej030na_unprepare(struct drm_panel 
*panel) +{ + struct ej030na *priv = to_ej030na(panel); + + gpiod_set_value_cansleep(priv->reset_gpio, 1); + regulator_disable(priv->supply); + + return 0; +} + +static int ej030na_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct ej030na *priv = to_ej030na(panel); + const struct ej030na_info *panel_info = priv->panel_info; + struct drm_display_mode *mode; + unsigned int i; + + for (i = 0; i < panel_info->num_modes; i++) { + mode = drm_mode_duplicate(connector->dev, + &panel_info->display_modes[i]); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER; + if (panel_info->num_modes == 1) + mode->type |= DRM_MODE_TYPE_PREFERRED; + + drm_mode_probed_add(connector, mode); + } + + connector->display_info.bpc = 8; + connector->display_info.width_mm = panel_info->width_mm; + connector->display_info.height_mm = panel_info->height_mm; + + drm_display_info_set_bus_formats(&connector->display_info, + &panel_info->bus_format, 1); + connector->display_info.bus_flags = panel_info->bus_flags; + + return panel_info->num_modes; +} + +static const struct drm_panel_funcs ej030na_funcs = { + .prepare = ej030na_prepare, + .unprepare = ej030na_unprepare, + .get_modes = ej030na_get_modes, +}; + +static const struct regmap_config ej030na_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x5a, +}; + +static int ej030na_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct ej030na *priv; + int err; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->spi = spi; + spi_set_drvdata(spi, priv); + + priv->map = devm_regmap_init_spi(spi, &ej030na_regmap_config); + if (IS_ERR(priv->map)) { + dev_err(dev, "Unable to init regmap\n"); + return PTR_ERR(priv->map); + } + + priv->panel_info = of_device_get_match_data(dev); + if (!priv->panel_info) + return -EINVAL; + + priv->supply = devm_regulator_get(dev, "power"); + if (IS_ERR(priv->supply)) { + dev_err(dev, "Failed to get power supply\n"); + return PTR_ERR(priv->supply); + } + + priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(priv->reset_gpio)) { + dev_err(dev, "Failed to get reset GPIO\n"); + return PTR_ERR(priv->reset_gpio); + } + + drm_panel_init(&priv->panel, dev, &ej030na_funcs, + DRM_MODE_CONNECTOR_DPI); + + err = drm_panel_of_backlight(&priv->panel); + if (err) + return err; + + drm_panel_add(&priv->panel); + + return 0; +} + +static int ej030na_remove(struct spi_device *spi) +{ + struct ej030na *priv = spi_get_drvdata(spi); + + drm_panel_remove(&priv->panel); + drm_panel_disable(&priv->panel); + drm_panel_unprepare(&priv->panel); + + return 0; +} + +static const struct drm_display_mode ej030na_modes[] = { + { /* 60 Hz */ + .clock = 14400, + .hdisplay = 320, + .hsync_start = 320 + 10, + .hsync_end = 320 + 10 + 37, + .htotal = 320 + 10 + 37 + 33, + .vdisplay = 480, + .vsync_start = 480 + 102, + .vsync_end = 480 + 102 + 9 + 9, + .vtotal = 480 + 102 + 9 + 9, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + }, + { /* 50 Hz */ + .clock = 12000, + .hdisplay = 320, + .hsync_start = 320 + 10, + .hsync_end = 320 + 10 + 37, + .htotal = 320 + 10 + 37 + 33, + .vdisplay = 480, + .vsync_start = 480 + 102, + .vsync_end = 480 + 102 + 9, + .vtotal = 480 + 102 + 9 + 9, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + }, +}; + +static const struct ej030na_info ej030na_info = { + .display_modes = ej030na_modes, + .num_modes = ARRAY_SIZE(ej030na_modes), + .width_mm = 70, + 
.height_mm = 51, + .bus_format = MEDIA_BUS_FMT_RGB888_3X8_DELTA, + .bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE | DRM_BUS_FLAG_DE_LOW, +}; + +static const struct of_device_id ej030na_of_match[] = { + { .compatible = "innolux,ej030na", .data = &ej030na_info }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ej030na_of_match); + +static struct spi_driver ej030na_driver = { + .driver = { + .name = "panel-innolux-ej030na", + .of_match_table = ej030na_of_match, + }, + .probe = ej030na_probe, + .remove = ej030na_remove, +}; +module_spi_driver(ej030na_driver); + +MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>"); +MODULE_AUTHOR("Christophe Branchereau <cbranchereau@gmail.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c index 8f6ac1a40c31..a3ec4cbdbf7a 100644 --- a/drivers/gpu/drm/panel/panel-khadas-ts050.c +++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c @@ -809,7 +809,7 @@ static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi) dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | - MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET; + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; khadas_ts050 = devm_kzalloc(&dsi->dev, sizeof(*khadas_ts050), GFP_KERNEL); diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c index ed0d5f959037..a5a414920430 100644 --- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c +++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c @@ -593,7 +593,7 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi) dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | - MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET; + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; drm_panel_init(&ctx->panel, &dsi->dev, &ltk050h3146w_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c index 3c00e4f8f803..21e48923836d 100644 --- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c +++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c @@ -442,7 +442,7 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi) dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | - MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET; + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; drm_panel_init(&ctx->panel, &dsi->dev, &ltk500hd1829_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c index 45b975dee587..198493a6eb6a 100644 --- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c +++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c @@ -184,7 +184,7 @@ static int osd101t2587_panel_probe(struct mipi_dsi_device *dsi) dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | - MIPI_DSI_MODE_EOT_PACKET; + MIPI_DSI_MODE_NO_EOT_PACKET; osd101t2587 = devm_kzalloc(&dsi->dev, sizeof(*osd101t2587), GFP_KERNEL); if (!osd101t2587) diff --git a/drivers/gpu/drm/panel/panel-samsung-db7430.c b/drivers/gpu/drm/panel/panel-samsung-db7430.c new file mode 100644 index 000000000000..ead479719f00 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-db7430.c @@ -0,0 +1,347 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Panel driver for the
Samsung LMS397KF04 480x800 DPI RGB panel. + * According to the data sheet the display controller is called DB7430. + * Found in the Samsung Galaxy Beam GT-I8350 mobile phone. + * Linus Walleij <linus.walleij@linaro.org> + */ +#include <drm/drm_mipi_dbi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/media-bus-format.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> + +#include <video/mipi_display.h> + +#define DB7430_ACCESS_PROT_OFF 0xb0 +#define DB7430_UNKNOWN_B4 0xb4 +#define DB7430_USER_SELECT 0xb5 +#define DB7430_UNKNOWN_B7 0xb7 +#define DB7430_UNKNOWN_B8 0xb8 +#define DB7430_PANEL_DRIVING 0xc0 +#define DB7430_SOURCE_CONTROL 0xc1 +#define DB7430_GATE_INTERFACE 0xc4 +#define DB7430_DISPLAY_H_TIMING 0xc5 +#define DB7430_RGB_SYNC_OPTION 0xc6 +#define DB7430_GAMMA_SET_RED 0xc8 +#define DB7430_GAMMA_SET_GREEN 0xc9 +#define DB7430_GAMMA_SET_BLUE 0xca +#define DB7430_BIAS_CURRENT_CTRL 0xd1 +#define DB7430_DDV_CTRL 0xd2 +#define DB7430_GAMMA_CTRL_REF 0xd3 +#define DB7430_UNKNOWN_D4 0xd4 +#define DB7430_DCDC_CTRL 0xd5 +#define DB7430_VCL_CTRL 0xd6 +#define DB7430_UNKNOWN_F8 0xf8 +#define DB7430_UNKNOWN_FC 0xfc + +#define DATA_MASK 0x100 + +/** + * struct db7430 - state container for a panel controlled by the DB7430 + * controller + */ +struct db7430 { + /** @dev: the container device */ + struct device *dev; + /** @dbi: the DBI bus abstraction handle */ + struct mipi_dbi dbi; + /** @panel: the DRM panel instance for this device */ + struct drm_panel panel; + /** @width: the width of this panel in mm */ + u32 width; + /** @height: the height of this panel in mm */ + u32 height; + /** @reset: reset GPIO line */ + struct gpio_desc *reset; + /** @regulators: VCCIO and VIO supply regulators */ + struct regulator_bulk_data regulators[2]; +}; + +static const struct drm_display_mode db7430_480_800_mode = { + /* + * 31 ns period min (htotal*vtotal*vrefresh)/1000 + * gives a Vrefresh of ~71 Hz. + */ + .clock = 32258, + .hdisplay = 480, + .hsync_start = 480 + 10, + .hsync_end = 480 + 10 + 4, + .htotal = 480 + 10 + 4 + 40, + .vdisplay = 800, + .vsync_start = 800 + 6, + .vsync_end = 800 + 6 + 1, + .vtotal = 800 + 6 + 1 + 7, + .width_mm = 53, + .height_mm = 87, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + +static inline struct db7430 *to_db7430(struct drm_panel *panel) +{ + return container_of(panel, struct db7430, panel); +} + +static int db7430_power_on(struct db7430 *db) +{ + struct mipi_dbi *dbi = &db->dbi; + int ret; + + /* Power up */ + ret = regulator_bulk_enable(ARRAY_SIZE(db->regulators), + db->regulators); + if (ret) { + dev_err(db->dev, "failed to enable regulators: %d\n", ret); + return ret; + } + msleep(50); + + /* Assert reset >=1 ms */ + gpiod_set_value_cansleep(db->reset, 1); + usleep_range(1000, 5000); + /* De-assert reset */ + gpiod_set_value_cansleep(db->reset, 0); + /* Wait >= 10 ms */ + msleep(10); + dev_dbg(db->dev, "de-asserted RESET\n"); + + /* + * This is set to 0x0a (RGB/BGR order + horizontal flip) in order + * to make the display behave normally. If this is not set the displays + * normal output behaviour is horizontally flipped and BGR ordered. Do + * it twice because the first message doesn't always "take". 
+ */ + mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, 0x0a); + mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, 0x0a); + mipi_dbi_command(dbi, DB7430_ACCESS_PROT_OFF, 0x00); + mipi_dbi_command(dbi, DB7430_PANEL_DRIVING, 0x28, 0x08); + mipi_dbi_command(dbi, DB7430_SOURCE_CONTROL, + 0x01, 0x30, 0x15, 0x05, 0x22); + mipi_dbi_command(dbi, DB7430_GATE_INTERFACE, + 0x10, 0x01, 0x00); + mipi_dbi_command(dbi, DB7430_DISPLAY_H_TIMING, + 0x06, 0x55, 0x03, 0x07, 0x0b, + 0x33, 0x00, 0x01, 0x03); + /* + * 0x00 in datasheet 0x01 in vendor code 0x00, it seems 0x01 means + * DE active high and 0x00 means DE active low. + */ + mipi_dbi_command(dbi, DB7430_RGB_SYNC_OPTION, 0x01); + mipi_dbi_command(dbi, DB7430_GAMMA_SET_RED, + /* R positive gamma */ 0x00, + 0x0A, 0x31, 0x3B, 0x4E, 0x58, 0x59, 0x5B, 0x58, 0x5E, 0x62, + 0x60, 0x61, 0x5E, 0x62, 0x55, 0x55, 0x7F, 0x08, + /* R negative gamma */ 0x00, + 0x0A, 0x31, 0x3B, 0x4E, 0x58, 0x59, 0x5B, 0x58, 0x5E, 0x62, + 0x60, 0x61, 0x5E, 0x62, 0x55, 0x55, 0x7F, 0x08); + mipi_dbi_command(dbi, DB7430_GAMMA_SET_GREEN, + /* G positive gamma */ 0x00, + 0x25, 0x15, 0x28, 0x3D, 0x4A, 0x48, 0x4C, 0x4A, 0x52, 0x59, + 0x59, 0x5B, 0x56, 0x60, 0x5D, 0x55, 0x7F, 0x0A, + /* G negative gamma */ 0x00, + 0x25, 0x15, 0x28, 0x3D, 0x4A, 0x48, 0x4C, 0x4A, 0x52, 0x59, + 0x59, 0x5B, 0x56, 0x60, 0x5D, 0x55, 0x7F, 0x0A); + mipi_dbi_command(dbi, DB7430_GAMMA_SET_BLUE, + /* B positive gamma */ 0x00, + 0x48, 0x10, 0x1F, 0x2F, 0x35, 0x38, 0x3D, 0x3C, 0x45, 0x4D, + 0x4E, 0x52, 0x51, 0x60, 0x7F, 0x7E, 0x7F, 0x0C, + /* B negative gamma */ 0x00, + 0x48, 0x10, 0x1F, 0x2F, 0x35, 0x38, 0x3D, 0x3C, 0x45, 0x4D, + 0x4E, 0x52, 0x51, 0x60, 0x7F, 0x7E, 0x7F, 0x0C); + mipi_dbi_command(dbi, DB7430_BIAS_CURRENT_CTRL, 0x33, 0x13); + mipi_dbi_command(dbi, DB7430_DDV_CTRL, 0x11, 0x00, 0x00); + mipi_dbi_command(dbi, DB7430_GAMMA_CTRL_REF, 0x50, 0x50); + mipi_dbi_command(dbi, DB7430_DCDC_CTRL, 0x2f, 0x11, 0x1e, 0x46); + mipi_dbi_command(dbi, DB7430_VCL_CTRL, 0x11, 0x0a); + + return 0; +} + +static int db7430_power_off(struct db7430 *db) +{ + /* Go into RESET and disable regulators */ + gpiod_set_value_cansleep(db->reset, 1); + return regulator_bulk_disable(ARRAY_SIZE(db->regulators), + db->regulators); +} + +static int db7430_unprepare(struct drm_panel *panel) +{ + return db7430_power_off(to_db7430(panel)); +} + +static int db7430_disable(struct drm_panel *panel) +{ + struct db7430 *db = to_db7430(panel); + struct mipi_dbi *dbi = &db->dbi; + + mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF); + msleep(25); + mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE); + msleep(120); + + return 0; +} + +static int db7430_prepare(struct drm_panel *panel) +{ + return db7430_power_on(to_db7430(panel)); +} + +static int db7430_enable(struct drm_panel *panel) +{ + struct db7430 *db = to_db7430(panel); + struct mipi_dbi *dbi = &db->dbi; + + /* Exit sleep mode */ + mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); + msleep(20); + + /* NVM (non-volatile memory) load sequence */ + mipi_dbi_command(dbi, DB7430_UNKNOWN_D4, 0x52, 0x5e); + mipi_dbi_command(dbi, DB7430_UNKNOWN_F8, 0x01, 0xf5, 0xf2, 0x71, 0x44); + mipi_dbi_command(dbi, DB7430_UNKNOWN_FC, 0x00, 0x08); + msleep(150); + + /* CABC turn on sequence (BC = backlight control) */ + mipi_dbi_command(dbi, DB7430_UNKNOWN_B4, 0x0f, 0x00, 0x50); + mipi_dbi_command(dbi, DB7430_USER_SELECT, 0x80); + mipi_dbi_command(dbi, DB7430_UNKNOWN_B7, 0x24); + mipi_dbi_command(dbi, DB7430_UNKNOWN_B8, 0x01); + + /* Turn on display */ + mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); + + return 0; +} + +/** 
+ * db7430_get_modes() - return the mode + * @panel: the panel to get the mode for + * @connector: reference to the central DRM connector control structure + */ +static int db7430_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct db7430 *db = to_db7430(panel); + struct drm_display_mode *mode; + static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; + + mode = drm_mode_duplicate(connector->dev, &db7430_480_800_mode); + if (!mode) { + dev_err(db->dev, "failed to add mode\n"); + return -ENOMEM; + } + + connector->display_info.bpc = 8; + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + connector->display_info.bus_flags = + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; + drm_display_info_set_bus_formats(&connector->display_info, + &bus_format, 1); + + drm_mode_set_name(mode); + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs db7430_drm_funcs = { + .disable = db7430_disable, + .unprepare = db7430_unprepare, + .prepare = db7430_prepare, + .enable = db7430_enable, + .get_modes = db7430_get_modes, +}; + +static int db7430_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct db7430 *db; + int ret; + + db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL); + if (!db) + return -ENOMEM; + db->dev = dev; + + /* + * VCI is the analog voltage supply + * VCCIO is the digital I/O voltage supply + */ + db->regulators[0].supply = "vci"; + db->regulators[1].supply = "vccio"; + ret = devm_regulator_bulk_get(dev, + ARRAY_SIZE(db->regulators), + db->regulators); + if (ret) + return dev_err_probe(dev, ret, "failed to get regulators\n"); + + db->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(db->reset)) { + ret = PTR_ERR(db->reset); + return dev_err_probe(dev, ret, "no RESET GPIO\n"); + } + + ret = mipi_dbi_spi_init(spi, &db->dbi, NULL); + if (ret) + return dev_err_probe(dev, ret, "MIPI DBI init failed\n"); + + drm_panel_init(&db->panel, dev, &db7430_drm_funcs, + DRM_MODE_CONNECTOR_DPI); + + /* FIXME: if no external backlight, use internal backlight */ + ret = drm_panel_of_backlight(&db->panel); + if (ret) + return dev_err_probe(dev, ret, "failed to add backlight\n"); + + spi_set_drvdata(spi, db); + + drm_panel_add(&db->panel); + dev_dbg(dev, "added panel\n"); + + return 0; +} + +static int db7430_remove(struct spi_device *spi) +{ + struct db7430 *db = spi_get_drvdata(spi); + + drm_panel_remove(&db->panel); + return 0; +} + +/* + * The DB7430 display controller may be used in several display products, + * so list the different variants here and add per-variant data if needed. 
+ */ +static const struct of_device_id db7430_match[] = { + { .compatible = "samsung,lms397kf04", }, + {}, +}; +MODULE_DEVICE_TABLE(of, db7430_match); + +static struct spi_driver db7430_driver = { + .probe = db7430_probe, + .remove = db7430_remove, + .driver = { + .name = "db7430-panel", + .of_match_table = db7430_match, + }, +}; +module_spi_driver(db7430_driver); + +MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); +MODULE_DESCRIPTION("Samsung DB7430 panel driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c index b962c817fb30..ccc8ed6fe3ae 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c @@ -446,7 +446,7 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi) dsi->lanes = 1; dsi->format = MIPI_DSI_FMT_RGB888; - dsi->mode_flags = MIPI_DSI_MODE_EOT_PACKET; + dsi->mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET; ctx->supplies[0].supply = "vdd3"; ctx->supplies[1].supply = "vci"; diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c index 07a48f621289..e0b1a7e354f3 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c @@ -16,7 +16,8 @@ #define MCS_GLOBAL_PARAM 0xb0 #define S6E63M0_DSI_MAX_CHUNK 15 /* CMD + 15 bytes max */ -static int s6e63m0_dsi_dcs_read(struct device *dev, const u8 cmd, u8 *data) +static int s6e63m0_dsi_dcs_read(struct device *dev, void *trsp, + const u8 cmd, u8 *data) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); int ret; @@ -32,7 +33,8 @@ static int s6e63m0_dsi_dcs_read(struct device *dev, const u8 cmd, u8 *data) return 0; } -static int s6e63m0_dsi_dcs_write(struct device *dev, const u8 *data, size_t len) +static int s6e63m0_dsi_dcs_write(struct device *dev, void *trsp, + const u8 *data, size_t len) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); const u8 *seqp = data; @@ -99,8 +101,8 @@ static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi) dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST; - ret = s6e63m0_probe(dev, s6e63m0_dsi_dcs_read, s6e63m0_dsi_dcs_write, - true); + ret = s6e63m0_probe(dev, NULL, s6e63m0_dsi_dcs_read, + s6e63m0_dsi_dcs_write, true); if (ret) return ret; diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c index 326deb3177b6..3669cc3719ce 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c @@ -5,62 +5,38 @@ #include <linux/spi/spi.h> #include <linux/delay.h> +#include <drm/drm_mipi_dbi.h> #include <drm/drm_print.h> #include "panel-samsung-s6e63m0.h" -#define DATA_MASK 0x100 +static const u8 s6e63m0_dbi_read_commands[] = { + MCS_READ_ID1, + MCS_READ_ID2, + MCS_READ_ID3, + 0, /* sentinel */ +}; -static int s6e63m0_spi_dcs_read(struct device *dev, const u8 cmd, u8 *data) +static int s6e63m0_spi_dcs_read(struct device *dev, void *trsp, + const u8 cmd, u8 *data) { - struct spi_device *spi = to_spi_device(dev); - u16 buf[1]; - u16 rbuf[1]; + struct mipi_dbi *dbi = trsp; int ret; - /* SPI buffers are always in CPU order */ - buf[0] = (u16)cmd; - ret = spi_write_then_read(spi, buf, 2, rbuf, 2); - dev_dbg(dev, "READ CMD: %04x RET: %04x\n", buf[0], rbuf[0]); - if (!ret) - /* These high 8 bits of the 9 contains the readout */ - *data = (rbuf[0] & 0x1ff) >> 1; + ret = mipi_dbi_command_read(dbi, cmd, data); 
+ if (ret) + dev_err(dev, "error on DBI read command %02x\n", cmd); return ret; } -static int s6e63m0_spi_write_word(struct device *dev, u16 data) -{ - struct spi_device *spi = to_spi_device(dev); - - /* SPI buffers are always in CPU order */ - return spi_write(spi, &data, 2); -} - -static int s6e63m0_spi_dcs_write(struct device *dev, const u8 *data, size_t len) +static int s6e63m0_spi_dcs_write(struct device *dev, void *trsp, + const u8 *data, size_t len) { - int ret = 0; - - dev_dbg(dev, "SPI writing dcs seq: %*ph\n", (int)len, data); - - /* - * This sends 9 bits with the first bit (bit 8) set to 0 - * This indicates that this is a command. Anything after the - * command is data. - */ - ret = s6e63m0_spi_write_word(dev, *data); - - while (!ret && --len) { - ++data; - /* This sends 9 bits with the first bit (bit 8) set to 1 */ - ret = s6e63m0_spi_write_word(dev, *data | DATA_MASK); - } - - if (ret) { - dev_err(dev, "SPI error %d writing dcs seq: %*ph\n", ret, - (int)len, data); - } + struct mipi_dbi *dbi = trsp; + int ret; + ret = mipi_dbi_command_stackbuf(dbi, data[0], (data + 1), (len - 1)); usleep_range(300, 310); return ret; @@ -69,18 +45,21 @@ static int s6e63m0_spi_dcs_write(struct device *dev, const u8 *data, size_t len) static int s6e63m0_spi_probe(struct spi_device *spi) { struct device *dev = &spi->dev; + struct mipi_dbi *dbi; int ret; - spi->bits_per_word = 9; - /* Preserve e.g. SPI_3WIRE setting */ - spi->mode |= SPI_MODE_3; - ret = spi_setup(spi); - if (ret < 0) { - dev_err(dev, "spi setup failed.\n"); - return ret; - } - return s6e63m0_probe(dev, s6e63m0_spi_dcs_read, s6e63m0_spi_dcs_write, - false); + dbi = devm_kzalloc(dev, sizeof(*dbi), GFP_KERNEL); + if (!dbi) + return -ENOMEM; + + ret = mipi_dbi_spi_init(spi, dbi, NULL); + if (ret) + return dev_err_probe(dev, ret, "MIPI DBI init failed\n"); + /* Register our custom MCS read commands */ + dbi->read_commands = s6e63m0_dbi_read_commands; + + return s6e63m0_probe(dev, dbi, s6e63m0_spi_dcs_read, + s6e63m0_spi_dcs_write, false); } static int s6e63m0_spi_remove(struct spi_device *spi) diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c index 603c5dfe8768..35d72ac663d6 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c @@ -22,31 +22,6 @@ #include "panel-samsung-s6e63m0.h" -/* Manufacturer Command Set */ -#define MCS_ELVSS_ON 0xb1 -#define MCS_TEMP_SWIRE 0xb2 -#define MCS_PENTILE_1 0xb3 -#define MCS_PENTILE_2 0xb4 -#define MCS_GAMMA_DELTA_Y_RED 0xb5 -#define MCS_GAMMA_DELTA_X_RED 0xb6 -#define MCS_GAMMA_DELTA_Y_GREEN 0xb7 -#define MCS_GAMMA_DELTA_X_GREEN 0xb8 -#define MCS_GAMMA_DELTA_Y_BLUE 0xb9 -#define MCS_GAMMA_DELTA_X_BLUE 0xba -#define MCS_MIECTL1 0xc0 -#define MCS_BCMODE 0xc1 -#define MCS_ERROR_CHECK 0xd5 -#define MCS_READ_ID1 0xda -#define MCS_READ_ID2 0xdb -#define MCS_READ_ID3 0xdc -#define MCS_LEVEL_2_KEY 0xf0 -#define MCS_MTP_KEY 0xf1 -#define MCS_DISCTL 0xf2 -#define MCS_SRCCTL 0xf6 -#define MCS_IFCTL 0xf7 -#define MCS_PANELCTL 0xf8 -#define MCS_PGAMMACTL 0xfa - #define S6E63M0_LCD_ID_VALUE_M2 0xA4 #define S6E63M0_LCD_ID_VALUE_SM2 0xB4 #define S6E63M0_LCD_ID_VALUE_SM2_1 0xB6 @@ -283,8 +258,9 @@ static u8 const s6e63m0_elvss_per_gamma[NUM_GAMMA_LEVELS] = { struct s6e63m0 { struct device *dev; - int (*dcs_read)(struct device *dev, const u8 cmd, u8 *val); - int (*dcs_write)(struct device *dev, const u8 *data, size_t len); + void *transport_data; + int (*dcs_read)(struct device *dev, void *trsp, const u8 
cmd, u8 *val); + int (*dcs_write)(struct device *dev, void *trsp, const u8 *data, size_t len); struct drm_panel panel; struct backlight_device *bl_dev; u8 lcd_type; @@ -340,7 +316,7 @@ static void s6e63m0_dcs_read(struct s6e63m0 *ctx, const u8 cmd, u8 *data) if (ctx->error < 0) return; - ctx->error = ctx->dcs_read(ctx->dev, cmd, data); + ctx->error = ctx->dcs_read(ctx->dev, ctx->transport_data, cmd, data); } static void s6e63m0_dcs_write(struct s6e63m0 *ctx, const u8 *data, size_t len) @@ -348,7 +324,7 @@ static void s6e63m0_dcs_write(struct s6e63m0 *ctx, const u8 *data, size_t len) if (ctx->error < 0 || len == 0) return; - ctx->error = ctx->dcs_write(ctx->dev, data, len); + ctx->error = ctx->dcs_write(ctx->dev, ctx->transport_data, data, len); } #define s6e63m0_dcs_write_seq_static(ctx, seq ...) \ @@ -713,9 +689,9 @@ static int s6e63m0_backlight_register(struct s6e63m0 *ctx, u32 max_brightness) return ret; } -int s6e63m0_probe(struct device *dev, - int (*dcs_read)(struct device *dev, const u8 cmd, u8 *val), - int (*dcs_write)(struct device *dev, const u8 *data, size_t len), +int s6e63m0_probe(struct device *dev, void *trsp, + int (*dcs_read)(struct device *dev, void *trsp, const u8 cmd, u8 *val), + int (*dcs_write)(struct device *dev, void *trsp, const u8 *data, size_t len), bool dsi_mode) { struct s6e63m0 *ctx; @@ -726,6 +702,7 @@ int s6e63m0_probe(struct device *dev, if (!ctx) return -ENOMEM; + ctx->transport_data = trsp; ctx->dsi_mode = dsi_mode; ctx->dcs_read = dcs_read; ctx->dcs_write = dcs_write; diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h index c669fec91763..306605ed1117 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h @@ -3,9 +3,36 @@ #ifndef _PANEL_SAMSUNG_S6E63M0_H #define _PANEL_SAMSUNG_S6E63M0_H -int s6e63m0_probe(struct device *dev, - int (*dcs_read)(struct device *dev, const u8 cmd, u8 *val), - int (*dcs_write)(struct device *dev, const u8 *data, +/* Manufacturer Command Set */ +#define MCS_ELVSS_ON 0xb1 +#define MCS_TEMP_SWIRE 0xb2 +#define MCS_PENTILE_1 0xb3 +#define MCS_PENTILE_2 0xb4 +#define MCS_GAMMA_DELTA_Y_RED 0xb5 +#define MCS_GAMMA_DELTA_X_RED 0xb6 +#define MCS_GAMMA_DELTA_Y_GREEN 0xb7 +#define MCS_GAMMA_DELTA_X_GREEN 0xb8 +#define MCS_GAMMA_DELTA_Y_BLUE 0xb9 +#define MCS_GAMMA_DELTA_X_BLUE 0xba +#define MCS_MIECTL1 0xc0 +#define MCS_BCMODE 0xc1 +#define MCS_ERROR_CHECK 0xd5 +#define MCS_READ_ID1 0xda +#define MCS_READ_ID2 0xdb +#define MCS_READ_ID3 0xdc +#define MCS_LEVEL_2_KEY 0xf0 +#define MCS_MTP_KEY 0xf1 +#define MCS_DISCTL 0xf2 +#define MCS_SRCCTL 0xf6 +#define MCS_IFCTL 0xf7 +#define MCS_PANELCTL 0xf8 +#define MCS_PGAMMACTL 0xfa + +int s6e63m0_probe(struct device *dev, void *trsp, + int (*dcs_read)(struct device *dev, void *trsp, + const u8 cmd, u8 *val), + int (*dcs_write)(struct device *dev, void *trsp, + const u8 *data, size_t len), bool dsi_mode); int s6e63m0_remove(struct device *dev); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c index 527371120266..9b3599d6d2de 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c @@ -990,8 +990,8 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi) dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST - | MIPI_DSI_MODE_VIDEO_HFP | MIPI_DSI_MODE_VIDEO_HBP - | MIPI_DSI_MODE_VIDEO_HSA | MIPI_DSI_MODE_EOT_PACKET + | 
MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP + | MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT; ret = s6e8aa0_parse_dt(ctx); diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c index 16dbf0f353ed..b937e24dac8e 100644 --- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c +++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c @@ -282,7 +282,7 @@ static int sharp_nt_panel_probe(struct mipi_dsi_device *dsi) dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_CLOCK_NON_CONTINUOUS | - MIPI_DSI_MODE_EOT_PACKET; + MIPI_DSI_MODE_NO_EOT_PACKET; sharp_nt = devm_kzalloc(&dsi->dev, sizeof(*sharp_nt), GFP_KERNEL); if (!sharp_nt) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 21939d4352cf..dff3dedd734b 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -36,6 +36,8 @@ #include <drm/drm_crtc.h> #include <drm/drm_device.h> +#include <drm/drm_dp_aux_bus.h> +#include <drm/drm_dp_helper.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_panel.h> @@ -131,6 +133,22 @@ struct panel_desc { unsigned int prepare_to_enable; /** + * @delay.power_to_enable: Time for the power to enable the display on. + * + * The time (in milliseconds) to wait after powering up the display + * before asserting its enable pin. + */ + unsigned int power_to_enable; + + /** + * @delay.disable_to_power_off: Time for the disable to power the display off. + * + * The time (in milliseconds) to wait before powering off the display + * after deasserting its enable pin. + */ + unsigned int disable_to_power_off; + + /** * @delay.enable: Time for the panel to display a valid frame. 
* * The time (in milliseconds) that it takes for the panel to @@ -185,6 +203,7 @@ struct panel_simple { struct regulator *supply; struct i2c_adapter *ddc; + struct drm_dp_aux *aux; struct gpio_desc *enable_gpio; struct gpio_desc *hpd_gpio; @@ -344,6 +363,10 @@ static int panel_simple_suspend(struct device *dev) struct panel_simple *p = dev_get_drvdata(dev); gpiod_set_value_cansleep(p->enable_gpio, 0); + + if (p->desc->delay.disable_to_power_off) + msleep(p->desc->delay.disable_to_power_off); + regulator_disable(p->supply); p->unprepared_time = ktime_get(); @@ -404,6 +427,9 @@ static int panel_simple_prepare_once(struct panel_simple *p) return err; } + if (p->desc->delay.power_to_enable) + msleep(p->desc->delay.power_to_enable); + gpiod_set_value_cansleep(p->enable_gpio, 1); delay = p->desc->delay.prepare; @@ -657,7 +683,8 @@ static void panel_simple_parse_panel_timing_node(struct device *dev, dev_err(dev, "Reject override mode: No display_timing found\n"); } -static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) +static int panel_simple_probe(struct device *dev, const struct panel_desc *desc, + struct drm_dp_aux *aux) { struct panel_simple *panel; struct display_timing dt; @@ -673,6 +700,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) panel->enabled = false; panel->prepared_time = 0; panel->desc = desc; + panel->aux = aux; panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd"); if (!panel->no_hpd) { @@ -707,6 +735,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) if (!panel->ddc) return -EPROBE_DEFER; + } else if (aux) { + panel->ddc = &aux->ddc; } if (desc == &panel_dpi) { @@ -742,10 +772,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) desc->bpc != 8); break; case DRM_MODE_CONNECTOR_eDP: - if (desc->bus_format == 0) - dev_warn(dev, "Specify missing bus_format\n"); - if (desc->bpc != 6 && desc->bpc != 8) - dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc); + if (desc->bpc != 6 && desc->bpc != 8 && desc->bpc != 10) + dev_warn(dev, "Expected bpc in {6,8,10} but got: %u\n", desc->bpc); break; case DRM_MODE_CONNECTOR_DSI: if (desc->bpc != 6 && desc->bpc != 8) @@ -775,6 +803,11 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) break; } + if (!panel->enable_gpio && desc->delay.disable_to_power_off) + dev_warn(dev, "Need a delay after disabling panel GPIO, but a GPIO wasn't provided\n"); + if (!panel->enable_gpio && desc->delay.power_to_enable) + dev_warn(dev, "Need a delay before enabling panel GPIO, but a GPIO wasn't provided\n"); + dev_set_drvdata(dev, panel); /* @@ -793,6 +826,15 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) if (err) goto disable_pm_runtime; + if (!panel->base.backlight && panel->aux) { + pm_runtime_get_sync(dev); + err = drm_panel_dp_aux_backlight(&panel->base, panel->aux); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + if (err) + goto disable_pm_runtime; + } + drm_panel_add(&panel->base); return 0; @@ -801,7 +843,7 @@ disable_pm_runtime: pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); free_ddc: - if (panel->ddc) + if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc)) put_device(&panel->ddc->dev); return err; @@ -817,7 +859,7 @@ static int panel_simple_remove(struct device *dev) pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); - if (panel->ddc) + if (panel->ddc && (!panel->aux || 
panel->ddc != &panel->aux->ddc)) put_device(&panel->ddc->dev); return 0; @@ -1080,6 +1122,36 @@ static const struct panel_desc auo_b133xtn01 = { }, }; +static const struct drm_display_mode auo_b133han05_mode = { + .clock = 142600, + .hdisplay = 1920, + .hsync_start = 1920 + 58, + .hsync_end = 1920 + 58 + 42, + .htotal = 1920 + 58 + 42 + 60, + .vdisplay = 1080, + .vsync_start = 1080 + 3, + .vsync_end = 1080 + 3 + 5, + .vtotal = 1080 + 3 + 5 + 54, +}; + +static const struct panel_desc auo_b133han05 = { + .modes = &auo_b133han05_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 293, + .height = 165, + }, + .delay = { + .prepare = 100, + .enable = 20, + .unprepare = 50, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB, + .connector_type = DRM_MODE_CONNECTOR_eDP, +}; + static const struct drm_display_mode auo_b133htn01_mode = { .clock = 150660, .hdisplay = 1920, @@ -1107,6 +1179,36 @@ static const struct panel_desc auo_b133htn01 = { }, }; +static const struct drm_display_mode auo_b140han06_mode = { + .clock = 141000, + .hdisplay = 1920, + .hsync_start = 1920 + 16, + .hsync_end = 1920 + 16 + 16, + .htotal = 1920 + 16 + 16 + 152, + .vdisplay = 1080, + .vsync_start = 1080 + 3, + .vsync_end = 1080 + 3 + 14, + .vtotal = 1080 + 3 + 14 + 19, +}; + +static const struct panel_desc auo_b140han06 = { + .modes = &auo_b140han06_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 309, + .height = 174, + }, + .delay = { + .prepare = 100, + .enable = 20, + .unprepare = 50, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB, + .connector_type = DRM_MODE_CONNECTOR_eDP, +}; + static const struct display_timing auo_g070vvn01_timings = { .pixelclock = { 33300000, 34209000, 45000000 }, .hactive = { 800, 800, 800 }, @@ -1179,6 +1281,8 @@ static const struct panel_desc auo_g104sn02 = { .width = 211, .height = 158, }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct drm_display_mode auo_g121ean01_mode = { @@ -1929,6 +2033,32 @@ static const struct panel_desc edt_et035012dm6 = { .bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE, }; +static const struct drm_display_mode edt_etm0350g0dh6_mode = { + .clock = 6520, + .hdisplay = 320, + .hsync_start = 320 + 20, + .hsync_end = 320 + 20 + 68, + .htotal = 320 + 20 + 68, + .vdisplay = 240, + .vsync_start = 240 + 4, + .vsync_end = 240 + 4 + 18, + .vtotal = 240 + 4 + 18, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + +static const struct panel_desc edt_etm0350g0dh6 = { + .modes = &edt_etm0350g0dh6_mode, + .num_modes = 1, + .bpc = 6, + .size = { + .width = 70, + .height = 53, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, +}; + static const struct drm_display_mode edt_etm043080dh6gp_mode = { .clock = 10870, .hdisplay = 480, @@ -1980,6 +2110,9 @@ static const struct panel_desc edt_etm0430g0dh6 = { .width = 95, .height = 54, }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X18, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, }; static const struct drm_display_mode edt_et057090dhu_mode = { @@ -2044,6 +2177,33 @@ static const struct panel_desc edt_etm0700g0bdh6 = { }, .bus_format = MEDIA_BUS_FMT_RGB666_1X18, .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, + .connector_type = 
DRM_MODE_CONNECTOR_DPI, +}; + +static const struct drm_display_mode edt_etmv570g2dhu_mode = { + .clock = 25175, + .hdisplay = 640, + .hsync_start = 640, + .hsync_end = 640 + 16, + .htotal = 640 + 16 + 30 + 114, + .vdisplay = 480, + .vsync_start = 480 + 10, + .vsync_end = 480 + 10 + 3, + .vtotal = 480 + 10 + 3 + 35, + .flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_PHSYNC, +}; + +static const struct panel_desc edt_etmv570g2dhu = { + .modes = &edt_etmv570g2dhu_mode, + .num_modes = 1, + .bpc = 6, + .size = { + .width = 115, + .height = 86, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, }; static const struct display_timing evervision_vgg804821_timing = { @@ -3521,6 +3681,36 @@ static const struct panel_desc rocktech_rk101ii01d_ct = { .connector_type = DRM_MODE_CONNECTOR_LVDS, }; +static const struct drm_display_mode samsung_atna33xc20_mode = { + .clock = 138770, + .hdisplay = 1920, + .hsync_start = 1920 + 48, + .hsync_end = 1920 + 48 + 32, + .htotal = 1920 + 48 + 32 + 80, + .vdisplay = 1080, + .vsync_start = 1080 + 8, + .vsync_end = 1080 + 8 + 8, + .vtotal = 1080 + 8 + 8 + 16, + .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, +}; + +static const struct panel_desc samsung_atna33xc20 = { + .modes = &samsung_atna33xc20_mode, + .num_modes = 1, + .bpc = 10, + .size = { + .width = 294, + .height = 165, + }, + .delay = { + .disable_to_power_off = 200, + .power_to_enable = 400, + .hpd_absent_delay = 200, + .unprepare = 500, + }, + .connector_type = DRM_MODE_CONNECTOR_eDP, +}; + static const struct drm_display_mode samsung_lsn122dl01_c01_mode = { .clock = 271560, .hdisplay = 2560, @@ -4234,9 +4424,15 @@ static const struct of_device_id platform_of_match[] = { .compatible = "auo,b116xw03", .data = &auo_b116xw03, }, { + .compatible = "auo,b133han05", + .data = &auo_b133han05, + }, { .compatible = "auo,b133htn01", .data = &auo_b133htn01, }, { + .compatible = "auo,b140han06", + .data = &auo_b140han06, + }, { .compatible = "auo,b133xtn01", .data = &auo_b133xtn01, }, { @@ -4330,6 +4526,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "edt,et035012dm6", .data = &edt_et035012dm6, }, { + .compatible = "edt,etm0350g0dh6", + .data = &edt_etm0350g0dh6, + }, { .compatible = "edt,etm043080dh6gp", .data = &edt_etm043080dh6gp, }, { @@ -4351,6 +4550,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "edt,etm0700g0edh6", .data = &edt_etm0700g0bdh6, }, { + .compatible = "edt,etmv570g2dhu", + .data = &edt_etmv570g2dhu, + }, { .compatible = "evervision,vgg804821", .data = &evervision_vgg804821, }, { @@ -4522,6 +4724,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "rocktech,rk101ii01d-ct", .data = &rocktech_rk101ii01d_ct, }, { + .compatible = "samsung,atna33xc20", + .data = &samsung_atna33xc20, + }, { .compatible = "samsung,lsn122dl01-c01", .data = &samsung_lsn122dl01_c01, }, { @@ -4632,7 +4837,7 @@ static int panel_simple_platform_probe(struct platform_device *pdev) if (!id) return -ENODEV; - return panel_simple_probe(&pdev->dev, id->data); + return panel_simple_probe(&pdev->dev, id->data, NULL); } static int panel_simple_platform_remove(struct platform_device *pdev) @@ -4867,7 +5072,7 @@ static const struct panel_desc_dsi osd101t2045_53ts = { }, .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | - MIPI_DSI_MODE_EOT_PACKET, + MIPI_DSI_MODE_NO_EOT_PACKET, .format = 
MIPI_DSI_FMT_RGB888, .lanes = 4, }; @@ -4912,7 +5117,7 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi) desc = id->data; - err = panel_simple_probe(&dsi->dev, &desc->desc); + err = panel_simple_probe(&dsi->dev, &desc->desc, NULL); if (err < 0) return err; @@ -4957,6 +5162,38 @@ static struct mipi_dsi_driver panel_simple_dsi_driver = { .shutdown = panel_simple_dsi_shutdown, }; +static int panel_simple_dp_aux_ep_probe(struct dp_aux_ep_device *aux_ep) +{ + const struct of_device_id *id; + + id = of_match_node(platform_of_match, aux_ep->dev.of_node); + if (!id) + return -ENODEV; + + return panel_simple_probe(&aux_ep->dev, id->data, aux_ep->aux); +} + +static void panel_simple_dp_aux_ep_remove(struct dp_aux_ep_device *aux_ep) +{ + panel_simple_remove(&aux_ep->dev); +} + +static void panel_simple_dp_aux_ep_shutdown(struct dp_aux_ep_device *aux_ep) +{ + panel_simple_shutdown(&aux_ep->dev); +} + +static struct dp_aux_ep_driver panel_simple_dp_aux_ep_driver = { + .driver = { + .name = "panel-simple-dp-aux", + .of_match_table = platform_of_match, /* Same as platform one! */ + .pm = &panel_simple_pm_ops, + }, + .probe = panel_simple_dp_aux_ep_probe, + .remove = panel_simple_dp_aux_ep_remove, + .shutdown = panel_simple_dp_aux_ep_shutdown, +}; + static int __init panel_simple_init(void) { int err; @@ -4965,15 +5202,25 @@ static int __init panel_simple_init(void) if (err < 0) return err; + err = dp_aux_dp_driver_register(&panel_simple_dp_aux_ep_driver); + if (err < 0) + goto err_did_platform_register; + if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) { err = mipi_dsi_driver_register(&panel_simple_dsi_driver); - if (err < 0) { - platform_driver_unregister(&panel_simple_platform_driver); - return err; - } + if (err < 0) + goto err_did_aux_ep_register; } return 0; + +err_did_aux_ep_register: + dp_aux_dp_driver_unregister(&panel_simple_dp_aux_ep_driver); + +err_did_platform_register: + platform_driver_unregister(&panel_simple_platform_driver); + + return err; } module_init(panel_simple_init); @@ -4982,6 +5229,7 @@ static void __exit panel_simple_exit(void) if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) mipi_dsi_driver_unregister(&panel_simple_dsi_driver); + dp_aux_dp_driver_unregister(&panel_simple_dp_aux_ep_driver); platform_driver_unregister(&panel_simple_platform_driver); } module_exit(panel_simple_exit); diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c index 95659a4d15e9..9536d56a94a5 100644 --- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c +++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c @@ -40,7 +40,6 @@ struct acx424akp { struct drm_panel panel; struct device *dev; - struct backlight_device *bl; struct regulator *supply; struct gpio_desc *reset_gpio; bool video_mode; @@ -102,6 +101,18 @@ static int acx424akp_set_brightness(struct backlight_device *bl) u8 par; int ret; + if (backlight_is_blank(bl)) { + /* Disable backlight */ + par = 0x00; + ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, + &par, 1); + if (ret) { + dev_err(acx->dev, "failed to disable display backlight (%d)\n", ret); + return ret; + } + return 0; + } + /* Calculate the PWM duty cycle in n/256's */ pwm_ratio = max(((duty_ns * 256) / period_ns) - 1, 1); pwm_div = max(1, @@ -172,6 +183,12 @@ static const struct backlight_ops acx424akp_bl_ops = { .update_status = acx424akp_set_brightness, }; +static const struct backlight_properties acx424akp_bl_props = { + .type = BACKLIGHT_RAW, + .brightness = 512, + .max_brightness = 1023, +}; + static int acx424akp_read_id(struct 
acx424akp *acx) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev); @@ -310,8 +327,6 @@ static int acx424akp_prepare(struct drm_panel *panel) } } - acx->bl->props.power = FB_BLANK_NORMAL; - return 0; err_power_off: @@ -323,18 +338,8 @@ static int acx424akp_unprepare(struct drm_panel *panel) { struct acx424akp *acx = panel_to_acx424akp(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev); - u8 par; int ret; - /* Disable backlight */ - par = 0x00; - ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, - &par, 1); - if (ret) { - dev_err(acx->dev, "failed to disable display backlight (%d)\n", ret); - return ret; - } - ret = mipi_dsi_dcs_set_display_off(dsi); if (ret) { dev_err(acx->dev, "failed to turn display off (%d)\n", ret); @@ -350,36 +355,10 @@ static int acx424akp_unprepare(struct drm_panel *panel) msleep(85); acx424akp_power_off(acx); - acx->bl->props.power = FB_BLANK_POWERDOWN; - - return 0; -} - -static int acx424akp_enable(struct drm_panel *panel) -{ - struct acx424akp *acx = panel_to_acx424akp(panel); - - /* - * The backlight is on as long as the display is on - * so no use to call backlight_enable() here. - */ - acx->bl->props.power = FB_BLANK_UNBLANK; return 0; } -static int acx424akp_disable(struct drm_panel *panel) -{ - struct acx424akp *acx = panel_to_acx424akp(panel); - - /* - * The backlight is on as long as the display is on - * so no use to call backlight_disable() here. - */ - acx->bl->props.power = FB_BLANK_NORMAL; - - return 0; -} static int acx424akp_get_modes(struct drm_panel *panel, struct drm_connector *connector) @@ -409,10 +388,8 @@ static int acx424akp_get_modes(struct drm_panel *panel, } static const struct drm_panel_funcs acx424akp_drm_funcs = { - .disable = acx424akp_disable, .unprepare = acx424akp_unprepare, .prepare = acx424akp_prepare, - .enable = acx424akp_enable, .get_modes = acx424akp_get_modes, }; @@ -458,25 +435,18 @@ static int acx424akp_probe(struct mipi_dsi_device *dsi) /* This asserts RESET by default */ acx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(acx->reset_gpio)) { - ret = PTR_ERR(acx->reset_gpio); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to request GPIO (%d)\n", ret); - return ret; - } + if (IS_ERR(acx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(acx->reset_gpio), + "failed to request GPIO\n"); drm_panel_init(&acx->panel, dev, &acx424akp_drm_funcs, DRM_MODE_CONNECTOR_DSI); - acx->bl = devm_backlight_device_register(dev, "acx424akp", dev, acx, - &acx424akp_bl_ops, NULL); - if (IS_ERR(acx->bl)) { - dev_err(dev, "failed to register backlight device\n"); - return PTR_ERR(acx->bl); - } - acx->bl->props.max_brightness = 1023; - acx->bl->props.brightness = 512; - acx->bl->props.power = FB_BLANK_POWERDOWN; + acx->panel.backlight = devm_backlight_device_register(dev, "acx424akp", dev, acx, + &acx424akp_bl_ops, &acx424akp_bl_props); + if (IS_ERR(acx->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(acx->panel.backlight), + "failed to register backlight device\n"); drm_panel_add(&acx->panel); diff --git a/drivers/gpu/drm/panel/panel-widechips-ws2401.c b/drivers/gpu/drm/panel/panel-widechips-ws2401.c new file mode 100644 index 000000000000..8bc976f54b80 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-widechips-ws2401.c @@ -0,0 +1,441 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Panel driver for the WideChips WS2401 480x800 DPI RGB panel, used in + * the Samsung Mobile Display (SMD) LMS380KF01. + * Found in the Samsung Galaxy Ace 2 GT-I8160 mobile phone. 
+ * Linus Walleij <linus.walleij@linaro.org> + * Inspired by code and know-how in the vendor driver by Gareth Phillips. + */ +#include <drm/drm_mipi_dbi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/media-bus-format.h> +#include <linux/module.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> + +#include <video/mipi_display.h> + +#define WS2401_RESCTL 0xb8 /* Resolution select control */ +#define WS2401_PSMPS 0xbd /* SMPS positive control */ +#define WS2401_NSMPS 0xbe /* SMPS negative control */ +#define WS2401_SMPS 0xbf +#define WS2401_BCMODE 0xc1 /* Backlight control mode */ +#define WS2401_WRBLCTL 0xc3 /* Backlight control */ +#define WS2401_WRDISBV 0xc4 /* Write manual brightness */ +#define WS2401_WRCTRLD 0xc6 /* Write BL control */ +#define WS2401_WRMIE 0xc7 /* Write MIE mode */ +#define WS2401_READ_ID1 0xda /* Read panel ID 1 */ +#define WS2401_READ_ID2 0xdb /* Read panel ID 2 */ +#define WS2401_READ_ID3 0xdc /* Read panel ID 3 */ +#define WS2401_GAMMA_R1 0xe7 /* Gamma red 1 */ +#define WS2401_GAMMA_G1 0xe8 /* Gamma green 1 */ +#define WS2401_GAMMA_B1 0xe9 /* Gamma blue 1 */ +#define WS2401_GAMMA_R2 0xea /* Gamma red 2 */ +#define WS2401_GAMMA_G2 0xeb /* Gamma green 2 */ +#define WS2401_GAMMA_B2 0xec /* Gamma blue 2 */ +#define WS2401_PASSWD1 0xf0 /* Password command for level 2 */ +#define WS2401_DISCTL 0xf2 /* Display control */ +#define WS2401_PWRCTL 0xf3 /* Power control */ +#define WS2401_VCOMCTL 0xf4 /* VCOM control */ +#define WS2401_SRCCTL 0xf5 /* Source control */ +#define WS2401_PANELCTL 0xf6 /* Panel control */ + +static const u8 ws2401_dbi_read_commands[] = { + WS2401_READ_ID1, + WS2401_READ_ID2, + WS2401_READ_ID3, + 0, /* sentinel */ +}; + +/** + * struct ws2401 - state container for a panel controlled by the WS2401 + * controller + */ +struct ws2401 { + /** @dev: the container device */ + struct device *dev; + /** @dbi: the DBI bus abstraction handle */ + struct mipi_dbi dbi; + /** @panel: the DRM panel instance for this device */ + struct drm_panel panel; + /** @width: the width of this panel in mm */ + u32 width; + /** @height: the height of this panel in mm */ + u32 height; + /** @reset: reset GPIO line */ + struct gpio_desc *reset; + /** @regulators: VCCIO and VIO supply regulators */ + struct regulator_bulk_data regulators[2]; + /** @internal_bl: If using internal backlight */ + bool internal_bl; +}; + +static const struct drm_display_mode lms380kf01_480_800_mode = { + /* + * The vendor driver states that the "SMD panel" has a clock + * frequency of 49920000 Hz / 2 = 24960000 Hz. 
+ */ + .clock = 24960, + .hdisplay = 480, + .hsync_start = 480 + 8, + .hsync_end = 480 + 8 + 10, + .htotal = 480 + 8 + 10 + 8, + .vdisplay = 800, + .vsync_start = 800 + 8, + .vsync_end = 800 + 8 + 2, + .vtotal = 800 + 8 + 2 + 18, + .width_mm = 50, + .height_mm = 84, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + +static inline struct ws2401 *to_ws2401(struct drm_panel *panel) +{ + return container_of(panel, struct ws2401, panel); +} + +static void ws2401_read_mtp_id(struct ws2401 *ws) +{ + struct mipi_dbi *dbi = &ws->dbi; + u8 id1, id2, id3; + int ret; + + ret = mipi_dbi_command_read(dbi, WS2401_READ_ID1, &id1); + if (ret) { + dev_err(ws->dev, "unable to read MTP ID 1\n"); + return; + } + ret = mipi_dbi_command_read(dbi, WS2401_READ_ID2, &id2); + if (ret) { + dev_err(ws->dev, "unable to read MTP ID 2\n"); + return; + } + ret = mipi_dbi_command_read(dbi, WS2401_READ_ID3, &id3); + if (ret) { + dev_err(ws->dev, "unable to read MTP ID 3\n"); + return; + } + dev_info(ws->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3); +} + +static int ws2401_power_on(struct ws2401 *ws) +{ + struct mipi_dbi *dbi = &ws->dbi; + int ret; + + /* Power up */ + ret = regulator_bulk_enable(ARRAY_SIZE(ws->regulators), + ws->regulators); + if (ret) { + dev_err(ws->dev, "failed to enable regulators: %d\n", ret); + return ret; + } + msleep(10); + + /* Assert reset >=1 ms */ + gpiod_set_value_cansleep(ws->reset, 1); + usleep_range(1000, 5000); + /* De-assert reset */ + gpiod_set_value_cansleep(ws->reset, 0); + /* Wait >= 10 ms */ + msleep(10); + dev_dbg(ws->dev, "de-asserted RESET\n"); + + /* + * Exit sleep mode and initialize display - some hammering is + * necessary. + */ + mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); + mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); + msleep(50); + + /* Magic to unlock level 2 control of the display */ + mipi_dbi_command(dbi, WS2401_PASSWD1, 0x5a, 0x5a); + /* Configure resolution to 480RGBx800 */ + mipi_dbi_command(dbi, WS2401_RESCTL, 0x12); + /* Set addressing mode Flip V(d0), Flip H(d1) RGB/BGR(d3) */ + mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, 0x01); + /* Set pixel format: 24 bpp */ + mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, 0x70); + mipi_dbi_command(dbi, WS2401_SMPS, 0x00, 0x0f); + mipi_dbi_command(dbi, WS2401_PSMPS, 0x06, 0x03, /* DDVDH: 4.6v */ + 0x7e, 0x03, 0x12, 0x37); + mipi_dbi_command(dbi, WS2401_NSMPS, 0x06, 0x03, /* DDVDH: -4.6v */ + 0x7e, 0x02, 0x15, 0x37); + mipi_dbi_command(dbi, WS2401_SMPS, 0x02, 0x0f); + mipi_dbi_command(dbi, WS2401_PWRCTL, 0x10, 0xA9, 0x00, 0x01, 0x44, + 0xb4, /* VGH:16.1v, VGL:-13.8v */ + 0x50, /* GREFP:4.2v (default) */ + 0x50, /* GREFN:-4.2v (default) */ + 0x00, + 0x44); /* VOUTL:-10v (default) */ + mipi_dbi_command(dbi, WS2401_DISCTL, 0x01, 0x00, 0x00, 0x00, 0x14, + 0x16); + mipi_dbi_command(dbi, WS2401_VCOMCTL, 0x30, 0x53, 0x53); + mipi_dbi_command(dbi, WS2401_SRCCTL, 0x03, 0x0C, 0x00, 0x00, 0x00, + 0x01, /* 2 dot inversion */ + 0x01, 0x06, 0x03); + mipi_dbi_command(dbi, WS2401_PANELCTL, 0x14, 0x00, 0x80, 0x00); + mipi_dbi_command(dbi, WS2401_WRMIE, 0x01); + + /* Set up gamma, probably these are P-gamma and N-gamma for each color */ + mipi_dbi_command(dbi, WS2401_GAMMA_R1, 0x00, + 0x5b, 0x42, 0x41, 0x3f, 0x42, 0x3d, 0x38, 0x2e, + 0x2b, 0x2a, 0x27, 0x22, 0x27, 0x0f, 0x00, 0x00); + mipi_dbi_command(dbi, WS2401_GAMMA_R2, 0x00, + 0x5b, 0x42, 0x41, 0x3f, 0x42, 0x3d, 0x38, 0x2e, + 0x2b, 0x2a, 0x27, 0x22, 0x27, 0x0f, 0x00, 0x00); + mipi_dbi_command(dbi, WS2401_GAMMA_G1, 0x00, + 0x59, 0x40, 0x3f, 0x3e, 0x41, 0x3d, 0x39, 
0x2f, + 0x2c, 0x2b, 0x29, 0x25, 0x29, 0x19, 0x08, 0x00); + mipi_dbi_command(dbi, WS2401_GAMMA_G2, 0x00, + 0x59, 0x40, 0x3f, 0x3e, 0x41, 0x3d, 0x39, 0x2f, + 0x2c, 0x2b, 0x29, 0x25, 0x29, 0x19, 0x08, 0x00); + mipi_dbi_command(dbi, WS2401_GAMMA_B1, 0x00, + 0x57, 0x3b, 0x3a, 0x3b, 0x3f, 0x3b, 0x38, 0x27, + 0x38, 0x2a, 0x26, 0x22, 0x34, 0x0c, 0x09, 0x00); + mipi_dbi_command(dbi, WS2401_GAMMA_B2, 0x00, + 0x57, 0x3b, 0x3a, 0x3b, 0x3f, 0x3b, 0x38, 0x27, + 0x38, 0x2a, 0x26, 0x22, 0x34, 0x0c, 0x09, 0x00); + + if (ws->internal_bl) { + mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x2c); + } else { + mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x00); + /* + * When not using internal backlight we do not need any further + * L2 accesses to the panel so we close the door on our way out. + * Otherwise we need to leave the L2 door open. + */ + mipi_dbi_command(dbi, WS2401_PASSWD1, 0xa5, 0xa5); + } + + return 0; +} + +static int ws2401_power_off(struct ws2401 *ws) +{ + /* Go into RESET and disable regulators */ + gpiod_set_value_cansleep(ws->reset, 1); + return regulator_bulk_disable(ARRAY_SIZE(ws->regulators), + ws->regulators); +} + +static int ws2401_unprepare(struct drm_panel *panel) +{ + struct ws2401 *ws = to_ws2401(panel); + struct mipi_dbi *dbi = &ws->dbi; + + /* Make sure we disable backlight, if any */ + if (ws->internal_bl) + mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x00); + mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE); + msleep(120); + return ws2401_power_off(to_ws2401(panel)); +} + +static int ws2401_disable(struct drm_panel *panel) +{ + struct ws2401 *ws = to_ws2401(panel); + struct mipi_dbi *dbi = &ws->dbi; + + mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF); + msleep(25); + + return 0; +} + +static int ws2401_prepare(struct drm_panel *panel) +{ + return ws2401_power_on(to_ws2401(panel)); +} + +static int ws2401_enable(struct drm_panel *panel) +{ + struct ws2401 *ws = to_ws2401(panel); + struct mipi_dbi *dbi = &ws->dbi; + + mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); + + return 0; +} + +/** + * ws2401_get_modes() - return the mode + * @panel: the panel to get the mode for + * @connector: reference to the central DRM connector control structure + */ +static int ws2401_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct ws2401 *ws = to_ws2401(panel); + struct drm_display_mode *mode; + static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; + + /* + * We just support the LMS380KF01 so far, if we implement more panels + * this mode, the following connector display_info settings and + * probably the custom DCS sequences needs to selected based on what + * the target panel needs. 
+ */ + mode = drm_mode_duplicate(connector->dev, &lms380kf01_480_800_mode); + if (!mode) { + dev_err(ws->dev, "failed to add mode\n"); + return -ENOMEM; + } + + connector->display_info.bpc = 8; + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + connector->display_info.bus_flags = + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; + drm_display_info_set_bus_formats(&connector->display_info, + &bus_format, 1); + + drm_mode_set_name(mode); + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs ws2401_drm_funcs = { + .disable = ws2401_disable, + .unprepare = ws2401_unprepare, + .prepare = ws2401_prepare, + .enable = ws2401_enable, + .get_modes = ws2401_get_modes, +}; + +static int ws2401_set_brightness(struct backlight_device *bl) +{ + struct ws2401 *ws = bl_get_data(bl); + struct mipi_dbi *dbi = &ws->dbi; + u8 brightness = backlight_get_brightness(bl); + + if (backlight_is_blank(bl)) { + mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x00); + } else { + mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x2c); + mipi_dbi_command(dbi, WS2401_WRDISBV, brightness); + } + + return 0; +} + +static const struct backlight_ops ws2401_bl_ops = { + .update_status = ws2401_set_brightness, +}; + +static const struct backlight_properties ws2401_bl_props = { + .type = BACKLIGHT_PLATFORM, + .brightness = 120, + .max_brightness = U8_MAX, +}; + +static int ws2401_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct ws2401 *ws; + int ret; + + ws = devm_kzalloc(dev, sizeof(*ws), GFP_KERNEL); + if (!ws) + return -ENOMEM; + ws->dev = dev; + + /* + * VCI is the analog voltage supply + * VCCIO is the digital I/O voltage supply + */ + ws->regulators[0].supply = "vci"; + ws->regulators[1].supply = "vccio"; + ret = devm_regulator_bulk_get(dev, + ARRAY_SIZE(ws->regulators), + ws->regulators); + if (ret) + return dev_err_probe(dev, ret, "failed to get regulators\n"); + + ws->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ws->reset)) { + ret = PTR_ERR(ws->reset); + return dev_err_probe(dev, ret, "no RESET GPIO\n"); + } + + ret = mipi_dbi_spi_init(spi, &ws->dbi, NULL); + if (ret) + return dev_err_probe(dev, ret, "MIPI DBI init failed\n"); + ws->dbi.read_commands = ws2401_dbi_read_commands; + + ws2401_power_on(ws); + ws2401_read_mtp_id(ws); + ws2401_power_off(ws); + + drm_panel_init(&ws->panel, dev, &ws2401_drm_funcs, + DRM_MODE_CONNECTOR_DPI); + + ret = drm_panel_of_backlight(&ws->panel); + if (ret) + return dev_err_probe(dev, ret, + "failed to get external backlight device\n"); + + if (!ws->panel.backlight) { + dev_dbg(dev, "no external backlight, using internal backlight\n"); + ws->panel.backlight = + devm_backlight_device_register(dev, "ws2401", dev, ws, + &ws2401_bl_ops, &ws2401_bl_props); + if (IS_ERR(ws->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(ws->panel.backlight), + "failed to register backlight device\n"); + } else { + dev_dbg(dev, "using external backlight\n"); + } + + spi_set_drvdata(spi, ws); + + drm_panel_add(&ws->panel); + dev_dbg(dev, "added panel\n"); + + return 0; +} + +static int ws2401_remove(struct spi_device *spi) +{ + struct ws2401 *ws = spi_get_drvdata(spi); + + drm_panel_remove(&ws->panel); + return 0; +} + +/* + * Samsung LMS380KF01 is the one instance of this display controller that we + * know about, but if more are found, the controller can be parameterized + * here and used for other configurations. 
+ */ +static const struct of_device_id ws2401_match[] = { + { .compatible = "samsung,lms380kf01", }, + {}, +}; +MODULE_DEVICE_TABLE(of, ws2401_match); + +static struct spi_driver ws2401_driver = { + .probe = ws2401_probe, + .remove = ws2401_remove, + .driver = { + .name = "ws2401-panel", + .of_match_table = ws2401_match, + }, +}; +module_spi_driver(ws2401_driver); + +MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); +MODULE_DESCRIPTION("Samsung WS2401 panel driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c index 55172d63a922..d17aae8b71d7 100644 --- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c +++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c @@ -311,7 +311,7 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi) dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | - MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET; + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; drm_panel_init(&ctx->panel, &dsi->dev, &xpp055c272_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index 3644652f726f..194af7f607a6 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -106,7 +106,8 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) if (ret) { /* Continue if the optional regulator is missing */ if (ret != -ENODEV) { - DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n"); + if (ret != -EPROBE_DEFER) + DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n"); return ret; } } diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index 125ed973feaa..bd9b7be63b0f 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -54,7 +54,8 @@ static int panfrost_clk_init(struct panfrost_device *pfdev) if (IS_ERR(pfdev->bus_clock)) { dev_err(pfdev->dev, "get bus_clock failed %ld\n", PTR_ERR(pfdev->bus_clock)); - return PTR_ERR(pfdev->bus_clock); + err = PTR_ERR(pfdev->bus_clock); + goto disable_clock; } if (pfdev->bus_clock) { @@ -291,55 +292,100 @@ void panfrost_device_fini(struct panfrost_device *pfdev) panfrost_clk_fini(pfdev); } -const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception_code) -{ - switch (exception_code) { - /* Non-Fault Status code */ - case 0x00: return "NOT_STARTED/IDLE/OK"; - case 0x01: return "DONE"; - case 0x02: return "INTERRUPTED"; - case 0x03: return "STOPPED"; - case 0x04: return "TERMINATED"; - case 0x08: return "ACTIVE"; - /* Job exceptions */ - case 0x40: return "JOB_CONFIG_FAULT"; - case 0x41: return "JOB_POWER_FAULT"; - case 0x42: return "JOB_READ_FAULT"; - case 0x43: return "JOB_WRITE_FAULT"; - case 0x44: return "JOB_AFFINITY_FAULT"; - case 0x48: return "JOB_BUS_FAULT"; - case 0x50: return "INSTR_INVALID_PC"; - case 0x51: return "INSTR_INVALID_ENC"; - case 0x52: return "INSTR_TYPE_MISMATCH"; - case 0x53: return "INSTR_OPERAND_FAULT"; - case 0x54: return "INSTR_TLS_FAULT"; - case 0x55: return "INSTR_BARRIER_FAULT"; - case 0x56: return "INSTR_ALIGN_FAULT"; - case 0x58: return "DATA_INVALID_FAULT"; - case 0x59: return "TILE_RANGE_FAULT"; - case 0x5A: return "ADDR_RANGE_FAULT"; - case 0x60: return "OUT_OF_MEMORY"; - /* GPU exceptions */ - case 0x80: return "DELAYED_BUS_FAULT"; - case 0x88: return "SHAREABILITY_FAULT"; - /* MMU exceptions */ - case 
0xC1: return "TRANSLATION_FAULT_LEVEL1"; - case 0xC2: return "TRANSLATION_FAULT_LEVEL2"; - case 0xC3: return "TRANSLATION_FAULT_LEVEL3"; - case 0xC4: return "TRANSLATION_FAULT_LEVEL4"; - case 0xC8: return "PERMISSION_FAULT"; - case 0xC9 ... 0xCF: return "PERMISSION_FAULT"; - case 0xD1: return "TRANSTAB_BUS_FAULT_LEVEL1"; - case 0xD2: return "TRANSTAB_BUS_FAULT_LEVEL2"; - case 0xD3: return "TRANSTAB_BUS_FAULT_LEVEL3"; - case 0xD4: return "TRANSTAB_BUS_FAULT_LEVEL4"; - case 0xD8: return "ACCESS_FLAG"; - case 0xD9 ... 0xDF: return "ACCESS_FLAG"; - case 0xE0 ... 0xE7: return "ADDRESS_SIZE_FAULT"; - case 0xE8 ... 0xEF: return "MEMORY_ATTRIBUTES_FAULT"; +#define PANFROST_EXCEPTION(id) \ + [DRM_PANFROST_EXCEPTION_ ## id] = { \ + .name = #id, \ } - return "UNKNOWN"; +struct panfrost_exception_info { + const char *name; +}; + +static const struct panfrost_exception_info panfrost_exception_infos[] = { + PANFROST_EXCEPTION(OK), + PANFROST_EXCEPTION(DONE), + PANFROST_EXCEPTION(INTERRUPTED), + PANFROST_EXCEPTION(STOPPED), + PANFROST_EXCEPTION(TERMINATED), + PANFROST_EXCEPTION(KABOOM), + PANFROST_EXCEPTION(EUREKA), + PANFROST_EXCEPTION(ACTIVE), + PANFROST_EXCEPTION(JOB_CONFIG_FAULT), + PANFROST_EXCEPTION(JOB_POWER_FAULT), + PANFROST_EXCEPTION(JOB_READ_FAULT), + PANFROST_EXCEPTION(JOB_WRITE_FAULT), + PANFROST_EXCEPTION(JOB_AFFINITY_FAULT), + PANFROST_EXCEPTION(JOB_BUS_FAULT), + PANFROST_EXCEPTION(INSTR_INVALID_PC), + PANFROST_EXCEPTION(INSTR_INVALID_ENC), + PANFROST_EXCEPTION(INSTR_TYPE_MISMATCH), + PANFROST_EXCEPTION(INSTR_OPERAND_FAULT), + PANFROST_EXCEPTION(INSTR_TLS_FAULT), + PANFROST_EXCEPTION(INSTR_BARRIER_FAULT), + PANFROST_EXCEPTION(INSTR_ALIGN_FAULT), + PANFROST_EXCEPTION(DATA_INVALID_FAULT), + PANFROST_EXCEPTION(TILE_RANGE_FAULT), + PANFROST_EXCEPTION(ADDR_RANGE_FAULT), + PANFROST_EXCEPTION(IMPRECISE_FAULT), + PANFROST_EXCEPTION(OOM), + PANFROST_EXCEPTION(OOM_AFBC), + PANFROST_EXCEPTION(UNKNOWN), + PANFROST_EXCEPTION(DELAYED_BUS_FAULT), + PANFROST_EXCEPTION(GPU_SHAREABILITY_FAULT), + PANFROST_EXCEPTION(SYS_SHAREABILITY_FAULT), + PANFROST_EXCEPTION(GPU_CACHEABILITY_FAULT), + PANFROST_EXCEPTION(TRANSLATION_FAULT_0), + PANFROST_EXCEPTION(TRANSLATION_FAULT_1), + PANFROST_EXCEPTION(TRANSLATION_FAULT_2), + PANFROST_EXCEPTION(TRANSLATION_FAULT_3), + PANFROST_EXCEPTION(TRANSLATION_FAULT_4), + PANFROST_EXCEPTION(TRANSLATION_FAULT_IDENTITY), + PANFROST_EXCEPTION(PERM_FAULT_0), + PANFROST_EXCEPTION(PERM_FAULT_1), + PANFROST_EXCEPTION(PERM_FAULT_2), + PANFROST_EXCEPTION(PERM_FAULT_3), + PANFROST_EXCEPTION(TRANSTAB_BUS_FAULT_0), + PANFROST_EXCEPTION(TRANSTAB_BUS_FAULT_1), + PANFROST_EXCEPTION(TRANSTAB_BUS_FAULT_2), + PANFROST_EXCEPTION(TRANSTAB_BUS_FAULT_3), + PANFROST_EXCEPTION(ACCESS_FLAG_0), + PANFROST_EXCEPTION(ACCESS_FLAG_1), + PANFROST_EXCEPTION(ACCESS_FLAG_2), + PANFROST_EXCEPTION(ACCESS_FLAG_3), + PANFROST_EXCEPTION(ADDR_SIZE_FAULT_IN0), + PANFROST_EXCEPTION(ADDR_SIZE_FAULT_IN1), + PANFROST_EXCEPTION(ADDR_SIZE_FAULT_IN2), + PANFROST_EXCEPTION(ADDR_SIZE_FAULT_IN3), + PANFROST_EXCEPTION(ADDR_SIZE_FAULT_OUT0), + PANFROST_EXCEPTION(ADDR_SIZE_FAULT_OUT1), + PANFROST_EXCEPTION(ADDR_SIZE_FAULT_OUT2), + PANFROST_EXCEPTION(ADDR_SIZE_FAULT_OUT3), + PANFROST_EXCEPTION(MEM_ATTR_FAULT_0), + PANFROST_EXCEPTION(MEM_ATTR_FAULT_1), + PANFROST_EXCEPTION(MEM_ATTR_FAULT_2), + PANFROST_EXCEPTION(MEM_ATTR_FAULT_3), + PANFROST_EXCEPTION(MEM_ATTR_NONCACHE_0), + PANFROST_EXCEPTION(MEM_ATTR_NONCACHE_1), + PANFROST_EXCEPTION(MEM_ATTR_NONCACHE_2), + PANFROST_EXCEPTION(MEM_ATTR_NONCACHE_3), +}; + +const char 
*panfrost_exception_name(u32 exception_code) +{ + if (WARN_ON(exception_code >= ARRAY_SIZE(panfrost_exception_infos) || + !panfrost_exception_infos[exception_code].name)) + return "Unknown exception type"; + + return panfrost_exception_infos[exception_code].name; +} + +bool panfrost_exception_needs_reset(const struct panfrost_device *pfdev, + u32 exception_code) +{ + /* Right now, none of the GPU we support need a reset, but this + * might change. + */ + return false; } void panfrost_device_reset(struct panfrost_device *pfdev) diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index f614e98771e4..8b25278f34c8 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -97,11 +97,12 @@ struct panfrost_device { spinlock_t as_lock; unsigned long as_in_use_mask; unsigned long as_alloc_mask; + unsigned long as_faulty_mask; struct list_head as_lru_list; struct panfrost_job_slot *js; - struct panfrost_job *jobs[NUM_JOB_SLOTS]; + struct panfrost_job *jobs[NUM_JOB_SLOTS][2]; struct list_head scheduled_jobs; struct panfrost_perfcnt *perfcnt; @@ -109,6 +110,7 @@ struct panfrost_device { struct mutex sched_lock; struct { + struct workqueue_struct *wq; struct work_struct work; atomic_t pending; } reset; @@ -121,8 +123,12 @@ struct panfrost_device { }; struct panfrost_mmu { + struct panfrost_device *pfdev; + struct kref refcount; struct io_pgtable_cfg pgtbl_cfg; struct io_pgtable_ops *pgtbl_ops; + struct drm_mm mm; + spinlock_t mm_lock; int as; atomic_t as_count; struct list_head list; @@ -133,9 +139,7 @@ struct panfrost_file_priv { struct drm_sched_entity sched_entity[NUM_JOB_SLOTS]; - struct panfrost_mmu mmu; - struct drm_mm mm; - spinlock_t mm_lock; + struct panfrost_mmu *mmu; }; static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev) @@ -171,6 +175,91 @@ void panfrost_device_reset(struct panfrost_device *pfdev); int panfrost_device_resume(struct device *dev); int panfrost_device_suspend(struct device *dev); -const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception_code); +enum drm_panfrost_exception_type { + DRM_PANFROST_EXCEPTION_OK = 0x00, + DRM_PANFROST_EXCEPTION_DONE = 0x01, + DRM_PANFROST_EXCEPTION_INTERRUPTED = 0x02, + DRM_PANFROST_EXCEPTION_STOPPED = 0x03, + DRM_PANFROST_EXCEPTION_TERMINATED = 0x04, + DRM_PANFROST_EXCEPTION_KABOOM = 0x05, + DRM_PANFROST_EXCEPTION_EUREKA = 0x06, + DRM_PANFROST_EXCEPTION_ACTIVE = 0x08, + DRM_PANFROST_EXCEPTION_MAX_NON_FAULT = 0x3f, + DRM_PANFROST_EXCEPTION_JOB_CONFIG_FAULT = 0x40, + DRM_PANFROST_EXCEPTION_JOB_POWER_FAULT = 0x41, + DRM_PANFROST_EXCEPTION_JOB_READ_FAULT = 0x42, + DRM_PANFROST_EXCEPTION_JOB_WRITE_FAULT = 0x43, + DRM_PANFROST_EXCEPTION_JOB_AFFINITY_FAULT = 0x44, + DRM_PANFROST_EXCEPTION_JOB_BUS_FAULT = 0x48, + DRM_PANFROST_EXCEPTION_INSTR_INVALID_PC = 0x50, + DRM_PANFROST_EXCEPTION_INSTR_INVALID_ENC = 0x51, + DRM_PANFROST_EXCEPTION_INSTR_TYPE_MISMATCH = 0x52, + DRM_PANFROST_EXCEPTION_INSTR_OPERAND_FAULT = 0x53, + DRM_PANFROST_EXCEPTION_INSTR_TLS_FAULT = 0x54, + DRM_PANFROST_EXCEPTION_INSTR_BARRIER_FAULT = 0x55, + DRM_PANFROST_EXCEPTION_INSTR_ALIGN_FAULT = 0x56, + DRM_PANFROST_EXCEPTION_DATA_INVALID_FAULT = 0x58, + DRM_PANFROST_EXCEPTION_TILE_RANGE_FAULT = 0x59, + DRM_PANFROST_EXCEPTION_ADDR_RANGE_FAULT = 0x5a, + DRM_PANFROST_EXCEPTION_IMPRECISE_FAULT = 0x5b, + DRM_PANFROST_EXCEPTION_OOM = 0x60, + DRM_PANFROST_EXCEPTION_OOM_AFBC = 0x61, + DRM_PANFROST_EXCEPTION_UNKNOWN = 0x7f, + 
DRM_PANFROST_EXCEPTION_DELAYED_BUS_FAULT = 0x80, + DRM_PANFROST_EXCEPTION_GPU_SHAREABILITY_FAULT = 0x88, + DRM_PANFROST_EXCEPTION_SYS_SHAREABILITY_FAULT = 0x89, + DRM_PANFROST_EXCEPTION_GPU_CACHEABILITY_FAULT = 0x8a, + DRM_PANFROST_EXCEPTION_TRANSLATION_FAULT_0 = 0xc0, + DRM_PANFROST_EXCEPTION_TRANSLATION_FAULT_1 = 0xc1, + DRM_PANFROST_EXCEPTION_TRANSLATION_FAULT_2 = 0xc2, + DRM_PANFROST_EXCEPTION_TRANSLATION_FAULT_3 = 0xc3, + DRM_PANFROST_EXCEPTION_TRANSLATION_FAULT_4 = 0xc4, + DRM_PANFROST_EXCEPTION_TRANSLATION_FAULT_IDENTITY = 0xc7, + DRM_PANFROST_EXCEPTION_PERM_FAULT_0 = 0xc8, + DRM_PANFROST_EXCEPTION_PERM_FAULT_1 = 0xc9, + DRM_PANFROST_EXCEPTION_PERM_FAULT_2 = 0xca, + DRM_PANFROST_EXCEPTION_PERM_FAULT_3 = 0xcb, + DRM_PANFROST_EXCEPTION_TRANSTAB_BUS_FAULT_0 = 0xd0, + DRM_PANFROST_EXCEPTION_TRANSTAB_BUS_FAULT_1 = 0xd1, + DRM_PANFROST_EXCEPTION_TRANSTAB_BUS_FAULT_2 = 0xd2, + DRM_PANFROST_EXCEPTION_TRANSTAB_BUS_FAULT_3 = 0xd3, + DRM_PANFROST_EXCEPTION_ACCESS_FLAG_0 = 0xd8, + DRM_PANFROST_EXCEPTION_ACCESS_FLAG_1 = 0xd9, + DRM_PANFROST_EXCEPTION_ACCESS_FLAG_2 = 0xda, + DRM_PANFROST_EXCEPTION_ACCESS_FLAG_3 = 0xdb, + DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_IN0 = 0xe0, + DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_IN1 = 0xe1, + DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_IN2 = 0xe2, + DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_IN3 = 0xe3, + DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_OUT0 = 0xe4, + DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_OUT1 = 0xe5, + DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_OUT2 = 0xe6, + DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_OUT3 = 0xe7, + DRM_PANFROST_EXCEPTION_MEM_ATTR_FAULT_0 = 0xe8, + DRM_PANFROST_EXCEPTION_MEM_ATTR_FAULT_1 = 0xe9, + DRM_PANFROST_EXCEPTION_MEM_ATTR_FAULT_2 = 0xea, + DRM_PANFROST_EXCEPTION_MEM_ATTR_FAULT_3 = 0xeb, + DRM_PANFROST_EXCEPTION_MEM_ATTR_NONCACHE_0 = 0xec, + DRM_PANFROST_EXCEPTION_MEM_ATTR_NONCACHE_1 = 0xed, + DRM_PANFROST_EXCEPTION_MEM_ATTR_NONCACHE_2 = 0xee, + DRM_PANFROST_EXCEPTION_MEM_ATTR_NONCACHE_3 = 0xef, +}; + +static inline bool +panfrost_exception_is_fault(u32 exception_code) +{ + return exception_code > DRM_PANFROST_EXCEPTION_MAX_NON_FAULT; +} + +const char *panfrost_exception_name(u32 exception_code); +bool panfrost_exception_needs_reset(const struct panfrost_device *pfdev, + u32 exception_code); + +static inline void +panfrost_device_schedule_reset(struct panfrost_device *pfdev) +{ + atomic_set(&pfdev->reset.pending, 1); + queue_work(pfdev->reset.wq, &pfdev->reset.work); +} #endif diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 075ec0ef746c..1ffaef5ec5ff 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -138,12 +138,6 @@ panfrost_lookup_bos(struct drm_device *dev, if (!job->bo_count) return 0; - job->implicit_fences = kvmalloc_array(job->bo_count, - sizeof(struct dma_fence *), - GFP_KERNEL | __GFP_ZERO); - if (!job->implicit_fences) - return -ENOMEM; - ret = drm_gem_objects_lookup(file_priv, (void __user *)(uintptr_t)args->bo_handles, job->bo_count, &job->bos); @@ -174,7 +168,7 @@ panfrost_lookup_bos(struct drm_device *dev, } /** - * panfrost_copy_in_sync() - Sets up job->in_fences[] with the sync objects + * panfrost_copy_in_sync() - Sets up job->deps with the sync objects * referenced by the job. 
* @dev: DRM device * @file_priv: DRM file for this fd @@ -194,22 +188,14 @@ panfrost_copy_in_sync(struct drm_device *dev, { u32 *handles; int ret = 0; - int i; + int i, in_fence_count; - job->in_fence_count = args->in_sync_count; + in_fence_count = args->in_sync_count; - if (!job->in_fence_count) + if (!in_fence_count) return 0; - job->in_fences = kvmalloc_array(job->in_fence_count, - sizeof(struct dma_fence *), - GFP_KERNEL | __GFP_ZERO); - if (!job->in_fences) { - DRM_DEBUG("Failed to allocate job in fences\n"); - return -ENOMEM; - } - - handles = kvmalloc_array(job->in_fence_count, sizeof(u32), GFP_KERNEL); + handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL); if (!handles) { ret = -ENOMEM; DRM_DEBUG("Failed to allocate incoming syncobj handles\n"); @@ -218,16 +204,23 @@ panfrost_copy_in_sync(struct drm_device *dev, if (copy_from_user(handles, (void __user *)(uintptr_t)args->in_syncs, - job->in_fence_count * sizeof(u32))) { + in_fence_count * sizeof(u32))) { ret = -EFAULT; DRM_DEBUG("Failed to copy in syncobj handles\n"); goto fail; } - for (i = 0; i < job->in_fence_count; i++) { + for (i = 0; i < in_fence_count; i++) { + struct dma_fence *fence; + ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0, - &job->in_fences[i]); - if (ret == -EINVAL) + &fence); + if (ret) + goto fail; + + ret = drm_gem_fence_array_add(&job->deps, fence); + + if (ret) goto fail; } @@ -265,6 +258,8 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, kref_init(&job->refcount); + xa_init_flags(&job->deps, XA_FLAGS_ALLOC); + job->pfdev = pfdev; job->jc = args->jc; job->requirements = args->requirements; @@ -417,7 +412,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, * anyway, so let's not bother. */ if (!list_is_singular(&bo->mappings.list) || - WARN_ON_ONCE(first->mmu != &priv->mmu)) { + WARN_ON_ONCE(first->mmu != priv->mmu)) { ret = -EINVAL; goto out_unlock_mappings; } @@ -449,32 +444,6 @@ int panfrost_unstable_ioctl_check(void) return 0; } -#define PFN_4G (SZ_4G >> PAGE_SHIFT) -#define PFN_4G_MASK (PFN_4G - 1) -#define PFN_16M (SZ_16M >> PAGE_SHIFT) - -static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node, - unsigned long color, - u64 *start, u64 *end) -{ - /* Executable buffers can't start or end on a 4GB boundary */ - if (!(color & PANFROST_BO_NOEXEC)) { - u64 next_seg; - - if ((*start & PFN_4G_MASK) == 0) - (*start)++; - - if ((*end & PFN_4G_MASK) == 0) - (*end)--; - - next_seg = ALIGN(*start, PFN_4G); - if (next_seg - *start <= PFN_16M) - *start = next_seg + 1; - - *end = min(*end, ALIGN(*start, PFN_4G) - 1); - } -} - static int panfrost_open(struct drm_device *dev, struct drm_file *file) { @@ -489,15 +458,11 @@ panfrost_open(struct drm_device *dev, struct drm_file *file) panfrost_priv->pfdev = pfdev; file->driver_priv = panfrost_priv; - spin_lock_init(&panfrost_priv->mm_lock); - - /* 4G enough for now. 
can be 48-bit */ - drm_mm_init(&panfrost_priv->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT); - panfrost_priv->mm.color_adjust = panfrost_drm_mm_color_adjust; - - ret = panfrost_mmu_pgtable_alloc(panfrost_priv); - if (ret) - goto err_pgtable; + panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev); + if (IS_ERR(panfrost_priv->mmu)) { + ret = PTR_ERR(panfrost_priv->mmu); + goto err_free; + } ret = panfrost_job_open(panfrost_priv); if (ret) @@ -506,9 +471,8 @@ panfrost_open(struct drm_device *dev, struct drm_file *file) return 0; err_job: - panfrost_mmu_pgtable_free(panfrost_priv); -err_pgtable: - drm_mm_takedown(&panfrost_priv->mm); + panfrost_mmu_ctx_put(panfrost_priv->mmu); +err_free: kfree(panfrost_priv); return ret; } @@ -521,8 +485,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file) panfrost_perfcnt_close(file); panfrost_job_close(panfrost_priv); - panfrost_mmu_pgtable_free(panfrost_priv); - drm_mm_takedown(&panfrost_priv->mm); + panfrost_mmu_ctx_put(panfrost_priv->mmu); kfree(panfrost_priv); } diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 3e0723bc36bd..23377481f4e3 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -60,7 +60,7 @@ panfrost_gem_mapping_get(struct panfrost_gem_object *bo, mutex_lock(&bo->mappings.lock); list_for_each_entry(iter, &bo->mappings.list, node) { - if (iter->mmu == &priv->mmu) { + if (iter->mmu == priv->mmu) { kref_get(&iter->refcount); mapping = iter; break; @@ -74,16 +74,13 @@ panfrost_gem_mapping_get(struct panfrost_gem_object *bo, static void panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping) { - struct panfrost_file_priv *priv; - if (mapping->active) panfrost_mmu_unmap(mapping); - priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu); - spin_lock(&priv->mm_lock); + spin_lock(&mapping->mmu->mm_lock); if (drm_mm_node_allocated(&mapping->mmnode)) drm_mm_remove_node(&mapping->mmnode); - spin_unlock(&priv->mm_lock); + spin_unlock(&mapping->mmu->mm_lock); } static void panfrost_gem_mapping_release(struct kref *kref) @@ -94,6 +91,7 @@ static void panfrost_gem_mapping_release(struct kref *kref) panfrost_gem_teardown_mapping(mapping); drm_gem_object_put(&mapping->obj->base.base); + panfrost_mmu_ctx_put(mapping->mmu); kfree(mapping); } @@ -143,11 +141,11 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) else align = size >= SZ_2M ? 
SZ_2M >> PAGE_SHIFT : 0; - mapping->mmu = &priv->mmu; - spin_lock(&priv->mm_lock); - ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode, + mapping->mmu = panfrost_mmu_ctx_get(priv->mmu); + spin_lock(&mapping->mmu->mm_lock); + ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode, size >> PAGE_SHIFT, align, color, 0); - spin_unlock(&priv->mm_lock); + spin_unlock(&mapping->mmu->mm_lock); if (ret) goto err; @@ -176,7 +174,7 @@ void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv) mutex_lock(&bo->mappings.lock); list_for_each_entry(iter, &bo->mappings.list, node) { - if (iter->mmu == &priv->mmu) { + if (iter->mmu == priv->mmu) { mapping = iter; list_del(&iter->node); break; diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 0e70e27fd8c3..bbe628b306ee 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -33,7 +33,7 @@ static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data) address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO); dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n", - fault_status & 0xFF, panfrost_exception_name(pfdev, fault_status), + fault_status, panfrost_exception_name(fault_status & 0xFF), address); if (state & GPU_IRQ_MULTIPLE_FAULT) diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 2df3e999a38d..71a72fb50e6b 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -4,6 +4,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/dma-resv.h> @@ -25,17 +26,8 @@ #define job_write(dev, reg, data) writel(data, dev->iomem + (reg)) #define job_read(dev, reg) readl(dev->iomem + (reg)) -enum panfrost_queue_status { - PANFROST_QUEUE_STATUS_ACTIVE, - PANFROST_QUEUE_STATUS_STOPPED, - PANFROST_QUEUE_STATUS_STARTING, - PANFROST_QUEUE_STATUS_FAULT_PENDING, -}; - struct panfrost_queue_state { struct drm_gpu_scheduler sched; - atomic_t status; - struct mutex lock; u64 fence_context; u64 emit_seqno; }; @@ -43,6 +35,7 @@ struct panfrost_queue_state { struct panfrost_job_slot { struct panfrost_queue_state queue[NUM_JOB_SLOTS]; spinlock_t job_lock; + int irq; }; static struct panfrost_job * @@ -148,9 +141,52 @@ static void panfrost_job_write_affinity(struct panfrost_device *pfdev, job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32); } +static u32 +panfrost_get_job_chain_flag(const struct panfrost_job *job) +{ + struct panfrost_fence *f = to_panfrost_fence(job->done_fence); + + if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) + return 0; + + return (f->seqno & 1) ? 
JS_CONFIG_JOB_CHAIN_FLAG : 0; +} + +static struct panfrost_job * +panfrost_dequeue_job(struct panfrost_device *pfdev, int slot) +{ + struct panfrost_job *job = pfdev->jobs[slot][0]; + + WARN_ON(!job); + pfdev->jobs[slot][0] = pfdev->jobs[slot][1]; + pfdev->jobs[slot][1] = NULL; + + return job; +} + +static unsigned int +panfrost_enqueue_job(struct panfrost_device *pfdev, int slot, + struct panfrost_job *job) +{ + if (WARN_ON(!job)) + return 0; + + if (!pfdev->jobs[slot][0]) { + pfdev->jobs[slot][0] = job; + return 0; + } + + WARN_ON(pfdev->jobs[slot][1]); + pfdev->jobs[slot][1] = job; + WARN_ON(panfrost_get_job_chain_flag(job) == + panfrost_get_job_chain_flag(pfdev->jobs[slot][0])); + return 1; +} + static void panfrost_job_hw_submit(struct panfrost_job *job, int js) { struct panfrost_device *pfdev = job->pfdev; + unsigned int subslot; u32 cfg; u64 jc_head = job->jc; int ret; @@ -165,7 +201,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) return; } - cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu); + cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu); job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF); job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32); @@ -176,7 +212,8 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) * start */ cfg |= JS_CONFIG_THREAD_PRI(8) | JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE | - JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE; + JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE | + panfrost_get_job_chain_flag(job); if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION; @@ -190,20 +227,33 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id); /* GO ! */ - dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx", - job, js, jc_head); - job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START); + spin_lock(&pfdev->js->job_lock); + subslot = panfrost_enqueue_job(pfdev, js, job); + /* Don't queue the job if a reset is in progress */ + if (!atomic_read(&pfdev->reset.pending)) { + job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START); + dev_dbg(pfdev->dev, + "JS: Submitting atom %p to js[%d][%d] with head=0x%llx AS %d", + job, js, subslot, jc_head, cfg & 0xf); + } + spin_unlock(&pfdev->js->job_lock); } -static void panfrost_acquire_object_fences(struct drm_gem_object **bos, - int bo_count, - struct dma_fence **implicit_fences) +static int panfrost_acquire_object_fences(struct drm_gem_object **bos, + int bo_count, + struct xarray *deps) { - int i; + int i, ret; - for (i = 0; i < bo_count; i++) - implicit_fences[i] = dma_resv_get_excl_unlocked(bos[i]->resv); + for (i = 0; i < bo_count; i++) { + /* panfrost always uses write mode in its current uapi */ + ret = drm_gem_fence_array_add_implicit(deps, bos[i], true); + if (ret) + return ret; + } + + return 0; } static void panfrost_attach_object_fences(struct drm_gem_object **bos, @@ -224,14 +274,13 @@ int panfrost_job_push(struct panfrost_job *job) struct ww_acquire_ctx acquire_ctx; int ret = 0; - mutex_lock(&pfdev->sched_lock); ret = drm_gem_lock_reservations(job->bos, job->bo_count, &acquire_ctx); - if (ret) { - mutex_unlock(&pfdev->sched_lock); + if (ret) return ret; - } + + mutex_lock(&pfdev->sched_lock); ret = drm_sched_job_init(&job->base, entity, NULL); if (ret) { @@ -241,10 +290,14 @@ int panfrost_job_push(struct panfrost_job *job) job->render_done_fence = dma_fence_get(&job->base.s_fence->finished); - kref_get(&job->refcount); /* put by 
scheduler job completion */ + ret = panfrost_acquire_object_fences(job->bos, job->bo_count, + &job->deps); + if (ret) { + mutex_unlock(&pfdev->sched_lock); + goto unlock; + } - panfrost_acquire_object_fences(job->bos, job->bo_count, - job->implicit_fences); + kref_get(&job->refcount); /* put by scheduler job completion */ drm_sched_entity_push_job(&job->base, entity); @@ -263,18 +316,15 @@ static void panfrost_job_cleanup(struct kref *ref) { struct panfrost_job *job = container_of(ref, struct panfrost_job, refcount); + struct dma_fence *fence; + unsigned long index; unsigned int i; - if (job->in_fences) { - for (i = 0; i < job->in_fence_count; i++) - dma_fence_put(job->in_fences[i]); - kvfree(job->in_fences); - } - if (job->implicit_fences) { - for (i = 0; i < job->bo_count; i++) - dma_fence_put(job->implicit_fences[i]); - kvfree(job->implicit_fences); + xa_for_each(&job->deps, index, fence) { + dma_fence_put(fence); } + xa_destroy(&job->deps); + dma_fence_put(job->done_fence); dma_fence_put(job->render_done_fence); @@ -317,26 +367,9 @@ static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job struct drm_sched_entity *s_entity) { struct panfrost_job *job = to_panfrost_job(sched_job); - struct dma_fence *fence; - unsigned int i; - /* Explicit fences */ - for (i = 0; i < job->in_fence_count; i++) { - if (job->in_fences[i]) { - fence = job->in_fences[i]; - job->in_fences[i] = NULL; - return fence; - } - } - - /* Implicit fences, max. one per BO */ - for (i = 0; i < job->bo_count; i++) { - if (job->implicit_fences[i]) { - fence = job->implicit_fences[i]; - job->implicit_fences[i] = NULL; - return fence; - } - } + if (!xa_empty(&job->deps)) + return xa_erase(&job->deps, job->last_dep++); return NULL; } @@ -351,11 +384,15 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job) if (unlikely(job->base.s_fence->finished.error)) return NULL; - pfdev->jobs[slot] = job; + /* Nothing to execute: can happen if the job has finished while + * we were resetting the GPU. 
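+ * Its done fence was signalled when it completed, so returning NULL here simply skips the hardware submission for the resubmitted job.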
+ */ + if (!job->jc) + return NULL; fence = panfrost_fence_create(pfdev, slot); if (IS_ERR(fence)) - return NULL; + return fence; if (job->done_fence) dma_fence_put(job->done_fence); @@ -379,57 +416,314 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev) job_write(pfdev, JOB_INT_MASK, irq_mask); } -static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue, - struct drm_sched_job *bad) +static void panfrost_job_handle_err(struct panfrost_device *pfdev, + struct panfrost_job *job, + unsigned int js) { - enum panfrost_queue_status old_status; - bool stopped = false; + u32 js_status = job_read(pfdev, JS_STATUS(js)); + const char *exception_name = panfrost_exception_name(js_status); + bool signal_fence = true; + + if (!panfrost_exception_is_fault(js_status)) { + dev_dbg(pfdev->dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x", + js, exception_name, + job_read(pfdev, JS_HEAD_LO(js)), + job_read(pfdev, JS_TAIL_LO(js))); + } else { + dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x", + js, exception_name, + job_read(pfdev, JS_HEAD_LO(js)), + job_read(pfdev, JS_TAIL_LO(js))); + } - mutex_lock(&queue->lock); - old_status = atomic_xchg(&queue->status, - PANFROST_QUEUE_STATUS_STOPPED); - if (old_status == PANFROST_QUEUE_STATUS_STOPPED) - goto out; + if (js_status == DRM_PANFROST_EXCEPTION_STOPPED) { + /* Update the job head so we can resume */ + job->jc = job_read(pfdev, JS_TAIL_LO(js)) | + ((u64)job_read(pfdev, JS_TAIL_HI(js)) << 32); + + /* The job will be resumed, don't signal the fence */ + signal_fence = false; + } else if (js_status == DRM_PANFROST_EXCEPTION_TERMINATED) { + /* Job has been hard-stopped, flag it as canceled */ + dma_fence_set_error(job->done_fence, -ECANCELED); + job->jc = 0; + } else if (panfrost_exception_is_fault(js_status)) { + /* We might want to provide finer-grained error code based on + * the exception type, but unconditionally setting to EINVAL + * is good enough for now. + */ + dma_fence_set_error(job->done_fence, -EINVAL); + job->jc = 0; + } - WARN_ON(old_status != PANFROST_QUEUE_STATUS_ACTIVE); - drm_sched_stop(&queue->sched, bad); - if (bad) - drm_sched_increase_karma(bad); + panfrost_mmu_as_put(pfdev, job->file_priv->mmu); + panfrost_devfreq_record_idle(&pfdev->pfdevfreq); - stopped = true; + if (signal_fence) + dma_fence_signal_locked(job->done_fence); - /* - * Set the timeout to max so the timer doesn't get started - * when we return from the timeout handler (restored in - * panfrost_scheduler_start()). + pm_runtime_put_autosuspend(pfdev->dev); + + if (panfrost_exception_needs_reset(pfdev, js_status)) { + atomic_set(&pfdev->reset.pending, 1); + drm_sched_fault(&pfdev->js->queue[js].sched); + } +} + +static void panfrost_job_handle_done(struct panfrost_device *pfdev, + struct panfrost_job *job) +{ + /* Set ->jc to 0 to avoid re-submitting an already finished job (can + * happen when we receive the DONE interrupt while doing a GPU reset). + */ + job->jc = 0; + panfrost_mmu_as_put(pfdev, job->file_priv->mmu); + panfrost_devfreq_record_idle(&pfdev->pfdevfreq); + + dma_fence_signal_locked(job->done_fence); + pm_runtime_put_autosuspend(pfdev->dev); +} + +static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status) +{ + struct panfrost_job *done[NUM_JOB_SLOTS][2] = {}; + struct panfrost_job *failed[NUM_JOB_SLOTS] = {}; + u32 js_state = 0, js_events = 0; + unsigned int i, j; + + /* First we collect all failed/done jobs. 
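+ * Dequeued jobs are only signalled in the second pass below, once the interrupt status has been fully drained.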
*/ + while (status) { + u32 js_state_mask = 0; + + for (j = 0; j < NUM_JOB_SLOTS; j++) { + if (status & MK_JS_MASK(j)) + js_state_mask |= MK_JS_MASK(j); + + if (status & JOB_INT_MASK_DONE(j)) { + if (done[j][0]) + done[j][1] = panfrost_dequeue_job(pfdev, j); + else + done[j][0] = panfrost_dequeue_job(pfdev, j); + } + + if (status & JOB_INT_MASK_ERR(j)) { + /* Cancel the next submission. Will be submitted + * after we're done handling this failure if + * there's no reset pending. + */ + job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP); + failed[j] = panfrost_dequeue_job(pfdev, j); + } + } + + /* JS_STATE is sampled when JOB_INT_CLEAR is written. + * For each BIT(slot) or BIT(slot + 16) bit written to + * JOB_INT_CLEAR, the corresponding bits in JS_STATE + * (BIT(slot) and BIT(slot + 16)) are updated, but this + * is racy. If we only have one job done at the time we + * read JOB_INT_RAWSTAT but the second job fails before we + * clear the status, we end up with a status containing + * only the DONE bit and consider both jobs as DONE since + * JS_STATE reports both NEXT and CURRENT as inactive. + * To prevent that, let's repeat this clear+read steps + * until status is 0. + */ + job_write(pfdev, JOB_INT_CLEAR, status); + js_state &= ~js_state_mask; + js_state |= job_read(pfdev, JOB_INT_JS_STATE) & js_state_mask; + js_events |= status; + status = job_read(pfdev, JOB_INT_RAWSTAT); + } + + /* Then we handle the dequeued jobs. */ + for (j = 0; j < NUM_JOB_SLOTS; j++) { + if (!(js_events & MK_JS_MASK(j))) + continue; + + if (failed[j]) { + panfrost_job_handle_err(pfdev, failed[j], j); + } else if (pfdev->jobs[j][0] && !(js_state & MK_JS_MASK(j))) { + /* When the current job doesn't fail, the JM dequeues + * the next job without waiting for an ACK, this means + * we can have 2 jobs dequeued and only catch the + * interrupt when the second one is done. If both slots + * are inactive, but one job remains in pfdev->jobs[j], + * consider it done. Of course that doesn't apply if a + * failure happened since we cancelled execution of the + * job in _NEXT (see above). + */ + if (WARN_ON(!done[j][0])) + done[j][0] = panfrost_dequeue_job(pfdev, j); + else + done[j][1] = panfrost_dequeue_job(pfdev, j); + } + + for (i = 0; i < ARRAY_SIZE(done[0]) && done[j][i]; i++) + panfrost_job_handle_done(pfdev, done[j][i]); + } + + /* And finally we requeue jobs that were waiting in the second slot + * and have been stopped if we detected a failure on the first slot. 
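+ * Jobs whose head pointer was cleared are treated as cancelled: their fence is signalled with -ECANCELED instead of being restarted.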
*/ - queue->sched.timeout = MAX_SCHEDULE_TIMEOUT; + for (j = 0; j < NUM_JOB_SLOTS; j++) { + if (!(js_events & MK_JS_MASK(j))) + continue; + + if (!failed[j] || !pfdev->jobs[j][0]) + continue; + + if (pfdev->jobs[j][0]->jc == 0) { + /* The job was cancelled, signal the fence now */ + struct panfrost_job *canceled = panfrost_dequeue_job(pfdev, j); + + dma_fence_set_error(canceled->done_fence, -ECANCELED); + panfrost_job_handle_done(pfdev, canceled); + } else if (!atomic_read(&pfdev->reset.pending)) { + /* Requeue the job we removed if no reset is pending */ + job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_START); + } + } +} -out: - mutex_unlock(&queue->lock); +static void panfrost_job_handle_irqs(struct panfrost_device *pfdev) +{ + u32 status = job_read(pfdev, JOB_INT_RAWSTAT); + + while (status) { + pm_runtime_mark_last_busy(pfdev->dev); - return stopped; + spin_lock(&pfdev->js->job_lock); + panfrost_job_handle_irq(pfdev, status); + spin_unlock(&pfdev->js->job_lock); + status = job_read(pfdev, JOB_INT_RAWSTAT); + } } -static void panfrost_scheduler_start(struct panfrost_queue_state *queue) +static u32 panfrost_active_slots(struct panfrost_device *pfdev, + u32 *js_state_mask, u32 js_state) { - enum panfrost_queue_status old_status; + u32 rawstat; - mutex_lock(&queue->lock); - old_status = atomic_xchg(&queue->status, - PANFROST_QUEUE_STATUS_STARTING); - WARN_ON(old_status != PANFROST_QUEUE_STATUS_STOPPED); + if (!(js_state & *js_state_mask)) + return 0; - /* Restore the original timeout before starting the scheduler. */ - queue->sched.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS); - drm_sched_resubmit_jobs(&queue->sched); - drm_sched_start(&queue->sched, true); - old_status = atomic_xchg(&queue->status, - PANFROST_QUEUE_STATUS_ACTIVE); - if (old_status == PANFROST_QUEUE_STATUS_FAULT_PENDING) - drm_sched_fault(&queue->sched); + rawstat = job_read(pfdev, JOB_INT_RAWSTAT); + if (rawstat) { + unsigned int i; + + for (i = 0; i < NUM_JOB_SLOTS; i++) { + if (rawstat & MK_JS_MASK(i)) + *js_state_mask &= ~MK_JS_MASK(i); + } + } - mutex_unlock(&queue->lock); + return js_state & *js_state_mask; +} + +static void +panfrost_reset(struct panfrost_device *pfdev, + struct drm_sched_job *bad) +{ + u32 js_state, js_state_mask = 0xffffffff; + unsigned int i, j; + bool cookie; + int ret; + + if (!atomic_read(&pfdev->reset.pending)) + return; + + /* Stop the schedulers. + * + * FIXME: We temporarily get out of the dma_fence_signalling section + * because the cleanup path generate lockdep splats when taking locks + * to release job resources. We should rework the code to follow this + * pattern: + * + * try_lock + * if (locked) + * release + * else + * schedule_work_to_release_later + */ + for (i = 0; i < NUM_JOB_SLOTS; i++) + drm_sched_stop(&pfdev->js->queue[i].sched, bad); + + cookie = dma_fence_begin_signalling(); + + if (bad) + drm_sched_increase_karma(bad); + + /* Mask job interrupts and synchronize to make sure we won't be + * interrupted during our reset. + */ + job_write(pfdev, JOB_INT_MASK, 0); + synchronize_irq(pfdev->js->irq); + + for (i = 0; i < NUM_JOB_SLOTS; i++) { + /* Cancel the next job and soft-stop the running job. 
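+ * A soft-stopped job raises DRM_PANFROST_EXCEPTION_STOPPED and gets its head updated from JS_TAIL in panfrost_job_handle_err(), so it can be resumed once the reset is done.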
*/ + job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP); + job_write(pfdev, JS_COMMAND(i), JS_COMMAND_SOFT_STOP); + } + + /* Wait at most 10ms for soft-stops to complete */ + ret = readl_poll_timeout(pfdev->iomem + JOB_INT_JS_STATE, js_state, + !panfrost_active_slots(pfdev, &js_state_mask, js_state), + 10, 10000); + + if (ret) + dev_err(pfdev->dev, "Soft-stop failed\n"); + + /* Handle the remaining interrupts before we reset. */ + panfrost_job_handle_irqs(pfdev); + + /* Remaining interrupts have been handled, but we might still have + * stuck jobs. Let's make sure the PM counters stay balanced by + * manually calling pm_runtime_put_noidle() and + * panfrost_devfreq_record_idle() for each stuck job. + */ + spin_lock(&pfdev->js->job_lock); + for (i = 0; i < NUM_JOB_SLOTS; i++) { + for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) { + pm_runtime_put_noidle(pfdev->dev); + panfrost_devfreq_record_idle(&pfdev->pfdevfreq); + } + } + memset(pfdev->jobs, 0, sizeof(pfdev->jobs)); + spin_unlock(&pfdev->js->job_lock); + + /* Proceed with reset now. */ + panfrost_device_reset(pfdev); + + /* panfrost_device_reset() unmasks job interrupts, but we want to + * keep them masked a bit longer. + */ + job_write(pfdev, JOB_INT_MASK, 0); + + /* GPU has been reset, we can clear the reset pending bit. */ + atomic_set(&pfdev->reset.pending, 0); + + /* Now resubmit jobs that were previously queued but didn't have a + * chance to finish. + * FIXME: We temporarily get out of the DMA fence signalling section + * while resubmitting jobs because the job submission logic will + * allocate memory with the GFP_KERNEL flag which can trigger memory + * reclaim and exposes a lock ordering issue. + */ + dma_fence_end_signalling(cookie); + for (i = 0; i < NUM_JOB_SLOTS; i++) + drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched); + cookie = dma_fence_begin_signalling(); + + /* Restart the schedulers */ + for (i = 0; i < NUM_JOB_SLOTS; i++) + drm_sched_start(&pfdev->js->queue[i].sched, true); + + /* Re-enable job interrupts now that everything has been restarted. */ + job_write(pfdev, JOB_INT_MASK, + GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | + GENMASK(NUM_JOB_SLOTS - 1, 0)); + + dma_fence_end_signalling(cookie); } static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job @@ -454,17 +748,20 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job job_read(pfdev, JS_TAIL_LO(js)), sched_job); - /* Scheduler is already stopped, nothing to do. */ - if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job)) - return DRM_GPU_SCHED_STAT_NOMINAL; - - /* Schedule a reset if there's no reset in progress. 
*/ - if (!atomic_xchg(&pfdev->reset.pending, 1)) - schedule_work(&pfdev->reset.work); + atomic_set(&pfdev->reset.pending, 1); + panfrost_reset(pfdev, sched_job); return DRM_GPU_SCHED_STAT_NOMINAL; } +static void panfrost_reset_work(struct work_struct *work) +{ + struct panfrost_device *pfdev; + + pfdev = container_of(work, struct panfrost_device, reset.work); + panfrost_reset(pfdev, NULL); +} + static const struct drm_sched_backend_ops panfrost_sched_ops = { .dependency = panfrost_job_dependency, .run_job = panfrost_job_run, @@ -472,161 +769,75 @@ static const struct drm_sched_backend_ops panfrost_sched_ops = { .free_job = panfrost_job_free }; -static irqreturn_t panfrost_job_irq_handler(int irq, void *data) +static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data) { struct panfrost_device *pfdev = data; - u32 status = job_read(pfdev, JOB_INT_STAT); - int j; - - dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status); - - if (!status) - return IRQ_NONE; - - pm_runtime_mark_last_busy(pfdev->dev); - - for (j = 0; status; j++) { - u32 mask = MK_JS_MASK(j); - - if (!(status & mask)) - continue; - - job_write(pfdev, JOB_INT_CLEAR, mask); - - if (status & JOB_INT_MASK_ERR(j)) { - enum panfrost_queue_status old_status; - - job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP); - - dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x", - j, - panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))), - job_read(pfdev, JS_HEAD_LO(j)), - job_read(pfdev, JS_TAIL_LO(j))); - - /* - * When the queue is being restarted we don't report - * faults directly to avoid races between the timeout - * and reset handlers. panfrost_scheduler_start() will - * call drm_sched_fault() after the queue has been - * started if status == FAULT_PENDING. - */ - old_status = atomic_cmpxchg(&pfdev->js->queue[j].status, - PANFROST_QUEUE_STATUS_STARTING, - PANFROST_QUEUE_STATUS_FAULT_PENDING); - if (old_status == PANFROST_QUEUE_STATUS_ACTIVE) - drm_sched_fault(&pfdev->js->queue[j].sched); - } - - if (status & JOB_INT_MASK_DONE(j)) { - struct panfrost_job *job; - - spin_lock(&pfdev->js->job_lock); - job = pfdev->jobs[j]; - /* Only NULL if job timeout occurred */ - if (job) { - pfdev->jobs[j] = NULL; - - panfrost_mmu_as_put(pfdev, &job->file_priv->mmu); - panfrost_devfreq_record_idle(&pfdev->pfdevfreq); - - dma_fence_signal_locked(job->done_fence); - pm_runtime_put_autosuspend(pfdev->dev); - } - spin_unlock(&pfdev->js->job_lock); - } - - status &= ~mask; - } + panfrost_job_handle_irqs(pfdev); + job_write(pfdev, JOB_INT_MASK, + GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | + GENMASK(NUM_JOB_SLOTS - 1, 0)); return IRQ_HANDLED; } -static void panfrost_reset(struct work_struct *work) +static irqreturn_t panfrost_job_irq_handler(int irq, void *data) { - struct panfrost_device *pfdev = container_of(work, - struct panfrost_device, - reset.work); - unsigned long flags; - unsigned int i; - bool cookie; - - cookie = dma_fence_begin_signalling(); - for (i = 0; i < NUM_JOB_SLOTS; i++) { - /* - * We want pending timeouts to be handled before we attempt - * to stop the scheduler. If we don't do that and the timeout - * handler is in flight, it might have removed the bad job - * from the list, and we'll lose this job if the reset handler - * enters the critical section in panfrost_scheduler_stop() - * before the timeout handler. 
- * - * Timeout is set to MAX_SCHEDULE_TIMEOUT - 1 because we need - * something big enough to make sure the timer will not expire - * before we manage to stop the scheduler, but we can't use - * MAX_SCHEDULE_TIMEOUT because drm_sched_get_cleanup_job() - * considers that as 'timer is not running' and will dequeue - * the job without making sure the timeout handler is not - * running. - */ - pfdev->js->queue[i].sched.timeout = MAX_SCHEDULE_TIMEOUT - 1; - cancel_delayed_work_sync(&pfdev->js->queue[i].sched.work_tdr); - panfrost_scheduler_stop(&pfdev->js->queue[i], NULL); - } - - /* All timers have been stopped, we can safely reset the pending state. */ - atomic_set(&pfdev->reset.pending, 0); - - spin_lock_irqsave(&pfdev->js->job_lock, flags); - for (i = 0; i < NUM_JOB_SLOTS; i++) { - if (pfdev->jobs[i]) { - pm_runtime_put_noidle(pfdev->dev); - panfrost_devfreq_record_idle(&pfdev->pfdevfreq); - pfdev->jobs[i] = NULL; - } - } - spin_unlock_irqrestore(&pfdev->js->job_lock, flags); - - panfrost_device_reset(pfdev); + struct panfrost_device *pfdev = data; + u32 status = job_read(pfdev, JOB_INT_STAT); - for (i = 0; i < NUM_JOB_SLOTS; i++) - panfrost_scheduler_start(&pfdev->js->queue[i]); + if (!status) + return IRQ_NONE; - dma_fence_end_signalling(cookie); + job_write(pfdev, JOB_INT_MASK, 0); + return IRQ_WAKE_THREAD; } int panfrost_job_init(struct panfrost_device *pfdev) { struct panfrost_job_slot *js; - int ret, j, irq; + unsigned int nentries = 2; + int ret, j; - INIT_WORK(&pfdev->reset.work, panfrost_reset); + /* All GPUs have two entries per queue, but without jobchain + * disambiguation stopping the right job in the close path is tricky, + * so let's just advertise one entry in that case. + */ + if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) + nentries = 1; pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL); if (!js) return -ENOMEM; + INIT_WORK(&pfdev->reset.work, panfrost_reset_work); spin_lock_init(&js->job_lock); - irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job"); - if (irq <= 0) + js->irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job"); + if (js->irq <= 0) return -ENODEV; - ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler, - IRQF_SHARED, KBUILD_MODNAME "-job", pfdev); + ret = devm_request_threaded_irq(pfdev->dev, js->irq, + panfrost_job_irq_handler, + panfrost_job_irq_handler_thread, + IRQF_SHARED, KBUILD_MODNAME "-job", + pfdev); if (ret) { dev_err(pfdev->dev, "failed to request job irq"); return ret; } - for (j = 0; j < NUM_JOB_SLOTS; j++) { - mutex_init(&js->queue[j].lock); + pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0); + if (!pfdev->reset.wq) + return -ENOMEM; + for (j = 0; j < NUM_JOB_SLOTS; j++) { js->queue[j].fence_context = dma_fence_context_alloc(1); ret = drm_sched_init(&js->queue[j].sched, &panfrost_sched_ops, - 1, 0, msecs_to_jiffies(JOB_TIMEOUT_MS), + nentries, 0, + msecs_to_jiffies(JOB_TIMEOUT_MS), + pfdev->reset.wq, NULL, "pan_js"); if (ret) { dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret); @@ -642,6 +853,7 @@ err_sched: for (j--; j >= 0; j--) drm_sched_fini(&js->queue[j].sched); + destroy_workqueue(pfdev->reset.wq); return ret; } @@ -654,9 +866,10 @@ void panfrost_job_fini(struct panfrost_device *pfdev) for (j = 0; j < NUM_JOB_SLOTS; j++) { drm_sched_fini(&js->queue[j].sched); - mutex_destroy(&js->queue[j].lock); } + cancel_work_sync(&pfdev->reset.work); + destroy_workqueue(pfdev->reset.wq); } int panfrost_job_open(struct panfrost_file_priv 
*panfrost_priv) @@ -679,10 +892,46 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv) void panfrost_job_close(struct panfrost_file_priv *panfrost_priv) { + struct panfrost_device *pfdev = panfrost_priv->pfdev; int i; for (i = 0; i < NUM_JOB_SLOTS; i++) drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]); + + /* Kill in-flight jobs */ + spin_lock(&pfdev->js->job_lock); + for (i = 0; i < NUM_JOB_SLOTS; i++) { + struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i]; + int j; + + for (j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) { + struct panfrost_job *job = pfdev->jobs[i][j]; + u32 cmd; + + if (!job || job->base.entity != entity) + continue; + + if (j == 1) { + /* Try to cancel the job before it starts */ + job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP); + /* Reset the job head so it doesn't get restarted if + * the job in the first slot failed. + */ + job->jc = 0; + } + + if (panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) { + cmd = panfrost_get_job_chain_flag(job) ? + JS_COMMAND_HARD_STOP_1 : + JS_COMMAND_HARD_STOP_0; + } else { + cmd = JS_COMMAND_HARD_STOP; + } + + job_write(pfdev, JS_COMMAND(i), cmd); + } + } + spin_unlock(&pfdev->js->job_lock); } int panfrost_job_is_idle(struct panfrost_device *pfdev) diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index bbd3ba97ff67..82306a03b57e 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -19,9 +19,9 @@ struct panfrost_job { struct panfrost_device *pfdev; struct panfrost_file_priv *file_priv; - /* Optional fences userspace can pass in for the job to depend on. */ - struct dma_fence **in_fences; - u32 in_fence_count; + /* Contains both explicit and implicit fences */ + struct xarray deps; + unsigned long last_dep; /* Fence to be signaled by IRQ handler when the job is complete. */ struct dma_fence *done_fence; @@ -30,8 +30,6 @@ struct panfrost_job { __u32 requirements; __u32 flush_id; - /* Exclusive fences we have taken from the BOs to wait for */ - struct dma_fence **implicit_fences; struct panfrost_gem_mapping **mappings; struct drm_gem_object **bos; u32 bo_count; diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 0581186ebfb3..0da5b3100ab1 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -1,5 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */ + +#include <drm/panfrost_drm.h> + #include <linux/atomic.h> #include <linux/bitfield.h> #include <linux/delay.h> @@ -31,10 +34,13 @@ static int wait_ready(struct panfrost_device *pfdev, u32 as_nr) /* Wait for the MMU status to indicate there is no active command, in * case one is pending. 
*/ ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr), - val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000); + val, !(val & AS_STATUS_AS_ACTIVE), 10, 100000); - if (ret) + if (ret) { + /* The GPU hung, let's trigger a reset */ + panfrost_device_schedule_reset(pfdev); dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n"); + } return ret; } @@ -151,6 +157,7 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) as = mmu->as; if (as >= 0) { int en = atomic_inc_return(&mmu->as_count); + u32 mask = BIT(as) | BIT(16 + as); /* * AS can be retained by active jobs or a perfcnt context, @@ -159,6 +166,18 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) WARN_ON(en >= (NUM_JOB_SLOTS + 1)); list_move(&mmu->list, &pfdev->as_lru_list); + + if (pfdev->as_faulty_mask & mask) { + /* Unhandled pagefault on this AS, the MMU was + * disabled. We need to re-enable the MMU after + * clearing+unmasking the AS interrupts. + */ + mmu_write(pfdev, MMU_INT_CLEAR, mask); + mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask); + pfdev->as_faulty_mask &= ~mask; + panfrost_mmu_enable(pfdev, mmu); + } + goto out; } @@ -208,6 +227,7 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev) spin_lock(&pfdev->as_lock); pfdev->as_alloc_mask = 0; + pfdev->as_faulty_mask = 0; list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) { mmu->as = -1; @@ -337,7 +357,7 @@ static void mmu_tlb_inv_context_s1(void *cookie) static void mmu_tlb_sync_context(void *cookie) { - //struct panfrost_device *pfdev = cookie; + //struct panfrost_mmu *mmu = cookie; // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X } @@ -352,57 +372,10 @@ static const struct iommu_flush_ops mmu_tlb_ops = { .tlb_flush_walk = mmu_tlb_flush_walk, }; -int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv) -{ - struct panfrost_mmu *mmu = &priv->mmu; - struct panfrost_device *pfdev = priv->pfdev; - - INIT_LIST_HEAD(&mmu->list); - mmu->as = -1; - - mmu->pgtbl_cfg = (struct io_pgtable_cfg) { - .pgsize_bitmap = SZ_4K | SZ_2M, - .ias = FIELD_GET(0xff, pfdev->features.mmu_features), - .oas = FIELD_GET(0xff00, pfdev->features.mmu_features), - .coherent_walk = pfdev->coherent, - .tlb = &mmu_tlb_ops, - .iommu_dev = pfdev->dev, - }; - - mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg, - priv); - if (!mmu->pgtbl_ops) - return -EINVAL; - - return 0; -} - -void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv) -{ - struct panfrost_device *pfdev = priv->pfdev; - struct panfrost_mmu *mmu = &priv->mmu; - - spin_lock(&pfdev->as_lock); - if (mmu->as >= 0) { - pm_runtime_get_noresume(pfdev->dev); - if (pm_runtime_active(pfdev->dev)) - panfrost_mmu_disable(pfdev, mmu->as); - pm_runtime_put_autosuspend(pfdev->dev); - - clear_bit(mmu->as, &pfdev->as_alloc_mask); - clear_bit(mmu->as, &pfdev->as_in_use_mask); - list_del(&mmu->list); - } - spin_unlock(&pfdev->as_lock); - - free_io_pgtable_ops(mmu->pgtbl_ops); -} - static struct panfrost_gem_mapping * addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr) { struct panfrost_gem_mapping *mapping = NULL; - struct panfrost_file_priv *priv; struct drm_mm_node *node; u64 offset = addr >> PAGE_SHIFT; struct panfrost_mmu *mmu; @@ -415,11 +388,10 @@ addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr) goto out; found_mmu: - priv = container_of(mmu, struct panfrost_file_priv, mmu); - spin_lock(&priv->mm_lock); + spin_lock(&mmu->mm_lock); - drm_mm_for_each_node(node, &priv->mm) { + drm_mm_for_each_node(node, 
&mmu->mm) { if (offset >= node->start && offset < (node->start + node->size)) { mapping = drm_mm_node_to_panfrost_mapping(node); @@ -429,7 +401,7 @@ found_mmu: } } - spin_unlock(&priv->mm_lock); + spin_unlock(&mmu->mm_lock); out: spin_unlock(&pfdev->as_lock); return mapping; @@ -542,6 +514,107 @@ err_bo: return ret; } +static void panfrost_mmu_release_ctx(struct kref *kref) +{ + struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu, + refcount); + struct panfrost_device *pfdev = mmu->pfdev; + + spin_lock(&pfdev->as_lock); + if (mmu->as >= 0) { + pm_runtime_get_noresume(pfdev->dev); + if (pm_runtime_active(pfdev->dev)) + panfrost_mmu_disable(pfdev, mmu->as); + pm_runtime_put_autosuspend(pfdev->dev); + + clear_bit(mmu->as, &pfdev->as_alloc_mask); + clear_bit(mmu->as, &pfdev->as_in_use_mask); + list_del(&mmu->list); + } + spin_unlock(&pfdev->as_lock); + + free_io_pgtable_ops(mmu->pgtbl_ops); + drm_mm_takedown(&mmu->mm); + kfree(mmu); +} + +void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu) +{ + kref_put(&mmu->refcount, panfrost_mmu_release_ctx); +} + +struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu) +{ + kref_get(&mmu->refcount); + + return mmu; +} + +#define PFN_4G (SZ_4G >> PAGE_SHIFT) +#define PFN_4G_MASK (PFN_4G - 1) +#define PFN_16M (SZ_16M >> PAGE_SHIFT) + +static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node, + unsigned long color, + u64 *start, u64 *end) +{ + /* Executable buffers can't start or end on a 4GB boundary */ + if (!(color & PANFROST_BO_NOEXEC)) { + u64 next_seg; + + if ((*start & PFN_4G_MASK) == 0) + (*start)++; + + if ((*end & PFN_4G_MASK) == 0) + (*end)--; + + next_seg = ALIGN(*start, PFN_4G); + if (next_seg - *start <= PFN_16M) + *start = next_seg + 1; + + *end = min(*end, ALIGN(*start, PFN_4G) - 1); + } +} + +struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev) +{ + struct panfrost_mmu *mmu; + + mmu = kzalloc(sizeof(*mmu), GFP_KERNEL); + if (!mmu) + return ERR_PTR(-ENOMEM); + + mmu->pfdev = pfdev; + spin_lock_init(&mmu->mm_lock); + + /* 4G enough for now. can be 48-bit */ + drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT); + mmu->mm.color_adjust = panfrost_drm_mm_color_adjust; + + INIT_LIST_HEAD(&mmu->list); + mmu->as = -1; + + mmu->pgtbl_cfg = (struct io_pgtable_cfg) { + .pgsize_bitmap = SZ_4K | SZ_2M, + .ias = FIELD_GET(0xff, pfdev->features.mmu_features), + .oas = FIELD_GET(0xff00, pfdev->features.mmu_features), + .coherent_walk = pfdev->coherent, + .tlb = &mmu_tlb_ops, + .iommu_dev = pfdev->dev, + }; + + mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg, + mmu); + if (!mmu->pgtbl_ops) { + kfree(mmu); + return ERR_PTR(-EINVAL); + } + + kref_init(&mmu->refcount); + + return mmu; +} + static const char *access_type_name(struct panfrost_device *pfdev, u32 fault_status) { @@ -605,7 +678,7 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0) ret = panfrost_mmu_map_fault_addr(pfdev, as, addr); - if (ret) + if (ret) { /* terminal fault, print info about the fault */ dev_err(pfdev->dev, "Unhandled Page fault in AS%d at VA 0x%016llX\n" @@ -619,18 +692,32 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) "TODO", fault_status, (fault_status & (1 << 10) ? 
"DECODER FAULT" : "SLAVE FAULT"), - exception_type, panfrost_exception_name(pfdev, exception_type), + exception_type, panfrost_exception_name(exception_type), access_type, access_type_name(pfdev, fault_status), source_id); + spin_lock(&pfdev->as_lock); + /* Ignore MMU interrupts on this AS until it's been + * re-enabled. + */ + pfdev->as_faulty_mask |= mask; + + /* Disable the MMU to kill jobs on this AS. */ + panfrost_mmu_disable(pfdev, as); + spin_unlock(&pfdev->as_lock); + } + status &= ~mask; /* If we received new MMU interrupts, process them before returning. */ if (!status) - status = mmu_read(pfdev, MMU_INT_RAWSTAT); + status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask; } - mmu_write(pfdev, MMU_INT_MASK, ~0); + spin_lock(&pfdev->as_lock); + mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask); + spin_unlock(&pfdev->as_lock); + return IRQ_HANDLED; }; diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h index 44fc2edf63ce..cc2a0d307feb 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.h +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h @@ -18,7 +18,8 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev); u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu); void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu); -int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv); -void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv); +struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu); +void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu); +struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev); #endif diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h index dc9df5457f1c..1940ff86e49a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_regs.h +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h @@ -262,9 +262,6 @@ #define JS_COMMAND_SOFT_STOP_1 0x06 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 1 */ #define JS_COMMAND_HARD_STOP_1 0x07 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 1 */ -#define JS_STATUS_EVENT_ACTIVE 0x08 - - /* MMU regs */ #define MMU_INT_RAWSTAT 0x2000 #define MMU_INT_CLEAR 0x2004 diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index 6fd7f13f1aca..b5a8859739a2 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -440,7 +440,6 @@ static struct drm_simple_display_pipe_funcs pl111_display_funcs = { .enable = pl111_display_enable, .disable = pl111_display_disable, .update = pl111_display_update, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate, diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 854e6c5a563f..fc47b0deb021 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -95,7 +95,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "qxl"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &qxl_driver); if (ret) goto disable_pci; @@ -281,10 +281,8 @@ static struct drm_driver qxl_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, - .gem_prime_mmap = qxl_gem_prime_mmap, .fops = &qxl_fops, .ioctls = qxl_ioctls, - .irq_handler = 
qxl_irq_handler, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index dd6abee55f56..359266d9e860 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -434,12 +434,9 @@ struct drm_gem_object *qxl_gem_prime_import_sg_table( int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); void qxl_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); -int qxl_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma); /* qxl_irq.c */ int qxl_irq_init(struct qxl_device *qdev); -irqreturn_t qxl_irq_handler(int irq, void *arg); void qxl_debugfs_add_files(struct qxl_device *qdev, struct drm_info_list *files, diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c index d312322cacd1..665278ee3b6d 100644 --- a/drivers/gpu/drm/qxl/qxl_irq.c +++ b/drivers/gpu/drm/qxl/qxl_irq.c @@ -25,11 +25,11 @@ #include <linux/pci.h> -#include <drm/drm_irq.h> +#include <drm/drm_drv.h> #include "qxl_drv.h" -irqreturn_t qxl_irq_handler(int irq, void *arg) +static irqreturn_t qxl_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; struct qxl_device *qdev = to_qxl(dev); @@ -81,7 +81,8 @@ static void qxl_client_monitors_config_work_func(struct work_struct *work) int qxl_irq_init(struct qxl_device *qdev) { - struct pci_dev *pdev = to_pci_dev(qdev->ddev.dev); + struct drm_device *ddev = &qdev->ddev; + struct pci_dev *pdev = to_pci_dev(ddev->dev); int ret; init_waitqueue_head(&qdev->display_event); @@ -95,7 +96,7 @@ int qxl_irq_init(struct qxl_device *qdev) atomic_set(&qdev->irq_received_cursor, 0); atomic_set(&qdev->irq_received_io_cmd, 0); qdev->irq_received_error = 0; - ret = drm_irq_install(&qdev->ddev, pdev->irq); + ret = request_irq(pdev->irq, qxl_irq_handler, IRQF_SHARED, ddev->driver->name, ddev); qdev->ram_header->int_mask = QXL_INTERRUPT_MASK; if (unlikely(ret != 0)) { DRM_ERROR("Failed installing irq: %d\n", ret); diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c index 0628d1cc91fe..4a10cb0a413b 100644 --- a/drivers/gpu/drm/qxl/qxl_prime.c +++ b/drivers/gpu/drm/qxl/qxl_prime.c @@ -73,9 +73,3 @@ void qxl_gem_prime_vunmap(struct drm_gem_object *obj, qxl_bo_vunmap(bo); } - -int qxl_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *area) -{ - return -ENOSYS; -} diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 46eea01950cb..cec03238e14d 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1067,15 +1067,16 @@ void radeon_combios_fini(struct radeon_device *rdev) /** * radeon_vga_set_decode - enable/disable vga decode * - * @cookie: radeon_device pointer + * @pdev: PCI device * @state: enable/disable vga decode * * Enable/disable vga decode (all asics). * Returns VGA resource flags. 
*/ -static unsigned int radeon_vga_set_decode(void *cookie, bool state) +static unsigned int radeon_vga_set_decode(struct pci_dev *pdev, bool state) { - struct radeon_device *rdev = cookie; + struct drm_device *dev = pci_get_drvdata(pdev); + struct radeon_device *rdev = dev->dev_private; radeon_vga_set_state(rdev, state); if (state) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | @@ -1434,7 +1435,7 @@ int radeon_device_init(struct radeon_device *rdev, /* if we have > 1 VGA cards, then disable the radeon VGA resources */ /* this will fail for cards that aren't VGA class devices, just * ignore it */ - vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); + vga_client_register(rdev->pdev, radeon_vga_set_decode); if (rdev->flags & RADEON_IS_PX) runtime = true; @@ -1530,7 +1531,7 @@ void radeon_device_fini(struct radeon_device *rdev) vga_switcheroo_unregister_client(rdev->pdev); if (rdev->flags & RADEON_IS_PX) vga_switcheroo_fini_domain_pm_ops(rdev->dev); - vga_client_register(rdev->pdev, NULL, NULL, NULL); + vga_client_unregister(rdev->pdev); if (rdev->rio_mem) pci_iounmap(rdev->pdev, rdev->rio_mem); rdev->rio_mem = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 5c23b77cb81a..c8dd68152d65 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -330,7 +330,7 @@ static int radeon_pci_probe(struct pci_dev *pdev, return -EPROBE_DEFER; /* Get rid of things like offb */ - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "radeondrmfb"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &kms_driver); if (ret) return ret; diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index b2ce642ca4fa..18f2c2e0dfb3 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -288,7 +288,7 @@ static void radeon_fence_check_lockup(struct work_struct *work) return; } - if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) { + if (fence_drv->delayed_irq && rdev->irq.installed) { unsigned long irqflags; fence_drv->delayed_irq = false; diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 84d0b1a3355f..a36ce826d0c0 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -357,7 +357,7 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring) { unsigned long irqflags; - if (!rdev->ddev->irq_enabled) + if (!rdev->irq.installed) return; if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) { @@ -396,7 +396,7 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring) { unsigned long irqflags; - if (!rdev->ddev->irq_enabled) + if (!rdev->irq.installed) return; if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) { @@ -422,7 +422,7 @@ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc) if (crtc < 0 || crtc >= rdev->num_crtc) return; - if (!rdev->ddev->irq_enabled) + if (!rdev->irq.installed) return; if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) { @@ -448,7 +448,7 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc) if (crtc < 0 || crtc >= rdev->num_crtc) return; - if (!rdev->ddev->irq_enabled) + if (!rdev->irq.installed) return; if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) { @@ -470,7 +470,7 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block) { unsigned long irqflags; - if (!rdev->ddev->irq_enabled) + if (!rdev->irq.installed) 
return; spin_lock_irqsave(&rdev->irq.lock, irqflags); @@ -492,7 +492,7 @@ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block) { unsigned long irqflags; - if (!rdev->ddev->irq_enabled) + if (!rdev->irq.installed) return; spin_lock_irqsave(&rdev->irq.lock, irqflags); @@ -514,7 +514,7 @@ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask) unsigned long irqflags; int i; - if (!rdev->ddev->irq_enabled) + if (!rdev->irq.installed) return; spin_lock_irqsave(&rdev->irq.lock, irqflags); @@ -537,7 +537,7 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask) unsigned long irqflags; int i; - if (!rdev->ddev->irq_enabled) + if (!rdev->irq.installed) return; spin_lock_irqsave(&rdev->irq.lock, irqflags); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index bfbff90588cb..4ac26d08ebb4 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -553,14 +553,20 @@ static int rcar_du_remove(struct platform_device *pdev) struct drm_device *ddev = &rcdu->ddev; drm_dev_unregister(ddev); + drm_atomic_helper_shutdown(ddev); drm_kms_helper_poll_fini(ddev); - drm_dev_put(ddev); - return 0; } +static void rcar_du_shutdown(struct platform_device *pdev) +{ + struct rcar_du_device *rcdu = platform_get_drvdata(pdev); + + drm_atomic_helper_shutdown(&rcdu->ddev); +} + static int rcar_du_probe(struct platform_device *pdev) { struct rcar_du_device *rcdu; @@ -593,8 +599,6 @@ static int rcar_du_probe(struct platform_device *pdev) goto error; } - rcdu->ddev.irq_enabled = 1; - /* * Register the DRM device with the core and the connectors with * sysfs. @@ -617,6 +621,7 @@ error: static struct platform_driver rcar_du_platform_driver = { .probe = rcar_du_probe, .remove = rcar_du_remove, + .shutdown = rcar_du_shutdown, .driver = { .name = "rcar-du", .pm = &rcar_du_pm_ops, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index ca3761772211..0daa8bba50f5 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -11,6 +11,7 @@ #include <linux/slab.h> #include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> #include <drm/drm_crtc.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> @@ -53,7 +54,9 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, struct device_node *enc_node) { struct rcar_du_encoder *renc; + struct drm_connector *connector; struct drm_bridge *bridge; + int ret; /* * Locate the DRM bridge from the DT node. For the DPAD outputs, if the @@ -103,9 +106,22 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, renc->output = output; - /* - * Attach the bridge to the encoder. The bridge will create the - * connector. - */ - return drm_bridge_attach(&renc->base, bridge, NULL, 0); + /* Attach the bridge to the encoder. */ + ret = drm_bridge_attach(&renc->base, bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) { + dev_err(rcdu->dev, "failed to attach bridge for output %u\n", + output); + return ret; + } + + /* Create the connector for the chain of bridges. 
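+ * drm_bridge_connector_init() creates a connector backed by the chain of bridges attached with DRM_BRIDGE_ATTACH_NO_CONNECTOR above.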
*/ + connector = drm_bridge_connector_init(&rcdu->ddev, &renc->base); + if (IS_ERR(connector)) { + dev_err(rcdu->dev, + "failed to created connector for output %u\n", output); + return PTR_ERR(connector); + } + + return drm_connector_attach_encoder(connector, &renc->base); } diff --git a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c index 7b8ec8310699..18ed14911b98 100644 --- a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c +++ b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c @@ -75,6 +75,7 @@ static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi, void *data, } static const struct dw_hdmi_plat_data rcar_dw_hdmi_plat_data = { + .output_port = 1, .mode_valid = rcar_hdmi_mode_valid, .configure_phy = rcar_hdmi_phy_configure, }; diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index 70dbbe44bb23..d061b8de748f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -63,7 +63,6 @@ struct rcar_lvds { struct drm_bridge bridge; struct drm_bridge *next_bridge; - struct drm_connector connector; struct drm_panel *panel; void __iomem *mmio; @@ -80,74 +79,12 @@ struct rcar_lvds { #define bridge_to_rcar_lvds(b) \ container_of(b, struct rcar_lvds, bridge) -#define connector_to_rcar_lvds(c) \ - container_of(c, struct rcar_lvds, connector) - static void rcar_lvds_write(struct rcar_lvds *lvds, u32 reg, u32 data) { iowrite32(data, lvds->mmio + reg); } /* ----------------------------------------------------------------------------- - * Connector & Panel - */ - -static int rcar_lvds_connector_get_modes(struct drm_connector *connector) -{ - struct rcar_lvds *lvds = connector_to_rcar_lvds(connector); - - return drm_panel_get_modes(lvds->panel, connector); -} - -static int rcar_lvds_connector_atomic_check(struct drm_connector *connector, - struct drm_atomic_state *state) -{ - struct rcar_lvds *lvds = connector_to_rcar_lvds(connector); - const struct drm_display_mode *panel_mode; - struct drm_connector_state *conn_state; - struct drm_crtc_state *crtc_state; - - conn_state = drm_atomic_get_new_connector_state(state, connector); - if (!conn_state->crtc) - return 0; - - if (list_empty(&connector->modes)) { - dev_dbg(lvds->dev, "connector: empty modes list\n"); - return -EINVAL; - } - - panel_mode = list_first_entry(&connector->modes, - struct drm_display_mode, head); - - /* We're not allowed to modify the resolution. */ - crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - - if (crtc_state->mode.hdisplay != panel_mode->hdisplay || - crtc_state->mode.vdisplay != panel_mode->vdisplay) - return -EINVAL; - - /* The flat panel mode is fixed, just copy it to the adjusted mode. 
*/ - drm_mode_copy(&crtc_state->adjusted_mode, panel_mode); - - return 0; -} - -static const struct drm_connector_helper_funcs rcar_lvds_conn_helper_funcs = { - .get_modes = rcar_lvds_connector_get_modes, - .atomic_check = rcar_lvds_connector_atomic_check, -}; - -static const struct drm_connector_funcs rcar_lvds_conn_funcs = { - .reset = drm_atomic_helper_connector_reset, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -/* ----------------------------------------------------------------------------- * PLL Setup */ @@ -583,11 +520,6 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge, /* Turn the output on. */ lvdcr0 |= LVDCR0_LVRES; rcar_lvds_write(lvds, LVDCR0, lvdcr0); - - if (lvds->panel) { - drm_panel_prepare(lvds->panel); - drm_panel_enable(lvds->panel); - } } static void rcar_lvds_atomic_enable(struct drm_bridge *bridge, @@ -609,11 +541,6 @@ static void rcar_lvds_atomic_disable(struct drm_bridge *bridge, { struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); - if (lvds->panel) { - drm_panel_disable(lvds->panel); - drm_panel_unprepare(lvds->panel); - } - rcar_lvds_write(lvds, LVDCR0, 0); rcar_lvds_write(lvds, LVDCR1, 0); rcar_lvds_write(lvds, LVDPLLCR, 0); @@ -648,45 +575,13 @@ static int rcar_lvds_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); - struct drm_connector *connector = &lvds->connector; - struct drm_encoder *encoder = bridge->encoder; - int ret; - - /* If we have a next bridge just attach it. */ - if (lvds->next_bridge) - return drm_bridge_attach(bridge->encoder, lvds->next_bridge, - bridge, flags); - - if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { - DRM_ERROR("Fix bridge driver to make connector optional!"); - return -EINVAL; - } - - /* Otherwise if we have a panel, create a connector. */ - if (!lvds->panel) - return 0; - - ret = drm_connector_init(bridge->dev, connector, &rcar_lvds_conn_funcs, - DRM_MODE_CONNECTOR_LVDS); - if (ret < 0) - return ret; - - drm_connector_helper_add(connector, &rcar_lvds_conn_helper_funcs); - - ret = drm_connector_attach_encoder(connector, encoder); - if (ret < 0) - return ret; - - return 0; -} -static void rcar_lvds_detach(struct drm_bridge *bridge) -{ + return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge, + flags); } static const struct drm_bridge_funcs rcar_lvds_bridge_ops = { .attach = rcar_lvds_attach, - .detach = rcar_lvds_detach, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, @@ -759,7 +654,7 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds) * that we are expected to generate even pixels from the primary * encoder, and odd pixels from the companion encoder. 
*/ - if (lvds->next_bridge && lvds->next_bridge->timings && + if (lvds->next_bridge->timings && lvds->next_bridge->timings->dual_link) lvds->link_type = RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS; else @@ -811,6 +706,15 @@ static int rcar_lvds_parse_dt(struct rcar_lvds *lvds) if (ret) goto done; + if (lvds->panel) { + lvds->next_bridge = devm_drm_panel_bridge_add(lvds->dev, + lvds->panel); + if (IS_ERR_OR_NULL(lvds->next_bridge)) { + ret = -EINVAL; + goto done; + } + } + if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK) ret = rcar_lvds_parse_dt_companion(lvds); @@ -839,9 +743,8 @@ static struct clk *rcar_lvds_get_clock(struct rcar_lvds *lvds, const char *name, if (PTR_ERR(clk) == -ENOENT && optional) return NULL; - if (PTR_ERR(clk) != -EPROBE_DEFER) - dev_err(lvds->dev, "failed to get %s clock\n", - name ? name : "module"); + dev_err_probe(lvds->dev, PTR_ERR(clk), "failed to get %s clock\n", + name ? name : "module"); return clk; } @@ -919,7 +822,6 @@ static int rcar_lvds_probe(struct platform_device *pdev) if (ret < 0) return ret; - lvds->bridge.driver_private = lvds; lvds->bridge.funcs = &rcar_lvds_bridge_ops; lvds->bridge.of_node = pdev->dev.of_node; diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index cb25c0e8fc9b..558f1b58bd69 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -10,6 +10,8 @@ config DRM_ROCKCHIP select DRM_DW_HDMI if ROCKCHIP_DW_HDMI select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI select DRM_RGB if ROCKCHIP_RGB + select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI + select GENERIC_PHY_MIPI_DPHY if ROCKCHIP_DW_MIPI_DSI select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC help Choose this option if you have a Rockchip soc chipset. diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index ec7729d18cb8..a2262bee5aa4 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -14,6 +14,7 @@ #include <linux/of_device.h> #include <linux/phy/phy.h> #include <linux/pm_runtime.h> +#include <linux/phy/phy.h> #include <linux/regmap.h> #include <video/mipi_display.h> @@ -125,7 +126,9 @@ #define BANDGAP_AND_BIAS_CONTROL 0x20 #define TERMINATION_RESISTER_CONTROL 0x21 #define AFE_BIAS_BANDGAP_ANALOG_PROGRAMMABILITY 0x22 +#define HS_RX_CONTROL_OF_LANE_CLK 0x34 #define HS_RX_CONTROL_OF_LANE_0 0x44 +#define HS_RX_CONTROL_OF_LANE_1 0x54 #define HS_TX_CLOCK_LANE_REQUEST_STATE_TIME_CONTROL 0x60 #define HS_TX_CLOCK_LANE_PREPARE_STATE_TIME_CONTROL 0x61 #define HS_TX_CLOCK_LANE_HS_ZERO_STATE_TIME_CONTROL 0x62 @@ -137,6 +140,9 @@ #define HS_TX_DATA_LANE_HS_ZERO_STATE_TIME_CONTROL 0x72 #define HS_TX_DATA_LANE_TRAIL_STATE_TIME_CONTROL 0x73 #define HS_TX_DATA_LANE_EXIT_STATE_TIME_CONTROL 0x74 +#define HS_RX_DATA_LANE_THS_SETTLE_CONTROL 0x75 +#define HS_RX_CONTROL_OF_LANE_2 0x84 +#define HS_RX_CONTROL_OF_LANE_3 0x94 #define DW_MIPI_NEEDS_PHY_CFG_CLK BIT(0) #define DW_MIPI_NEEDS_GRF_CLK BIT(1) @@ -171,12 +177,20 @@ #define RK3399_TXRX_MASTERSLAVEZ BIT(7) #define RK3399_TXRX_ENABLECLK BIT(6) #define RK3399_TXRX_BASEDIR BIT(5) +#define RK3399_TXRX_SRC_SEL_ISP0 BIT(4) +#define RK3399_TXRX_TURNREQUEST GENMASK(3, 0) #define HIWORD_UPDATE(val, mask) (val | (mask) << 16) #define to_dsi(nm) container_of(nm, struct dw_mipi_dsi_rockchip, nm) enum { + DW_DSI_USAGE_IDLE, + DW_DSI_USAGE_DSI, + DW_DSI_USAGE_PHY, +}; + +enum { BANDGAP_97_07, BANDGAP_98_05, BANDGAP_99_02, @@ -213,6 +227,10 @@ struct rockchip_dw_dsi_chip_data { u32 
lanecfg2_grf_reg; u32 lanecfg2; + int (*dphy_rx_init)(struct phy *phy); + int (*dphy_rx_power_on)(struct phy *phy); + int (*dphy_rx_power_off)(struct phy *phy); + unsigned int flags; unsigned int max_data_lanes; }; @@ -223,6 +241,7 @@ struct dw_mipi_dsi_rockchip { void __iomem *base; struct regmap *grf_regmap; + struct clk *pclk; struct clk *pllref_clk; struct clk *grf_clk; struct clk *phy_cfg_clk; @@ -235,6 +254,12 @@ struct dw_mipi_dsi_rockchip { struct phy *phy; union phy_configure_opts phy_opts; + /* being a phy for other mipi hosts */ + unsigned int usage_mode; + struct mutex usage_mutex; + struct phy *dphy; + struct phy_configure_opts_mipi_dphy dphy_config; + unsigned int lane_mbps; /* per lane */ u16 input_div; u16 feedback_div; @@ -978,6 +1003,17 @@ static int dw_mipi_dsi_rockchip_host_attach(void *priv_data, struct device *second; int ret; + mutex_lock(&dsi->usage_mutex); + + if (dsi->usage_mode != DW_DSI_USAGE_IDLE) { + DRM_DEV_ERROR(dsi->dev, "dsi controller already in use\n"); + mutex_unlock(&dsi->usage_mutex); + return -EBUSY; + } + + dsi->usage_mode = DW_DSI_USAGE_DSI; + mutex_unlock(&dsi->usage_mutex); + ret = component_add(dsi->dev, &dw_mipi_dsi_rockchip_ops); if (ret) { DRM_DEV_ERROR(dsi->dev, "Failed to register component: %d\n", @@ -1013,6 +1049,10 @@ static int dw_mipi_dsi_rockchip_host_detach(void *priv_data, component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops); + mutex_lock(&dsi->usage_mutex); + dsi->usage_mode = DW_DSI_USAGE_IDLE; + mutex_unlock(&dsi->usage_mutex); + return 0; } @@ -1021,11 +1061,227 @@ static const struct dw_mipi_dsi_host_ops dw_mipi_dsi_rockchip_host_ops = { .detach = dw_mipi_dsi_rockchip_host_detach, }; +static int dw_mipi_dsi_rockchip_dphy_bind(struct device *dev, + struct device *master, + void *data) +{ + /* + * Nothing to do when used as a dphy. + * Just make the rest of Rockchip-DRM happy + * by being here. + */ + + return 0; +} + +static void dw_mipi_dsi_rockchip_dphy_unbind(struct device *dev, + struct device *master, + void *data) +{ + /* Nothing to do when used as a dphy. 
*/ +} + +static const struct component_ops dw_mipi_dsi_rockchip_dphy_ops = { + .bind = dw_mipi_dsi_rockchip_dphy_bind, + .unbind = dw_mipi_dsi_rockchip_dphy_unbind, +}; + +static int dw_mipi_dsi_dphy_init(struct phy *phy) +{ + struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy); + int ret; + + mutex_lock(&dsi->usage_mutex); + + if (dsi->usage_mode != DW_DSI_USAGE_IDLE) { + DRM_DEV_ERROR(dsi->dev, "dsi controller already in use\n"); + mutex_unlock(&dsi->usage_mutex); + return -EBUSY; + } + + dsi->usage_mode = DW_DSI_USAGE_PHY; + mutex_unlock(&dsi->usage_mutex); + + ret = component_add(dsi->dev, &dw_mipi_dsi_rockchip_dphy_ops); + if (ret < 0) + goto err_graph; + + if (dsi->cdata->dphy_rx_init) { + ret = clk_prepare_enable(dsi->pclk); + if (ret < 0) + goto err_init; + + ret = clk_prepare_enable(dsi->grf_clk); + if (ret) { + clk_disable_unprepare(dsi->pclk); + goto err_init; + } + + ret = dsi->cdata->dphy_rx_init(phy); + clk_disable_unprepare(dsi->grf_clk); + clk_disable_unprepare(dsi->pclk); + if (ret < 0) + goto err_init; + } + + return 0; + +err_init: + component_del(dsi->dev, &dw_mipi_dsi_rockchip_dphy_ops); +err_graph: + mutex_lock(&dsi->usage_mutex); + dsi->usage_mode = DW_DSI_USAGE_IDLE; + mutex_unlock(&dsi->usage_mutex); + + return ret; +} + +static int dw_mipi_dsi_dphy_exit(struct phy *phy) +{ + struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy); + + component_del(dsi->dev, &dw_mipi_dsi_rockchip_dphy_ops); + + mutex_lock(&dsi->usage_mutex); + dsi->usage_mode = DW_DSI_USAGE_IDLE; + mutex_unlock(&dsi->usage_mutex); + + return 0; +} + +static int dw_mipi_dsi_dphy_configure(struct phy *phy, union phy_configure_opts *opts) +{ + struct phy_configure_opts_mipi_dphy *config = &opts->mipi_dphy; + struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy); + int ret; + + ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy); + if (ret) + return ret; + + dsi->dphy_config = *config; + dsi->lane_mbps = div_u64(config->hs_clk_rate, 1000 * 1000 * 1); + + return 0; +} + +static int dw_mipi_dsi_dphy_power_on(struct phy *phy) +{ + struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy); + int i, ret; + + DRM_DEV_DEBUG(dsi->dev, "lanes %d - data_rate_mbps %u\n", + dsi->dphy_config.lanes, dsi->lane_mbps); + + i = max_mbps_to_parameter(dsi->lane_mbps); + if (i < 0) { + DRM_DEV_ERROR(dsi->dev, "failed to get parameter for %dmbps clock\n", + dsi->lane_mbps); + return i; + } + + ret = pm_runtime_get_sync(dsi->dev); + if (ret < 0) { + DRM_DEV_ERROR(dsi->dev, "failed to enable device: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(dsi->pclk); + if (ret) { + DRM_DEV_ERROR(dsi->dev, "Failed to enable pclk: %d\n", ret); + goto err_pclk; + } + + ret = clk_prepare_enable(dsi->grf_clk); + if (ret) { + DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret); + goto err_grf_clk; + } + + ret = clk_prepare_enable(dsi->phy_cfg_clk); + if (ret) { + DRM_DEV_ERROR(dsi->dev, "Failed to enable phy_cfg_clk: %d\n", ret); + goto err_phy_cfg_clk; + } + + /* do soc-variant specific init */ + if (dsi->cdata->dphy_rx_power_on) { + ret = dsi->cdata->dphy_rx_power_on(phy); + if (ret < 0) { + DRM_DEV_ERROR(dsi->dev, "hardware-specific phy bringup failed: %d\n", ret); + goto err_pwr_on; + } + } + + /* + * Configure hsfreqrange according to frequency values + * Set clock lane and hsfreqrange by lane0(test code 0x44) + */ + dw_mipi_dsi_phy_write(dsi, HS_RX_CONTROL_OF_LANE_CLK, 0); + dw_mipi_dsi_phy_write(dsi, HS_RX_CONTROL_OF_LANE_0, + HSFREQRANGE_SEL(dppa_map[i].hsfreqrange)); + dw_mipi_dsi_phy_write(dsi, 
HS_RX_CONTROL_OF_LANE_1, 0); + dw_mipi_dsi_phy_write(dsi, HS_RX_CONTROL_OF_LANE_2, 0); + dw_mipi_dsi_phy_write(dsi, HS_RX_CONTROL_OF_LANE_3, 0); + + /* Normal operation */ + dw_mipi_dsi_phy_write(dsi, 0x0, 0); + + clk_disable_unprepare(dsi->phy_cfg_clk); + clk_disable_unprepare(dsi->grf_clk); + + return ret; + +err_pwr_on: + clk_disable_unprepare(dsi->phy_cfg_clk); +err_phy_cfg_clk: + clk_disable_unprepare(dsi->grf_clk); +err_grf_clk: + clk_disable_unprepare(dsi->pclk); +err_pclk: + pm_runtime_put(dsi->dev); + return ret; +} + +static int dw_mipi_dsi_dphy_power_off(struct phy *phy) +{ + struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy); + int ret; + + ret = clk_prepare_enable(dsi->grf_clk); + if (ret) { + DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret); + return ret; + } + + if (dsi->cdata->dphy_rx_power_off) { + ret = dsi->cdata->dphy_rx_power_off(phy); + if (ret < 0) + DRM_DEV_ERROR(dsi->dev, "hardware-specific phy shutdown failed: %d\n", ret); + } + + clk_disable_unprepare(dsi->grf_clk); + clk_disable_unprepare(dsi->pclk); + + pm_runtime_put(dsi->dev); + + return ret; +} + +static const struct phy_ops dw_mipi_dsi_dphy_ops = { + .configure = dw_mipi_dsi_dphy_configure, + .power_on = dw_mipi_dsi_dphy_power_on, + .power_off = dw_mipi_dsi_dphy_power_off, + .init = dw_mipi_dsi_dphy_init, + .exit = dw_mipi_dsi_dphy_exit, +}; + static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct dw_mipi_dsi_rockchip *dsi; + struct phy_provider *phy_provider; struct resource *res; const struct rockchip_dw_dsi_chip_data *cdata = of_device_get_match_data(dev); @@ -1065,6 +1321,13 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev) return ret; } + dsi->pclk = devm_clk_get(dev, "pclk"); + if (IS_ERR(dsi->pclk)) { + ret = PTR_ERR(dsi->pclk); + DRM_DEV_ERROR(dev, "Unable to get pclk: %d\n", ret); + return ret; + } + dsi->pllref_clk = devm_clk_get(dev, "ref"); if (IS_ERR(dsi->pllref_clk)) { if (dsi->phy) { @@ -1115,6 +1378,19 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev) dsi->pdata.priv_data = dsi; platform_set_drvdata(pdev, dsi); + mutex_init(&dsi->usage_mutex); + + dsi->dphy = devm_phy_create(dev, NULL, &dw_mipi_dsi_dphy_ops); + if (IS_ERR(dsi->dphy)) { + DRM_DEV_ERROR(&pdev->dev, "failed to create PHY\n"); + return PTR_ERR(dsi->dphy); + } + + phy_set_drvdata(dsi->dphy, dsi); + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(phy_provider)) + return PTR_ERR(phy_provider); + dsi->dmd = dw_mipi_dsi_probe(pdev, &dsi->pdata); if (IS_ERR(dsi->dmd)) { ret = PTR_ERR(dsi->dmd); @@ -1178,6 +1454,75 @@ static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = { { /* sentinel */ } }; +static int rk3399_dphy_tx1rx1_init(struct phy *phy) +{ + struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy); + + /* + * Set TX1RX1 source to isp1. + * Assume ISP0 is supplied by the RX0 dphy. 
+ */ + regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24, + HIWORD_UPDATE(0, RK3399_TXRX_SRC_SEL_ISP0)); + regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24, + HIWORD_UPDATE(0, RK3399_TXRX_MASTERSLAVEZ)); + regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24, + HIWORD_UPDATE(0, RK3399_TXRX_BASEDIR)); + regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23, + HIWORD_UPDATE(0, RK3399_DSI1_ENABLE)); + + return 0; +} + +static int rk3399_dphy_tx1rx1_power_on(struct phy *phy) +{ + struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy); + + /* tester reset pulse */ + dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_TESTCLR); + usleep_range(100, 150); + + regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24, + HIWORD_UPDATE(0, RK3399_TXRX_MASTERSLAVEZ)); + regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24, + HIWORD_UPDATE(RK3399_TXRX_BASEDIR, RK3399_TXRX_BASEDIR)); + + regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23, + HIWORD_UPDATE(0, RK3399_DSI1_FORCERXMODE)); + regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23, + HIWORD_UPDATE(0, RK3399_DSI1_FORCETXSTOPMODE)); + + /* Disable lane turn around, which is ignored in receive mode */ + regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24, + HIWORD_UPDATE(0, RK3399_TXRX_TURNREQUEST)); + regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23, + HIWORD_UPDATE(RK3399_DSI1_TURNDISABLE, + RK3399_DSI1_TURNDISABLE)); + usleep_range(100, 150); + + dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR); + usleep_range(100, 150); + + /* Enable dphy lanes */ + regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23, + HIWORD_UPDATE(GENMASK(dsi->dphy_config.lanes - 1, 0), + RK3399_DSI1_ENABLE)); + + usleep_range(100, 150); + + return 0; +} + +static int rk3399_dphy_tx1rx1_power_off(struct phy *phy) +{ + struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy); + + regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23, + HIWORD_UPDATE(0, RK3399_DSI1_ENABLE)); + + return 0; +} + static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = { { .reg = 0xff960000, @@ -1220,6 +1565,10 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = { .flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK, .max_data_lanes = 4, + + .dphy_rx_init = rk3399_dphy_tx1rx1_init, + .dphy_rx_power_on = rk3399_dphy_tx1rx1_power_on, + .dphy_rx_power_off = rk3399_dphy_tx1rx1_power_off, }, { /* sentinel */ } }; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index b730b8d5d949..bfba9793d238 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -116,7 +116,7 @@ static int rockchip_drm_bind(struct device *dev) int ret; /* Remove existing drivers that may own the framebuffer memory. */ - ret = drm_aperture_remove_framebuffers(false, "rockchip-drm-fb"); + ret = drm_aperture_remove_framebuffers(false, &rockchip_drm_driver); if (ret) { DRM_DEV_ERROR(dev, "Failed to remove existing framebuffers - %d.\n", @@ -162,12 +162,6 @@ static int rockchip_drm_bind(struct device *dev) drm_mode_config_reset(drm_dev); - /* - * enable drm irq mode. - * - with irq_enabled = true, we can use the vblank feature. 
- */ - drm_dev->irq_enabled = true; - ret = rockchip_drm_fbdev_init(drm_dev); if (ret) goto err_unbind_all; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index f5b9028a16a3..ba9e14da41b4 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1110,7 +1110,6 @@ static const struct drm_plane_helper_funcs plane_helper_funcs = { .atomic_disable = vop_plane_atomic_disable, .atomic_async_check = vop_plane_atomic_async_check, .atomic_async_update = vop_plane_atomic_async_update, - .prepare_fb = drm_gem_plane_helper_prepare_fb, }; static const struct drm_plane_funcs vop_plane_funcs = { diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index 489d63c05c0d..551653940e39 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -636,11 +636,8 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, } } else { ret = drm_bridge_attach(encoder, lvds->bridge, NULL, 0); - if (ret) { - DRM_DEV_ERROR(drm_dev->dev, - "failed to attach bridge: %d\n", ret); + if (ret) goto err_free_encoder; - } } pm_runtime_enable(dev); diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index c079714477d8..d691d9bef8e7 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c @@ -143,11 +143,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev, rgb->bridge = bridge; ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0); - if (ret) { - DRM_DEV_ERROR(drm_dev->dev, - "failed to attach bridge: %d\n", ret); + if (ret) goto err_free_encoder; - } return rgb; diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index a2a953693b45..67382621b429 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -232,7 +232,7 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) { if (sched->timeout != MAX_SCHEDULE_TIMEOUT && !list_empty(&sched->pending_list)) - schedule_delayed_work(&sched->work_tdr, sched->timeout); + queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout); } /** @@ -244,7 +244,7 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) */ void drm_sched_fault(struct drm_gpu_scheduler *sched) { - mod_delayed_work(system_wq, &sched->work_tdr, 0); + mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0); } EXPORT_SYMBOL(drm_sched_fault); @@ -270,7 +270,7 @@ unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched) * Modify the timeout to an arbitrarily large value. 
This also prevents * the timeout to be restarted when new submissions arrive */ - if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT) + if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT) && time_after(sched_timeout, now)) return sched_timeout - now; else @@ -294,7 +294,7 @@ void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, if (list_empty(&sched->pending_list)) cancel_delayed_work(&sched->work_tdr); else - mod_delayed_work(system_wq, &sched->work_tdr, remaining); + mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining); spin_unlock(&sched->job_list_lock); } @@ -802,10 +802,10 @@ static int drm_sched_main(void *param) sched_job = drm_sched_entity_pop_job(entity); - complete(&entity->entity_idle); - - if (!sched_job) + if (!sched_job) { + complete(&entity->entity_idle); continue; + } s_fence = sched_job->s_fence; @@ -814,6 +814,7 @@ static int drm_sched_main(void *param) trace_drm_run_job(sched_job, entity); fence = sched->ops->run_job(sched_job); + complete(&entity->entity_idle); drm_sched_fence_scheduled(s_fence); if (!IS_ERR_OR_NULL(fence)) { @@ -846,6 +847,8 @@ static int drm_sched_main(void *param) * @hw_submission: number of hw submissions that can be in flight * @hang_limit: number of times to allow a job to hang before dropping it * @timeout: timeout value in jiffies for the scheduler + * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is + * used * @score: optional score atomic shared with other schedulers * @name: name used for debugging * @@ -853,7 +856,8 @@ static int drm_sched_main(void *param) */ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, - unsigned hw_submission, unsigned hang_limit, long timeout, + unsigned hw_submission, unsigned hang_limit, + long timeout, struct workqueue_struct *timeout_wq, atomic_t *score, const char *name) { int i, ret; @@ -861,6 +865,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, sched->hw_submission_limit = hw_submission; sched->name = name; sched->timeout = timeout; + sched->timeout_wq = timeout_wq ? : system_wq; sched->hang_limit = hang_limit; sched->score = score ? 
score : &sched->_score; for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++) diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 0a02b7092c04..7db01904d18d 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -18,7 +18,6 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_irq.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -130,7 +129,6 @@ DEFINE_DRM_GEM_CMA_FOPS(shmob_drm_fops); static const struct drm_driver shmob_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET, - .irq_handler = shmob_drm_irq, DRM_GEM_CMA_DRIVER_OPS, .fops = &shmob_drm_fops, .name = "shmob-drm", @@ -183,7 +181,7 @@ static int shmob_drm_remove(struct platform_device *pdev) drm_dev_unregister(ddev); drm_kms_helper_poll_fini(ddev); - drm_irq_uninstall(ddev); + free_irq(sdev->irq, ddev); drm_dev_put(ddev); return 0; @@ -258,7 +256,13 @@ static int shmob_drm_probe(struct platform_device *pdev) goto err_modeset_cleanup; } - ret = drm_irq_install(ddev, platform_get_irq(pdev, 0)); + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto err_modeset_cleanup; + sdev->irq = ret; + + ret = request_irq(sdev->irq, shmob_drm_irq, 0, ddev->driver->name, + ddev); if (ret < 0) { dev_err(&pdev->dev, "failed to install IRQ handler\n"); goto err_modeset_cleanup; @@ -275,7 +279,7 @@ static int shmob_drm_probe(struct platform_device *pdev) return 0; err_irq_uninstall: - drm_irq_uninstall(ddev); + free_irq(sdev->irq, ddev); err_modeset_cleanup: drm_kms_helper_poll_fini(ddev); err_free_drm_dev: diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.h b/drivers/gpu/drm/shmobile/shmob_drm_drv.h index 80dc4b1020aa..4964ddd5ab74 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.h +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.h @@ -29,6 +29,7 @@ struct shmob_drm_device { u32 lddckr; u32 ldmt1r; + unsigned int irq; spinlock_t irq_lock; /* Protects hardware LDINTR register */ struct drm_device *ddev; diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c index 319962a2c17b..9caaf3ccfabe 100644 --- a/drivers/gpu/drm/sti/sti_compositor.c +++ b/drivers/gpu/drm/sti/sti_compositor.c @@ -145,8 +145,6 @@ static int sti_compositor_bind(struct device *dev, } drm_vblank_init(drm_dev, crtc_id); - /* Allow usage of vblank without having to call drm_irq_install */ - drm_dev->irq_enabled = 1; return 0; } diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index ddb4184f0726..b6ee8a82e656 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -463,10 +463,8 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data) drm_bridge_add(bridge); err = drm_bridge_attach(encoder, bridge, NULL, 0); - if (err) { - DRM_ERROR("Failed to attach bridge\n"); + if (err) return err; - } dvo->bridge = bridge; connector->encoder = encoder; diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c index 8399d337589d..32cb41b2202f 100644 --- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c +++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c @@ -309,14 +309,23 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode, return 0; } +#define DSI_PHY_DELAY(fp, vp, mbps) DIV_ROUND_UP((fp) * (mbps) + 1000 * (vp), 8000) + static int dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps, struct dw_mipi_dsi_dphy_timing *timing) { - 
timing->clk_hs2lp = 0x40; - timing->clk_lp2hs = 0x40; - timing->data_hs2lp = 0x40; - timing->data_lp2hs = 0x40; + /* + * From STM32MP157 datasheet, valid for STM32F469, STM32F7x9, STM32H747 + * phy_clkhs2lp_time = (272+136*UI)/(8*UI) + * phy_clklp2hs_time = (512+40*UI)/(8*UI) + * phy_hs2lp_time = (192+64*UI)/(8*UI) + * phy_lp2hs_time = (256+32*UI)/(8*UI) + */ + timing->clk_hs2lp = DSI_PHY_DELAY(272, 136, lane_mbps); + timing->clk_lp2hs = DSI_PHY_DELAY(512, 40, lane_mbps); + timing->data_hs2lp = DSI_PHY_DELAY(192, 64, lane_mbps); + timing->data_lp2hs = DSI_PHY_DELAY(256, 32, lane_mbps); return 0; } diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 08b71248044d..195de30eb90c 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -947,7 +947,6 @@ static const struct drm_plane_funcs ltdc_plane_funcs = { }; static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = { - .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = ltdc_plane_atomic_check, .atomic_update = ltdc_plane_atomic_update, .atomic_disable = ltdc_plane_atomic_disable, @@ -1122,8 +1121,9 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge) ret = drm_bridge_attach(encoder, bridge, NULL, 0); if (ret) { - drm_encoder_cleanup(encoder); - return -EINVAL; + if (ret != -EPROBE_DEFER) + drm_encoder_cleanup(encoder); + return ret; } DRM_DEBUG_DRIVER("Bridge encoder:%d created\n", encoder->base.id); @@ -1266,7 +1266,8 @@ int ltdc_load(struct drm_device *ddev) if (bridge) { ret = ltdc_encoder_init(ddev, bridge); if (ret) { - DRM_ERROR("init encoder endpoint %d\n", i); + if (ret != -EPROBE_DEFER) + DRM_ERROR("init encoder endpoint %d\n", i); goto err; } } @@ -1339,9 +1340,6 @@ int ltdc_load(struct drm_device *ddev) goto err; } - /* Allow usage of vblank without having to call drm_irq_install */ - ddev->irq_enabled = 1; - clk_disable_unprepare(ldev->pixel_clk); pinctrl_pm_select_sleep_state(ddev->dev); diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index af335f58bdfc..54dd562e294c 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -97,10 +97,8 @@ static int sun4i_drv_bind(struct device *dev) if (ret) goto cleanup_mode_config; - drm->irq_enabled = true; - /* Remove early framebuffers (ie. 
simplefb) */ - ret = drm_aperture_remove_framebuffers(false, "sun4i-drm-fb"); + ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver); if (ret) goto cleanup_mode_config; diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c index 11771bdd6e7c..929e95f86b5b 100644 --- a/drivers/gpu/drm/sun4i/sun4i_layer.c +++ b/drivers/gpu/drm/sun4i/sun4i_layer.c @@ -127,7 +127,6 @@ static bool sun4i_layer_format_mod_supported(struct drm_plane *plane, } static const struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = { - .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_disable = sun4i_backend_layer_atomic_disable, .atomic_update = sun4i_backend_layer_atomic_update, }; diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c index ac570437172e..6716e895ae8a 100644 --- a/drivers/gpu/drm/sun4i/sun4i_lvds.c +++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c @@ -142,10 +142,8 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon) if (bridge) { ret = drm_bridge_attach(encoder, bridge, NULL, 0); - if (ret) { - dev_err(drm->dev, "Couldn't attach our bridge\n"); + if (ret) goto err_cleanup_connector; - } } return 0; diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index e172426eb7e9..dfb6acc42f02 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -234,10 +234,8 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) if (rgb->bridge) { ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0); - if (ret) { - dev_err(drm->dev, "Couldn't attach our bridge\n"); + if (ret) goto err_cleanup_connector; - } } return 0; diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c index e779855bcd6e..7845c2a53a7f 100644 --- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c @@ -332,7 +332,6 @@ static void sun8i_ui_layer_atomic_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = { - .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = sun8i_ui_layer_atomic_check, .atomic_disable = sun8i_ui_layer_atomic_disable, .atomic_update = sun8i_ui_layer_atomic_update, diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c index 1c86c2dd0bbf..bb7c43036dfa 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c @@ -436,7 +436,6 @@ static void sun8i_vi_layer_atomic_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = { - .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = sun8i_vi_layer_atomic_check, .atomic_disable = sun8i_vi_layer_atomic_disable, .atomic_update = sun8i_vi_layer_atomic_update, diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index f96c237b2242..8c6069b33160 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1188,13 +1188,6 @@ static int host1x_drm_probe(struct host1x_device *dev) goto device; } - /* - * We don't use the drm_irq_install() helpers provided by the DRM - * core, so we need to set this manually in order to allow the - * DRM_IOCTL_WAIT_VBLANK to operate correctly. 
- */ - drm->irq_enabled = true; - /* syncpoints are used for full 32-bit hardware VBLANK counters */ drm->max_vblank_count = 0xffffffff; @@ -1204,7 +1197,7 @@ static int host1x_drm_probe(struct host1x_device *dev) drm_mode_config_reset(drm); - err = drm_aperture_remove_framebuffers(false, "tegradrmfb"); + err = drm_aperture_remove_framebuffers(false, &tegra_drm_driver); if (err < 0) goto hub; diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index 4142a56ca764..606c78a2b988 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -275,11 +275,8 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) if (output->bridge) { err = drm_bridge_attach(&output->encoder, output->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); - if (err) { - dev_err(output->dev, "failed to attach bridge: %d\n", - err); + if (err) return err; - } connector = drm_bridge_connector_init(drm, &output->encoder); if (IS_ERR(connector)) { diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c index a5ec7931ef6b..2ed3e3296776 100644 --- a/drivers/gpu/drm/tidss/tidss_irq.c +++ b/drivers/gpu/drm/tidss/tidss_irq.c @@ -57,9 +57,6 @@ irqreturn_t tidss_irq_handler(int irq, void *arg) unsigned int id; dispc_irq_t irqstatus; - if (WARN_ON(!ddev->irq_enabled)) - return IRQ_NONE; - irqstatus = dispc_read_and_clear_irqstatus(tidss->dispc); for (id = 0; id < tidss->num_crtcs; id++) { diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c index 95f8e0f78e32..666e527a0acf 100644 --- a/drivers/gpu/drm/tidss/tidss_kms.c +++ b/drivers/gpu/drm/tidss/tidss_kms.c @@ -227,10 +227,8 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss) } ret = drm_bridge_attach(enc, pipes[i].bridge, NULL, 0); - if (ret) { - dev_err(tidss->dev, "bridge attach failed: %d\n", ret); + if (ret) return ret; - } } /* create overlay planes of the leftover planes */ diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c index 1acd15aa4193..217415ec8eea 100644 --- a/drivers/gpu/drm/tidss/tidss_plane.c +++ b/drivers/gpu/drm/tidss/tidss_plane.c @@ -158,7 +158,6 @@ static void drm_plane_destroy(struct drm_plane *plane) } static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = { - .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = tidss_plane_atomic_check, .atomic_update = tidss_plane_atomic_update, .atomic_disable = tidss_plane_atomic_disable, diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c index b177525588c1..7594cf6e186e 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c @@ -93,10 +93,8 @@ int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge) priv->external_encoder->possible_crtcs = BIT(0); ret = drm_bridge_attach(priv->external_encoder, bridge, NULL, 0); - if (ret) { - dev_err(ddev->dev, "drm_bridge_attach() failed %d\n", ret); + if (ret) return ret; - } tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_default); diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index d46f95d9196d..d31be274a2bd 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -10,6 +10,19 @@ config DRM_ARCPGU If M is selected the module will be called arcpgu. 
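A recurring simplification in the hunks above (rockchip, sti, sun4i, tegra, tidss and tilcdc) is dropping the driver-side error message after drm_bridge_attach(), on the assumption that the core helper now logs its own diagnostic when the attach fails. A minimal sketch of the resulting call site, with purely illustrative function and variable names, is:

#include <drm/drm_bridge.h>

static int example_output_init(struct drm_encoder *encoder,
			       struct drm_bridge *bridge)
{
	int ret;

	/*
	 * No local dev_err() here; drm_bridge_attach() is assumed to
	 * print its own diagnostic on failure, so the caller only
	 * propagates the error code.
	 */
	ret = drm_bridge_attach(encoder, bridge, NULL, 0);
	if (ret)
		return ret;

	return 0;
}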
+config DRM_BOCHS + tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" + depends on DRM && PCI && MMU + select DRM_KMS_HELPER + select DRM_VRAM_HELPER + select DRM_TTM + select DRM_TTM_HELPER + help + This is a KMS driver for qemu's stdvga output. Choose this option + for qemu. + + If M is selected the module will be called bochs. + config DRM_CIRRUS_QEMU tristate "Cirrus driver for QEMU emulated device" depends on DRM && PCI && MMU @@ -51,8 +64,8 @@ config DRM_SIMPLEDRM buffer, size, and display format must be provided via device tree, UEFI, VESA, etc. - On x86 and compatible, you should also select CONFIG_X86_SYSFB to - use UEFI and VESA framebuffers. + On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB + to use UEFI and VESA framebuffers. config TINYDRM_HX8357D tristate "DRM support for HX8357D display panels" diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile index 9cc847e756da..e09942895c77 100644 --- a/drivers/gpu/drm/tiny/Makefile +++ b/drivers/gpu/drm/tiny/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o +obj-$(CONFIG_DRM_BOCHS) += bochs.o obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o obj-$(CONFIG_DRM_GM12U320) += gm12u320.o obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c new file mode 100644 index 000000000000..73415fa9ae0f --- /dev/null +++ b/drivers/gpu/drm/tiny/bochs.c @@ -0,0 +1,733 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/console.h> +#include <linux/pci.h> + +#include <drm/drm_aperture.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_vram_helper.h> +#include <drm/drm_managed.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#include <video/vga.h> + +/* ---------------------------------------------------------------------- */ + +#define VBE_DISPI_IOPORT_INDEX 0x01CE +#define VBE_DISPI_IOPORT_DATA 0x01CF + +#define VBE_DISPI_INDEX_ID 0x0 +#define VBE_DISPI_INDEX_XRES 0x1 +#define VBE_DISPI_INDEX_YRES 0x2 +#define VBE_DISPI_INDEX_BPP 0x3 +#define VBE_DISPI_INDEX_ENABLE 0x4 +#define VBE_DISPI_INDEX_BANK 0x5 +#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6 +#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7 +#define VBE_DISPI_INDEX_X_OFFSET 0x8 +#define VBE_DISPI_INDEX_Y_OFFSET 0x9 +#define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa + +#define VBE_DISPI_ID0 0xB0C0 +#define VBE_DISPI_ID1 0xB0C1 +#define VBE_DISPI_ID2 0xB0C2 +#define VBE_DISPI_ID3 0xB0C3 +#define VBE_DISPI_ID4 0xB0C4 +#define VBE_DISPI_ID5 0xB0C5 + +#define VBE_DISPI_DISABLED 0x00 +#define VBE_DISPI_ENABLED 0x01 +#define VBE_DISPI_GETCAPS 0x02 +#define VBE_DISPI_8BIT_DAC 0x20 +#define VBE_DISPI_LFB_ENABLED 0x40 +#define VBE_DISPI_NOCLEARMEM 0x80 + +static int bochs_modeset = -1; +static int defx = 1024; +static int defy = 768; + +module_param_named(modeset, bochs_modeset, int, 0444); +MODULE_PARM_DESC(modeset, "enable/disable kernel modesetting"); + +module_param(defx, int, 0444); +module_param(defy, int, 0444); +MODULE_PARM_DESC(defx, "default x resolution"); +MODULE_PARM_DESC(defy, "default y resolution"); + +/* ---------------------------------------------------------------------- */ + +enum bochs_types { + BOCHS_QEMU_STDVGA, + BOCHS_UNKNOWN, +}; + +struct bochs_device { + /* hw */ + void __iomem *mmio; + int ioports; + void __iomem *fb_map; + unsigned long fb_base; + 
unsigned long fb_size; + unsigned long qext_size; + + /* mode */ + u16 xres; + u16 yres; + u16 yres_virtual; + u32 stride; + u32 bpp; + struct edid *edid; + + /* drm */ + struct drm_device *dev; + struct drm_simple_display_pipe pipe; + struct drm_connector connector; +}; + +/* ---------------------------------------------------------------------- */ + +static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val) +{ + if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df)) + return; + + if (bochs->mmio) { + int offset = ioport - 0x3c0 + 0x400; + + writeb(val, bochs->mmio + offset); + } else { + outb(val, ioport); + } +} + +static u8 bochs_vga_readb(struct bochs_device *bochs, u16 ioport) +{ + if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df)) + return 0xff; + + if (bochs->mmio) { + int offset = ioport - 0x3c0 + 0x400; + + return readb(bochs->mmio + offset); + } else { + return inb(ioport); + } +} + +static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg) +{ + u16 ret = 0; + + if (bochs->mmio) { + int offset = 0x500 + (reg << 1); + + ret = readw(bochs->mmio + offset); + } else { + outw(reg, VBE_DISPI_IOPORT_INDEX); + ret = inw(VBE_DISPI_IOPORT_DATA); + } + return ret; +} + +static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val) +{ + if (bochs->mmio) { + int offset = 0x500 + (reg << 1); + + writew(val, bochs->mmio + offset); + } else { + outw(reg, VBE_DISPI_IOPORT_INDEX); + outw(val, VBE_DISPI_IOPORT_DATA); + } +} + +static void bochs_hw_set_big_endian(struct bochs_device *bochs) +{ + if (bochs->qext_size < 8) + return; + + writel(0xbebebebe, bochs->mmio + 0x604); +} + +static void bochs_hw_set_little_endian(struct bochs_device *bochs) +{ + if (bochs->qext_size < 8) + return; + + writel(0x1e1e1e1e, bochs->mmio + 0x604); +} + +#ifdef __BIG_ENDIAN +#define bochs_hw_set_native_endian(_b) bochs_hw_set_big_endian(_b) +#else +#define bochs_hw_set_native_endian(_b) bochs_hw_set_little_endian(_b) +#endif + +static int bochs_get_edid_block(void *data, u8 *buf, + unsigned int block, size_t len) +{ + struct bochs_device *bochs = data; + size_t i, start = block * EDID_LENGTH; + + if (start + len > 0x400 /* vga register offset */) + return -1; + + for (i = 0; i < len; i++) + buf[i] = readb(bochs->mmio + start + i); + + return 0; +} + +static int bochs_hw_load_edid(struct bochs_device *bochs) +{ + u8 header[8]; + + if (!bochs->mmio) + return -1; + + /* check header to detect whenever edid support is enabled in qemu */ + bochs_get_edid_block(bochs, header, 0, ARRAY_SIZE(header)); + if (drm_edid_header_is_valid(header) != 8) + return -1; + + kfree(bochs->edid); + bochs->edid = drm_do_get_edid(&bochs->connector, + bochs_get_edid_block, bochs); + if (bochs->edid == NULL) + return -1; + + return 0; +} + +static int bochs_hw_init(struct drm_device *dev) +{ + struct bochs_device *bochs = dev->dev_private; + struct pci_dev *pdev = to_pci_dev(dev->dev); + unsigned long addr, size, mem, ioaddr, iosize; + u16 id; + + if (pdev->resource[2].flags & IORESOURCE_MEM) { + /* mmio bar with vga and bochs registers present */ + if (pci_request_region(pdev, 2, "bochs-drm") != 0) { + DRM_ERROR("Cannot request mmio region\n"); + return -EBUSY; + } + ioaddr = pci_resource_start(pdev, 2); + iosize = pci_resource_len(pdev, 2); + bochs->mmio = ioremap(ioaddr, iosize); + if (bochs->mmio == NULL) { + DRM_ERROR("Cannot map mmio region\n"); + return -ENOMEM; + } + } else { + ioaddr = VBE_DISPI_IOPORT_INDEX; + iosize = 2; + if (!request_region(ioaddr, iosize, "bochs-drm")) { + DRM_ERROR("Cannot request 
ioports\n"); + return -EBUSY; + } + bochs->ioports = 1; + } + + id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID); + mem = bochs_dispi_read(bochs, VBE_DISPI_INDEX_VIDEO_MEMORY_64K) + * 64 * 1024; + if ((id & 0xfff0) != VBE_DISPI_ID0) { + DRM_ERROR("ID mismatch\n"); + return -ENODEV; + } + + if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0) + return -ENODEV; + addr = pci_resource_start(pdev, 0); + size = pci_resource_len(pdev, 0); + if (addr == 0) + return -ENODEV; + if (size != mem) { + DRM_ERROR("Size mismatch: pci=%ld, bochs=%ld\n", + size, mem); + size = min(size, mem); + } + + if (pci_request_region(pdev, 0, "bochs-drm") != 0) + DRM_WARN("Cannot request framebuffer, boot fb still active?\n"); + + bochs->fb_map = ioremap(addr, size); + if (bochs->fb_map == NULL) { + DRM_ERROR("Cannot map framebuffer\n"); + return -ENOMEM; + } + bochs->fb_base = addr; + bochs->fb_size = size; + + DRM_INFO("Found bochs VGA, ID 0x%x.\n", id); + DRM_INFO("Framebuffer size %ld kB @ 0x%lx, %s @ 0x%lx.\n", + size / 1024, addr, + bochs->ioports ? "ioports" : "mmio", + ioaddr); + + if (bochs->mmio && pdev->revision >= 2) { + bochs->qext_size = readl(bochs->mmio + 0x600); + if (bochs->qext_size < 4 || bochs->qext_size > iosize) { + bochs->qext_size = 0; + goto noext; + } + DRM_DEBUG("Found qemu ext regs, size %ld\n", + bochs->qext_size); + bochs_hw_set_native_endian(bochs); + } + +noext: + return 0; +} + +static void bochs_hw_fini(struct drm_device *dev) +{ + struct bochs_device *bochs = dev->dev_private; + + /* TODO: shot down existing vram mappings */ + + if (bochs->mmio) + iounmap(bochs->mmio); + if (bochs->ioports) + release_region(VBE_DISPI_IOPORT_INDEX, 2); + if (bochs->fb_map) + iounmap(bochs->fb_map); + pci_release_regions(to_pci_dev(dev->dev)); + kfree(bochs->edid); +} + +static void bochs_hw_blank(struct bochs_device *bochs, bool blank) +{ + DRM_DEBUG_DRIVER("hw_blank %d\n", blank); + /* discard ar_flip_flop */ + (void)bochs_vga_readb(bochs, VGA_IS1_RC); + /* blank or unblank; we need only update index and set 0x20 */ + bochs_vga_writeb(bochs, VGA_ATT_W, blank ? 
0 : 0x20); +} + +static void bochs_hw_setmode(struct bochs_device *bochs, struct drm_display_mode *mode) +{ + int idx; + + if (!drm_dev_enter(bochs->dev, &idx)) + return; + + bochs->xres = mode->hdisplay; + bochs->yres = mode->vdisplay; + bochs->bpp = 32; + bochs->stride = mode->hdisplay * (bochs->bpp / 8); + bochs->yres_virtual = bochs->fb_size / bochs->stride; + + DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n", + bochs->xres, bochs->yres, bochs->bpp, + bochs->yres_virtual); + + bochs_hw_blank(bochs, false); + + bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0); + bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp); + bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres); + bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES, bochs->yres); + bochs_dispi_write(bochs, VBE_DISPI_INDEX_BANK, 0); + bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, bochs->xres); + bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_HEIGHT, + bochs->yres_virtual); + bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, 0); + bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, 0); + + bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, + VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED); + + drm_dev_exit(idx); +} + +static void bochs_hw_setformat(struct bochs_device *bochs, const struct drm_format_info *format) +{ + int idx; + + if (!drm_dev_enter(bochs->dev, &idx)) + return; + + DRM_DEBUG_DRIVER("format %c%c%c%c\n", + (format->format >> 0) & 0xff, + (format->format >> 8) & 0xff, + (format->format >> 16) & 0xff, + (format->format >> 24) & 0xff); + + switch (format->format) { + case DRM_FORMAT_XRGB8888: + bochs_hw_set_little_endian(bochs); + break; + case DRM_FORMAT_BGRX8888: + bochs_hw_set_big_endian(bochs); + break; + default: + /* should not happen */ + DRM_ERROR("%s: Huh? Got framebuffer format 0x%x", + __func__, format->format); + break; + } + + drm_dev_exit(idx); +} + +static void bochs_hw_setbase(struct bochs_device *bochs, int x, int y, int stride, u64 addr) +{ + unsigned long offset; + unsigned int vx, vy, vwidth, idx; + + if (!drm_dev_enter(bochs->dev, &idx)) + return; + + bochs->stride = stride; + offset = (unsigned long)addr + + y * bochs->stride + + x * (bochs->bpp / 8); + vy = offset / bochs->stride; + vx = (offset % bochs->stride) * 8 / bochs->bpp; + vwidth = stride * 8 / bochs->bpp; + + DRM_DEBUG_DRIVER("x %d, y %d, addr %llx -> offset %lx, vx %d, vy %d\n", + x, y, addr, offset, vx, vy); + bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, vwidth); + bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx); + bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy); + + drm_dev_exit(idx); +} + +/* ---------------------------------------------------------------------- */ + +static const uint32_t bochs_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_BGRX8888, +}; + +static void bochs_plane_update(struct bochs_device *bochs, struct drm_plane_state *state) +{ + struct drm_gem_vram_object *gbo; + s64 gpu_addr; + + if (!state->fb || !bochs->stride) + return; + + gbo = drm_gem_vram_of_gem(state->fb->obj[0]); + gpu_addr = drm_gem_vram_offset(gbo); + if (WARN_ON_ONCE(gpu_addr < 0)) + return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. 
*/ + + bochs_hw_setbase(bochs, + state->crtc_x, + state->crtc_y, + state->fb->pitches[0], + state->fb->offsets[0] + gpu_addr); + bochs_hw_setformat(bochs, state->fb->format); +} + +static void bochs_pipe_enable(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state, + struct drm_plane_state *plane_state) +{ + struct bochs_device *bochs = pipe->crtc.dev->dev_private; + + bochs_hw_setmode(bochs, &crtc_state->mode); + bochs_plane_update(bochs, plane_state); +} + +static void bochs_pipe_disable(struct drm_simple_display_pipe *pipe) +{ + struct bochs_device *bochs = pipe->crtc.dev->dev_private; + + bochs_hw_blank(bochs, true); +} + +static void bochs_pipe_update(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_state) +{ + struct bochs_device *bochs = pipe->crtc.dev->dev_private; + + bochs_plane_update(bochs, pipe->plane.state); +} + +static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = { + .enable = bochs_pipe_enable, + .disable = bochs_pipe_disable, + .update = bochs_pipe_update, + .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb, + .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb, +}; + +static int bochs_connector_get_modes(struct drm_connector *connector) +{ + struct bochs_device *bochs = + container_of(connector, struct bochs_device, connector); + int count = 0; + + if (bochs->edid) + count = drm_add_edid_modes(connector, bochs->edid); + + if (!count) { + count = drm_add_modes_noedid(connector, 8192, 8192); + drm_set_preferred_mode(connector, defx, defy); + } + return count; +} + +static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = { + .get_modes = bochs_connector_get_modes, +}; + +static const struct drm_connector_funcs bochs_connector_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static void bochs_connector_init(struct drm_device *dev) +{ + struct bochs_device *bochs = dev->dev_private; + struct drm_connector *connector = &bochs->connector; + + drm_connector_init(dev, connector, &bochs_connector_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL); + drm_connector_helper_add(connector, &bochs_connector_connector_helper_funcs); + + bochs_hw_load_edid(bochs); + if (bochs->edid) { + DRM_INFO("Found EDID data blob.\n"); + drm_connector_attach_edid_property(connector); + drm_connector_update_edid_property(connector, bochs->edid); + } +} + +static struct drm_framebuffer * +bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888 && + mode_cmd->pixel_format != DRM_FORMAT_BGRX8888) + return ERR_PTR(-EINVAL); + + return drm_gem_fb_create(dev, file, mode_cmd); +} + +static const struct drm_mode_config_funcs bochs_mode_funcs = { + .fb_create = bochs_gem_fb_create, + .mode_valid = drm_vram_helper_mode_valid, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static int bochs_kms_init(struct bochs_device *bochs) +{ + int ret; + + ret = drmm_mode_config_init(bochs->dev); + if (ret) + return ret; + + bochs->dev->mode_config.max_width = 8192; + bochs->dev->mode_config.max_height = 8192; + + bochs->dev->mode_config.fb_base = bochs->fb_base; + bochs->dev->mode_config.preferred_depth = 24; + 
bochs->dev->mode_config.prefer_shadow = 0; + bochs->dev->mode_config.prefer_shadow_fbdev = 1; + bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; + + bochs->dev->mode_config.funcs = &bochs_mode_funcs; + + bochs_connector_init(bochs->dev); + drm_simple_display_pipe_init(bochs->dev, + &bochs->pipe, + &bochs_pipe_funcs, + bochs_formats, + ARRAY_SIZE(bochs_formats), + NULL, + &bochs->connector); + + drm_mode_config_reset(bochs->dev); + + return 0; +} + +/* ---------------------------------------------------------------------- */ +/* drm interface */ + +static int bochs_load(struct drm_device *dev) +{ + struct bochs_device *bochs; + int ret; + + bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL); + if (bochs == NULL) + return -ENOMEM; + dev->dev_private = bochs; + bochs->dev = dev; + + ret = bochs_hw_init(dev); + if (ret) + return ret; + + ret = drmm_vram_helper_init(dev, bochs->fb_base, bochs->fb_size); + if (ret) + return ret; + + ret = bochs_kms_init(bochs); + if (ret) + return ret; + + return 0; +} + +DEFINE_DRM_GEM_FOPS(bochs_fops); + +static const struct drm_driver bochs_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + .fops = &bochs_fops, + .name = "bochs-drm", + .desc = "bochs dispi vga interface (qemu stdvga)", + .date = "20130925", + .major = 1, + .minor = 0, + DRM_GEM_VRAM_DRIVER, +}; + +/* ---------------------------------------------------------------------- */ +/* pm interface */ + +#ifdef CONFIG_PM_SLEEP +static int bochs_pm_suspend(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + + return drm_mode_config_helper_suspend(drm_dev); +} + +static int bochs_pm_resume(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + + return drm_mode_config_helper_resume(drm_dev); +} +#endif + +static const struct dev_pm_ops bochs_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(bochs_pm_suspend, + bochs_pm_resume) +}; + +/* ---------------------------------------------------------------------- */ +/* pci interface */ + +static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct drm_device *dev; + unsigned long fbsize; + int ret; + + fbsize = pci_resource_len(pdev, 0); + if (fbsize < 4 * 1024 * 1024) { + DRM_ERROR("less than 4 MB video memory, ignoring device\n"); + return -ENOMEM; + } + + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &bochs_driver); + if (ret) + return ret; + + dev = drm_dev_alloc(&bochs_driver, &pdev->dev); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + ret = pcim_enable_device(pdev); + if (ret) + goto err_free_dev; + + pci_set_drvdata(pdev, dev); + + ret = bochs_load(dev); + if (ret) + goto err_free_dev; + + ret = drm_dev_register(dev, 0); + if (ret) + goto err_free_dev; + + drm_fbdev_generic_setup(dev, 32); + return ret; + +err_free_dev: + drm_dev_put(dev); + return ret; +} + +static void bochs_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + drm_dev_unplug(dev); + drm_atomic_helper_shutdown(dev); + bochs_hw_fini(dev); + drm_dev_put(dev); +} + +static const struct pci_device_id bochs_pci_tbl[] = { + { + .vendor = 0x1234, + .device = 0x1111, + .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET, + .subdevice = PCI_SUBDEVICE_ID_QEMU, + .driver_data = BOCHS_QEMU_STDVGA, + }, + { + .vendor = 0x1234, + .device = 0x1111, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = BOCHS_UNKNOWN, + }, + { /* end of list */ } +}; + +static struct pci_driver bochs_pci_driver = { + .name = "bochs-drm", + 
.id_table = bochs_pci_tbl, + .probe = bochs_pci_probe, + .remove = bochs_pci_remove, + .driver.pm = &bochs_pm_ops, +}; + +/* ---------------------------------------------------------------------- */ +/* module init/exit */ + +static int __init bochs_init(void) +{ + if (vgacon_text_force() && bochs_modeset == -1) + return -EINVAL; + + if (bochs_modeset == 0) + return -EINVAL; + + return pci_register_driver(&bochs_pci_driver); +} + +static void __exit bochs_exit(void) +{ + pci_unregister_driver(&bochs_pci_driver); +} + +module_init(bochs_init); +module_exit(bochs_exit); + +MODULE_DEVICE_TABLE(pci, bochs_pci_tbl); +MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index 42611dacde88..a8b476a59c0d 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -550,7 +550,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev, struct cirrus_device *cirrus; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "cirrusdrmfb"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &cirrus_driver); if (ret) return ret; diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index a233c86d428b..cf7287fccd72 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -3,7 +3,6 @@ * Copyright 2019 Hans de Goede <hdegoede@redhat.com> */ -#include <linux/dma-buf.h> #include <linux/module.h> #include <linux/usb.h> @@ -268,13 +267,10 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320) y2 = gm12u320->fb_update.rect.y2; vaddr = gm12u320->fb_update.src_map.vaddr; /* TODO: Use mapping abstraction properly */ - if (fb->obj[0]->import_attach) { - ret = dma_buf_begin_cpu_access( - fb->obj[0]->import_attach->dmabuf, DMA_FROM_DEVICE); - if (ret) { - GM12U320_ERR("dma_buf_begin_cpu_access err: %d\n", ret); - goto put_fb; - } + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); + if (ret) { + GM12U320_ERR("drm_gem_fb_begin_cpu_access err: %d\n", ret); + goto put_fb; } src = vaddr + y1 * fb->pitches[0] + x1 * 4; @@ -311,12 +307,7 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320) src += fb->pitches[0]; } - if (fb->obj[0]->import_attach) { - ret = dma_buf_end_cpu_access(fb->obj[0]->import_attach->dmabuf, - DMA_FROM_DEVICE); - if (ret) - GM12U320_ERR("dma_buf_end_cpu_access err: %d\n", ret); - } + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); put_fb: drm_framebuffer_put(fb); gm12u320->fb_update.fb = NULL; diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c index da5df93450de..9b33c05732aa 100644 --- a/drivers/gpu/drm/tiny/hx8357d.c +++ b/drivers/gpu/drm/tiny/hx8357d.c @@ -184,7 +184,6 @@ static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = { .enable = yx240qv29_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct drm_display_mode yx350hv15_mode = { diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c index 69265d8a3beb..976d3209f164 100644 --- a/drivers/gpu/drm/tiny/ili9225.c +++ b/drivers/gpu/drm/tiny/ili9225.c @@ -328,7 +328,6 @@ static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = { .enable = ili9225_pipe_enable, .disable = ili9225_pipe_disable, .update = ili9225_pipe_update, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct drm_display_mode ili9225_mode = { 
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c index ad9ce7b4f76f..37e0c33399c8 100644 --- a/drivers/gpu/drm/tiny/ili9341.c +++ b/drivers/gpu/drm/tiny/ili9341.c @@ -140,7 +140,6 @@ static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = { .enable = yx240qv29_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct drm_display_mode yx240qv29_mode = { diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index 75aa1476c66c..e9a63f4b2993 100644 --- a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -153,7 +153,6 @@ static const struct drm_simple_display_pipe_funcs waveshare_pipe_funcs = { .enable = waveshare_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct drm_display_mode waveshare_mode = { diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c index 82fd1ad3413f..023de49e7a8e 100644 --- a/drivers/gpu/drm/tiny/mi0283qt.c +++ b/drivers/gpu/drm/tiny/mi0283qt.c @@ -144,7 +144,6 @@ static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = { .enable = mi0283qt_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct drm_display_mode mi0283qt_mode = { diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 2cee07a2e00b..4d07b21a16e6 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -14,7 +14,6 @@ */ #include <linux/delay.h> -#include <linux/dma-buf.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/property.h> @@ -532,7 +531,6 @@ static void repaper_gray8_to_mono_reversed(u8 *buf, u32 width, u32 height) static int repaper_fb_dirty(struct drm_framebuffer *fb) { struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); - struct dma_buf_attachment *import_attach = cma_obj->base.import_attach; struct repaper_epd *epd = drm_to_epd(fb->dev); struct drm_rect clip; int idx, ret = 0; @@ -558,21 +556,13 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb) goto out_exit; } - if (import_attach) { - ret = dma_buf_begin_cpu_access(import_attach->dmabuf, - DMA_FROM_DEVICE); - if (ret) - goto out_free; - } + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); + if (ret) + goto out_free; drm_fb_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip); - if (import_attach) { - ret = dma_buf_end_cpu_access(import_attach->dmabuf, - DMA_FROM_DEVICE); - if (ret) - goto out_free; - } + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); repaper_gray8_to_mono_reversed(buf, fb->width, fb->height); @@ -861,7 +851,6 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = { .enable = repaper_pipe_enable, .disable = repaper_pipe_disable, .update = repaper_pipe_update, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static int repaper_connector_get_modes(struct drm_connector *connector) diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index 05db980cc047..ad0faa8723c2 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ b/drivers/gpu/drm/tiny/st7586.c @@ -6,7 +6,6 @@ */ #include <linux/delay.h> -#include <linux/dma-buf.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/property.h> @@ -21,6 +20,7 @@ #include <drm/drm_format_helper.h> #include 
<drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_rect.h> @@ -92,24 +92,18 @@ static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb, struct drm_rect *clip) { struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); - struct dma_buf_attachment *import_attach = cma_obj->base.import_attach; void *src = cma_obj->vaddr; int ret = 0; - if (import_attach) { - ret = dma_buf_begin_cpu_access(import_attach->dmabuf, - DMA_FROM_DEVICE); - if (ret) - return ret; - } + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); + if (ret) + return ret; st7586_xrgb8888_to_gray332(dst, src, fb, clip); - if (import_attach) - ret = dma_buf_end_cpu_access(import_attach->dmabuf, - DMA_FROM_DEVICE); + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); - return ret; + return 0; } static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) @@ -268,7 +262,6 @@ static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = { .enable = st7586_pipe_enable, .disable = st7586_pipe_disable, .update = st7586_pipe_update, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct drm_display_mode st7586_mode = { diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c index e8b7815d8cae..fc40dd10efa8 100644 --- a/drivers/gpu/drm/tiny/st7735r.c +++ b/drivers/gpu/drm/tiny/st7735r.c @@ -136,7 +136,6 @@ static const struct drm_simple_display_pipe_funcs st7735r_pipe_funcs = { .enable = st7735r_pipe_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct st7735r_cfg jd_t18003_t01_cfg = { diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 8d7fd65ccced..ea4add2b9717 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -488,6 +488,31 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched) } EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); +static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo, + struct ttm_resource **mem, + struct ttm_operation_ctx *ctx, + struct ttm_place *hop) +{ + struct ttm_placement hop_placement; + struct ttm_resource *hop_mem; + int ret; + + hop_placement.num_placement = hop_placement.num_busy_placement = 1; + hop_placement.placement = hop_placement.busy_placement = hop; + + /* find space in the bounce domain */ + ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx); + if (ret) + return ret; + /* move to the bounce domain */ + ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL); + if (ret) { + ttm_resource_free(bo, &hop_mem); + return ret; + } + return 0; +} + static int ttm_bo_evict(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { @@ -527,12 +552,17 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, goto out; } +bounce: ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop); - if (unlikely(ret)) { - WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n"); - if (ret != -ERESTARTSYS) + if (ret == -EMULTIHOP) { + ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop); + if (ret) { pr_err("Buffer eviction failed\n"); - ttm_resource_free(bo, &evict_mem); + ttm_resource_free(bo, &evict_mem); + goto out; + } + /* try and move to final place now. 
*/ + goto bounce; } out: return ret; @@ -847,31 +877,6 @@ error: } EXPORT_SYMBOL(ttm_bo_mem_space); -static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo, - struct ttm_resource **mem, - struct ttm_operation_ctx *ctx, - struct ttm_place *hop) -{ - struct ttm_placement hop_placement; - struct ttm_resource *hop_mem; - int ret; - - hop_placement.num_placement = hop_placement.num_busy_placement = 1; - hop_placement.placement = hop_placement.busy_placement = hop; - - /* find space in the bounce domain */ - ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx); - if (ret) - return ret; - /* move to the bounce domain */ - ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL); - if (ret) { - ttm_resource_free(bo, &hop_mem); - return ret; - } - return 0; -} - static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_operation_ctx *ctx) @@ -916,6 +921,9 @@ static bool ttm_bo_places_compat(const struct ttm_place *places, { unsigned i; + if (mem->placement & TTM_PL_FLAG_TEMPORARY) + return false; + for (i = 0; i < num_placement; i++) { const struct ttm_place *heap = &places[i]; diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c index 50e1fb71869f..17b8c8dd169d 100644 --- a/drivers/gpu/drm/tve200/tve200_display.c +++ b/drivers/gpu/drm/tve200/tve200_display.c @@ -316,7 +316,6 @@ static const struct drm_simple_display_pipe_funcs tve200_display_funcs = { .enable = tve200_display_enable, .disable = tve200_display_disable, .update = tve200_display_update, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, .enable_vblank = tve200_display_enable_vblank, .disable_vblank = tve200_display_disable_vblank, }; diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index 8d98bf69d075..8a6b94b1511b 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -6,11 +6,8 @@ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it> * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com> * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> - */ -#include <linux/dma-buf.h> - #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_damage_helper.h> @@ -271,9 +268,8 @@ static int udl_handle_damage(struct drm_framebuffer *fb, const struct dma_buf_ma int x, int y, int width, int height) { struct drm_device *dev = fb->dev; - struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach; void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */ - int i, ret, tmp_ret; + int i, ret; char *cmd; struct urb *urb; struct drm_rect clip; @@ -290,17 +286,14 @@ static int udl_handle_damage(struct drm_framebuffer *fb, const struct dma_buf_ma else if ((clip.x2 > fb->width) || (clip.y2 > fb->height)) return -EINVAL; - if (import_attach) { - ret = dma_buf_begin_cpu_access(import_attach->dmabuf, - DMA_FROM_DEVICE); - if (ret) - return ret; - } + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); + if (ret) + return ret; urb = udl_get_urb(dev); if (!urb) { ret = -ENOMEM; - goto out_dma_buf_end_cpu_access; + goto out_drm_gem_fb_end_cpu_access; } cmd = urb->transfer_buffer; @@ -313,7 +306,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb, const struct dma_buf_ma &cmd, byte_offset, dev_byte_offset, byte_width); if (ret) - goto out_dma_buf_end_cpu_access; + goto out_drm_gem_fb_end_cpu_access; } if (cmd > (char *)urb->transfer_buffer) { @@ -329,14 +322,8 @@ static int udl_handle_damage(struct 
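The -EMULTIHOP handling added to ttm_bo_evict() above is the core half of a two-sided contract: a driver whose copy hardware cannot move a buffer directly between two memory domains fills in the "hop" placement and returns -EMULTIHOP from its move() callback, and the TTM core then bounces the buffer through that intermediate domain (now during eviction as well as during normal moves). A minimal sketch of the driver side, for a hypothetical driver: mydrv_needs_gtt_bounce() and mydrv_copy() are placeholders, and the VRAM-to-SYSTEM-via-TT rule is modelled on what amdgpu does rather than taken from this patch.

#include <linux/errno.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* Hypothetical driver helpers, not part of this patch. */
bool mydrv_needs_gtt_bounce(struct ttm_buffer_object *bo,
			    struct ttm_resource *new_mem);
int mydrv_copy(struct ttm_buffer_object *bo, bool evict,
	       struct ttm_operation_ctx *ctx,
	       struct ttm_resource *new_mem);

static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
			 struct ttm_operation_ctx *ctx,
			 struct ttm_resource *new_mem,
			 struct ttm_place *hop)
{
	if (mydrv_needs_gtt_bounce(bo, new_mem)) {
		/* No direct path to new_mem: ask the core to bounce through a
		 * temporary TT allocation. ttm_bo_evict()/ttm_bo_move_buffer()
		 * find space for the hop, move the BO there and then call
		 * ->move() again with the final placement. Marking the hop
		 * TTM_PL_FLAG_TEMPORARY keeps ttm_bo_places_compat() (see the
		 * hunk above) from treating the bounce domain as a valid
		 * final placement.
		 */
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = TTM_PL_TT;
		hop->flags = TTM_PL_FLAG_TEMPORARY;
		return -EMULTIHOP;
	}

	/* Directly supported combination: do the actual copy. */
	return mydrv_copy(bo, evict, ctx, new_mem);
}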
drm_framebuffer *fb, const struct dma_buf_ma ret = 0; -out_dma_buf_end_cpu_access: - if (import_attach) { - tmp_ret = dma_buf_end_cpu_access(import_attach->dmabuf, - DMA_FROM_DEVICE); - if (tmp_ret && !ret) - ret = tmp_ret; /* only update ret if not set yet */ - } - +out_drm_gem_fb_end_cpu_access: + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); return ret; } diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile index db4cfc155821..e8b314137020 100644 --- a/drivers/gpu/drm/v3d/Makefile +++ b/drivers/gpu/drm/v3d/Makefile @@ -9,6 +9,7 @@ v3d-y := \ v3d_gem.o \ v3d_irq.o \ v3d_mmu.o \ + v3d_perfmon.o \ v3d_trace_points.o \ v3d_sched.o diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 99e22beea90b..9403c3b36aca 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -94,6 +94,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, case DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH: args->value = 1; return 0; + case DRM_V3D_PARAM_SUPPORTS_PERFMON: + args->value = (v3d->ver >= 40); + return 0; default: DRM_DEBUG("Unknown parameter %d\n", args->param); return -EINVAL; @@ -121,6 +124,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file) 1, NULL); } + v3d_perfmon_open_file(v3d_priv); file->driver_priv = v3d_priv; return 0; @@ -136,6 +140,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file) drm_sched_entity_destroy(&v3d_priv->sched_entity[q]); } + v3d_perfmon_close_file(v3d_priv); kfree(v3d_priv); } @@ -156,6 +161,9 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CSD, v3d_submit_csd_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), + DRM_IOCTL_DEF_DRV(V3D_PERFMON_CREATE, v3d_perfmon_create_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_PERFMON_DESTROY, v3d_perfmon_destroy_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW), }; static const struct drm_driver v3d_drm_driver = { diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 8a390738d65b..270134779073 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -37,6 +37,40 @@ struct v3d_queue_state { u64 emit_seqno; }; +/* Performance monitor object. The perfmon lifetime is controlled by userspace + * using perfmon related ioctls. A perfmon can be attached to a submit_cl + * request, and when this is the case, HW perf counters will be activated just + * before the submit_cl is submitted to the GPU and disabled when the job is + * done. This way, only events related to a specific job will be counted. + */ +struct v3d_perfmon { + /* Tracks the number of users of the perfmon, when this counter reaches + * zero the perfmon is destroyed. + */ + refcount_t refcnt; + + /* Protects perfmon stop, as it can be invoked from multiple places. */ + struct mutex lock; + + /* Number of counters activated in this perfmon instance + * (should be less than DRM_V3D_MAX_PERF_COUNTERS). + */ + u8 ncounters; + + /* Events counted by the HW perf counters. */ + u8 counters[DRM_V3D_MAX_PERF_COUNTERS]; + + /* Storage for counter values. Counters are incremented by the + * HW perf counter values every time the perfmon is attached + * to a GPU job.
This way, perfmon users don't have to + * retrieve the results after each job if they want to track + * events covering several submissions. Note that counter + * values can't be reset, but you can fake a reset by + * destroying the perfmon and creating a new one. + */ + u64 values[]; +}; + struct v3d_dev { struct drm_device drm; @@ -89,6 +123,9 @@ struct v3d_dev { */ spinlock_t job_lock; + /* Used to track the active perfmon if any. */ + struct v3d_perfmon *active_perfmon; + /* Protects bo_stats */ struct mutex bo_lock; @@ -133,6 +170,11 @@ v3d_has_csd(struct v3d_dev *v3d) struct v3d_file_priv { struct v3d_dev *v3d; + struct { + struct idr idr; + struct mutex lock; + } perfmon; + struct drm_sched_entity sched_entity[V3D_MAX_QUEUES]; }; @@ -205,6 +247,11 @@ struct v3d_job { */ struct dma_fence *done_fence; + /* Pointer to a performance monitor object if the user requested it, + * NULL otherwise. + */ + struct v3d_perfmon *perfmon; + /* Callback for the freeing of the job on refcount going to 0. */ void (*free)(struct kref *ref); }; @@ -353,3 +400,19 @@ void v3d_mmu_remove_ptes(struct v3d_bo *bo); /* v3d_sched.c */ int v3d_sched_init(struct v3d_dev *v3d); void v3d_sched_fini(struct v3d_dev *v3d); + +/* v3d_perfmon.c */ +void v3d_perfmon_get(struct v3d_perfmon *perfmon); +void v3d_perfmon_put(struct v3d_perfmon *perfmon); +void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon); +void v3d_perfmon_stop(struct v3d_dev *v3d, struct v3d_perfmon *perfmon, + bool capture); +struct v3d_perfmon *v3d_perfmon_find(struct v3d_file_priv *v3d_priv, int id); +void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv); +void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv); +int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 4eb354226972..5689da118197 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -126,6 +126,8 @@ v3d_reset(struct v3d_dev *v3d) v3d_mmu_set_page_table(v3d); v3d_irq_reset(v3d); + v3d_perfmon_stop(v3d, v3d->active_perfmon, false); + trace_v3d_reset_end(dev); } @@ -375,6 +377,9 @@ v3d_job_free(struct kref *ref) pm_runtime_mark_last_busy(job->v3d->drm.dev); pm_runtime_put_autosuspend(job->v3d->drm.dev); + if (job->perfmon) + v3d_perfmon_put(job->perfmon); + kfree(job); } @@ -539,6 +544,9 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end); + if (args->pad != 0) + return -EINVAL; + if (args->flags != 0 && args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) { DRM_INFO("invalid flags: %d\n", args->flags); @@ -611,8 +619,20 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, if (ret) goto fail; + if (args->perfmon_id) { + render->base.perfmon = v3d_perfmon_find(v3d_priv, + args->perfmon_id); + + if (!render->base.perfmon) { + ret = -ENOENT; + goto fail; + } + } + mutex_lock(&v3d->sched_lock); if (bin) { + bin->base.perfmon = render->base.perfmon; + v3d_perfmon_get(bin->base.perfmon); ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN); if (ret) goto fail_unreserve; @@ -633,6 +653,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, ret = drm_gem_fence_array_add(&clean_job->deps, render_fence); if (ret) goto fail_unreserve; + 
clean_job->perfmon = render->base.perfmon; + v3d_perfmon_get(clean_job->perfmon); ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN); if (ret) goto fail_unreserve; @@ -827,6 +849,15 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, if (ret) goto fail; + if (args->perfmon_id) { + job->base.perfmon = v3d_perfmon_find(v3d_priv, + args->perfmon_id); + if (!job->base.perfmon) { + ret = -ENOENT; + goto fail; + } + } + mutex_lock(&v3d->sched_lock); ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD); if (ret) diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c new file mode 100644 index 000000000000..0288ef063513 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Raspberry Pi + */ + +#include "v3d_drv.h" +#include "v3d_regs.h" + +#define V3D_PERFMONID_MIN 1 +#define V3D_PERFMONID_MAX U32_MAX + +void v3d_perfmon_get(struct v3d_perfmon *perfmon) +{ + if (perfmon) + refcount_inc(&perfmon->refcnt); +} + +void v3d_perfmon_put(struct v3d_perfmon *perfmon) +{ + if (perfmon && refcount_dec_and_test(&perfmon->refcnt)) + kfree(perfmon); +} + +void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon) +{ + unsigned int i; + u32 mask; + u8 ncounters = perfmon->ncounters; + + if (WARN_ON_ONCE(!perfmon || v3d->active_perfmon)) + return; + + mask = GENMASK(ncounters - 1, 0); + + for (i = 0; i < ncounters; i++) { + u32 source = i / 4; + u32 channel = V3D_SET_FIELD(perfmon->counters[i], V3D_PCTR_S0); + + i++; + channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0, + V3D_PCTR_S1); + i++; + channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0, + V3D_PCTR_S2); + i++; + channel |= V3D_SET_FIELD(i < ncounters ? 
perfmon->counters[i] : 0, + V3D_PCTR_S3); + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel); + } + + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_CLR, mask); + V3D_CORE_WRITE(0, V3D_PCTR_0_OVERFLOW, mask); + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask); + + v3d->active_perfmon = perfmon; +} + +void v3d_perfmon_stop(struct v3d_dev *v3d, struct v3d_perfmon *perfmon, + bool capture) +{ + unsigned int i; + + if (!perfmon || !v3d->active_perfmon) + return; + + mutex_lock(&perfmon->lock); + if (perfmon != v3d->active_perfmon) { + mutex_unlock(&perfmon->lock); + return; + } + + if (capture) + for (i = 0; i < perfmon->ncounters; i++) + perfmon->values[i] += V3D_CORE_READ(0, V3D_PCTR_0_PCTRX(i)); + + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, 0); + + v3d->active_perfmon = NULL; + mutex_unlock(&perfmon->lock); +} + +struct v3d_perfmon *v3d_perfmon_find(struct v3d_file_priv *v3d_priv, int id) +{ + struct v3d_perfmon *perfmon; + + mutex_lock(&v3d_priv->perfmon.lock); + perfmon = idr_find(&v3d_priv->perfmon.idr, id); + v3d_perfmon_get(perfmon); + mutex_unlock(&v3d_priv->perfmon.lock); + + return perfmon; +} + +void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv) +{ + mutex_init(&v3d_priv->perfmon.lock); + idr_init(&v3d_priv->perfmon.idr); +} + +static int v3d_perfmon_idr_del(int id, void *elem, void *data) +{ + struct v3d_perfmon *perfmon = elem; + + v3d_perfmon_put(perfmon); + + return 0; +} + +void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv) +{ + mutex_lock(&v3d_priv->perfmon.lock); + idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL); + idr_destroy(&v3d_priv->perfmon.idr); + mutex_unlock(&v3d_priv->perfmon.lock); +} + +int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_perfmon_create *req = data; + struct v3d_perfmon *perfmon; + unsigned int i; + int ret; + + /* Number of monitored counters cannot exceed HW limits. */ + if (req->ncounters > DRM_V3D_MAX_PERF_COUNTERS || + !req->ncounters) + return -EINVAL; + + /* Make sure all counters are valid. 
*/ + for (i = 0; i < req->ncounters; i++) { + if (req->counters[i] >= V3D_PERFCNT_NUM) + return -EINVAL; + } + + perfmon = kzalloc(struct_size(perfmon, values, req->ncounters), + GFP_KERNEL); + if (!perfmon) + return -ENOMEM; + + for (i = 0; i < req->ncounters; i++) + perfmon->counters[i] = req->counters[i]; + + perfmon->ncounters = req->ncounters; + + refcount_set(&perfmon->refcnt, 1); + mutex_init(&perfmon->lock); + + mutex_lock(&v3d_priv->perfmon.lock); + ret = idr_alloc(&v3d_priv->perfmon.idr, perfmon, V3D_PERFMONID_MIN, + V3D_PERFMONID_MAX, GFP_KERNEL); + mutex_unlock(&v3d_priv->perfmon.lock); + + if (ret < 0) { + kfree(perfmon); + return ret; + } + + req->id = ret; + + return 0; +} + +int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_perfmon_destroy *req = data; + struct v3d_perfmon *perfmon; + + mutex_lock(&v3d_priv->perfmon.lock); + perfmon = idr_remove(&v3d_priv->perfmon.idr, req->id); + mutex_unlock(&v3d_priv->perfmon.lock); + + if (!perfmon) + return -EINVAL; + + v3d_perfmon_put(perfmon); + + return 0; +} + +int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_perfmon_get_values *req = data; + struct v3d_perfmon *perfmon; + int ret = 0; + + if (req->pad != 0) + return -EINVAL; + + mutex_lock(&v3d_priv->perfmon.lock); + perfmon = idr_find(&v3d_priv->perfmon.idr, req->id); + v3d_perfmon_get(perfmon); + mutex_unlock(&v3d_priv->perfmon.lock); + + if (!perfmon) + return -EINVAL; + + v3d_perfmon_stop(v3d, perfmon, true); + + if (copy_to_user(u64_to_user_ptr(req->values_ptr), perfmon->values, + perfmon->ncounters * sizeof(u64))) + ret = -EFAULT; + + v3d_perfmon_put(perfmon); + + return ret; +} diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h index 9bcb57781d31..3663e0d6bf76 100644 --- a/drivers/gpu/drm/v3d/v3d_regs.h +++ b/drivers/gpu/drm/v3d/v3d_regs.h @@ -347,6 +347,8 @@ /* Each src reg muxes four counters each. */ #define V3D_V4_PCTR_0_SRC_0_3 0x00660 #define V3D_V4_PCTR_0_SRC_28_31 0x0067c +#define V3D_V4_PCTR_0_SRC_X(x) (V3D_V4_PCTR_0_SRC_0_3 + \ + 4 * (x)) # define V3D_PCTR_S0_MASK V3D_MASK(6, 0) # define V3D_PCTR_S0_SHIFT 0 # define V3D_PCTR_S1_MASK V3D_MASK(14, 8) diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 8992480c88fa..dd7fcc36d726 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -63,6 +63,16 @@ v3d_job_free(struct drm_sched_job *sched_job) v3d_job_put(job); } +static void +v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job) +{ + if (job->perfmon != v3d->active_perfmon) + v3d_perfmon_stop(v3d, v3d->active_perfmon, true); + + if (job->perfmon && v3d->active_perfmon != job->perfmon) + v3d_perfmon_start(v3d, job->perfmon); +} + /* * Returns the fences that the job depends on, one by one. * @@ -120,6 +130,8 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno, job->start, job->end); + v3d_switch_perfmon(v3d, &job->base); + /* Set the current and end address of the control list. * Writing the end register is what starts the job. 
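The perfmon ioctls above are only ever driven from userspace, so the patch itself contains no caller; a hedged sketch of the intended flow follows: create a monitor listing the HW events of interest, reference it from a CL submission through perfmon_id, read back the accumulated values once the job has finished, and destroy it when done (destroying and recreating is also how a counter "reset" is faked, as the v3d_perfmon comment notes). Structure and ioctl names follow the uapi implied by the handlers above; the file descriptor, the prepared submission, the completion wait and the counter indices 0 and 1 are placeholders.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/v3d_drm.h>

/* fd: an open V3D render node; submit: a fully prepared CL submission. */
static int run_job_with_perfmon(int fd, struct drm_v3d_submit_cl *submit,
				uint64_t values[2])
{
	struct drm_v3d_perfmon_create create = {
		.ncounters = 2,
		.counters = { 0, 1 },	/* HW event indices, < V3D_PERFCNT_NUM */
	};
	struct drm_v3d_perfmon_get_values get = { 0 };
	struct drm_v3d_perfmon_destroy destroy = { 0 };
	int ret;

	ret = ioctl(fd, DRM_IOCTL_V3D_PERFMON_CREATE, &create);
	if (ret)
		return ret;

	/* The counters only run while this job is on the GPU. */
	submit->perfmon_id = create.id;
	ret = ioctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, submit);
	if (ret)
		goto out_destroy;

	/* ... wait for the submission to complete ... */

	get.id = create.id;
	get.values_ptr = (uintptr_t)values;
	ret = ioctl(fd, DRM_IOCTL_V3D_PERFMON_GET_VALUES, &get);

out_destroy:
	destroy.id = create.id;
	ioctl(fd, DRM_IOCTL_V3D_PERFMON_DESTROY, &destroy);
	return ret;
}

A client would normally gate this on DRM_V3D_PARAM_SUPPORTS_PERFMON, which the v3d_get_param_ioctl() change above reports as non-zero only for V3D 4.x and newer.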
*/ @@ -169,6 +181,8 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno, job->start, job->end); + v3d_switch_perfmon(v3d, &job->base); + /* XXX: Set the QCFG */ /* Set the current and end address of the control list. @@ -240,6 +254,8 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno); + v3d_switch_perfmon(v3d, &job->base); + for (i = 1; i <= 6; i++) V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]); /* CFG0 write kicks off the job. */ @@ -402,7 +418,7 @@ v3d_sched_init(struct v3d_dev *v3d) ret = drm_sched_init(&v3d->queue[V3D_BIN].sched, &v3d_bin_sched_ops, hw_jobs_limit, job_hang_limit, - msecs_to_jiffies(hang_limit_ms), + msecs_to_jiffies(hang_limit_ms), NULL, NULL, "v3d_bin"); if (ret) { dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret); @@ -412,7 +428,7 @@ v3d_sched_init(struct v3d_dev *v3d) ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched, &v3d_render_sched_ops, hw_jobs_limit, job_hang_limit, - msecs_to_jiffies(hang_limit_ms), + msecs_to_jiffies(hang_limit_ms), NULL, NULL, "v3d_render"); if (ret) { dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.", @@ -424,7 +440,7 @@ v3d_sched_init(struct v3d_dev *v3d) ret = drm_sched_init(&v3d->queue[V3D_TFU].sched, &v3d_tfu_sched_ops, hw_jobs_limit, job_hang_limit, - msecs_to_jiffies(hang_limit_ms), + msecs_to_jiffies(hang_limit_ms), NULL, NULL, "v3d_tfu"); if (ret) { dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.", @@ -437,7 +453,7 @@ v3d_sched_init(struct v3d_dev *v3d) ret = drm_sched_init(&v3d->queue[V3D_CSD].sched, &v3d_csd_sched_ops, hw_jobs_limit, job_hang_limit, - msecs_to_jiffies(hang_limit_ms), + msecs_to_jiffies(hang_limit_ms), NULL, NULL, "v3d_csd"); if (ret) { dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.", @@ -449,7 +465,7 @@ v3d_sched_init(struct v3d_dev *v3d) ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched, &v3d_cache_clean_sched_ops, hw_jobs_limit, job_hang_limit, - msecs_to_jiffies(hang_limit_ms), + msecs_to_jiffies(hang_limit_ms), NULL, NULL, "v3d_cache_clean"); if (ret) { dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.", diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index 6d4b32da9866..2b81cb259d23 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -43,7 +43,7 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!vbox_check_supported(VBE_DISPI_ID_HGSMI)) return -ENODEV; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "vboxvideodrmfb"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); if (ret) return ret; @@ -184,7 +184,6 @@ static const struct drm_driver driver = { .lastclose = drm_fb_helper_lastclose, .fops = &vbox_fops, - .irq_handler = vbox_irq_handler, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h index ac7c2effc46f..4903b91d7fe4 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.h +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h @@ -145,7 +145,6 @@ void vbox_mm_fini(struct vbox_private *vbox); int vbox_irq_init(struct vbox_private *vbox); void vbox_irq_fini(struct vbox_private *vbox); void vbox_report_hotplug(struct vbox_private *vbox); -irqreturn_t vbox_irq_handler(int irq, void *arg); /* vbox_hgsmi.c */ void *hgsmi_buffer_alloc(struct 
gen_pool *guest_pool, size_t size, diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c index b3ded68603ba..903a6c48ee8b 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_irq.c +++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c @@ -10,7 +10,8 @@ */ #include <linux/pci.h> -#include <drm/drm_irq.h> + +#include <drm/drm_drv.h> #include <drm/drm_probe_helper.h> #include "vbox_drv.h" @@ -31,7 +32,7 @@ void vbox_report_hotplug(struct vbox_private *vbox) schedule_work(&vbox->hotplug_work); } -irqreturn_t vbox_irq_handler(int irq, void *arg) +static irqreturn_t vbox_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *)arg; struct vbox_private *vbox = to_vbox_dev(dev); @@ -170,16 +171,21 @@ static void vbox_hotplug_worker(struct work_struct *work) int vbox_irq_init(struct vbox_private *vbox) { - struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev); + struct drm_device *dev = &vbox->ddev; + struct pci_dev *pdev = to_pci_dev(dev->dev); INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker); vbox_update_mode_hints(vbox); - return drm_irq_install(&vbox->ddev, pdev->irq); + /* PCI devices require shared interrupts. */ + return request_irq(pdev->irq, vbox_irq_handler, IRQF_SHARED, dev->driver->name, dev); } void vbox_irq_fini(struct vbox_private *vbox) { - drm_irq_uninstall(&vbox->ddev); + struct drm_device *dev = &vbox->ddev; + struct pci_dev *pdev = to_pci_dev(dev->dev); + + free_irq(pdev->irq, dev); flush_work(&vbox->hotplug_work); } diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c index 964381d55fc1..972c83b720aa 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_mode.c +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -488,8 +488,7 @@ static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = { .atomic_check = vbox_primary_atomic_check, .atomic_update = vbox_primary_atomic_update, .atomic_disable = vbox_primary_atomic_disable, - .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, - .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb, + DRM_GEM_VRAM_PLANE_HELPER_FUNCS, }; static const struct drm_plane_funcs vbox_primary_plane_funcs = { diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig index 118e8a426b1a..345a5570a3da 100644 --- a/drivers/gpu/drm/vc4/Kconfig +++ b/drivers/gpu/drm/vc4/Kconfig @@ -12,6 +12,7 @@ config DRM_VC4 select SND_PCM select SND_PCM_ELD select SND_SOC_GENERIC_DMAENGINE_PCM + select SND_SOC_HDMI_CODEC select DRM_MIPI_DSI help Choose this option if you have a system that has a Broadcom diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 8a60fb8ad370..73335feb712f 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -265,7 +265,7 @@ static int vc4_drm_bind(struct device *dev) if (ret) goto unbind_all; - ret = drm_aperture_remove_framebuffers(false, "vc4drmfb"); + ret = drm_aperture_remove_framebuffers(false, &vc4_drm_driver); if (ret) goto unbind_all; diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index a55256ed0955..a185027911ce 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -1646,10 +1646,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs); ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0); - if (ret) { - dev_err(dev, "bridge attach failed: %d\n", ret); + if (ret) return ret; - } /* Disable the atomic helper calls into the bridge. 
We * manually call the bridge pre_enable / enable / etc. calls * from our driver, since we need to sequence them within the diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index c2876731ee2d..b7dc32a0c9bb 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -46,6 +46,7 @@ #include <linux/rational.h> #include <linux/reset.h> #include <sound/dmaengine_pcm.h> +#include <sound/hdmi-codec.h> #include <sound/pcm_drm_eld.h> #include <sound/pcm_params.h> #include <sound/soc.h> @@ -435,7 +436,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); struct drm_connector *connector = &vc4_hdmi->connector; struct drm_connector_state *cstate = connector->state; - struct drm_crtc *crtc = encoder->crtc; + struct drm_crtc *crtc = cstate->crtc; const struct drm_display_mode *mode = &crtc->state->adjusted_mode; union hdmi_infoframe frame; int ret; @@ -477,15 +478,10 @@ static void vc4_hdmi_set_spd_infoframe(struct drm_encoder *encoder) static void vc4_hdmi_set_audio_infoframe(struct drm_encoder *encoder) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + struct hdmi_audio_infoframe *audio = &vc4_hdmi->audio.infoframe; union hdmi_infoframe frame; - hdmi_audio_infoframe_init(&frame.audio); - - frame.audio.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM; - frame.audio.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM; - frame.audio.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM; - frame.audio.channels = vc4_hdmi->audio.channels; - + memcpy(&frame.audio, audio, sizeof(*audio)); vc4_hdmi_write_infoframe(encoder, &frame); } @@ -545,8 +541,11 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder, static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) { - struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + struct drm_connector *connector = &vc4_hdmi->connector; + struct drm_connector_state *cstate = connector->state; + struct drm_crtc *crtc = cstate->crtc; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; if (!vc4_hdmi_supports_scrambling(encoder, mode)) return; @@ -567,17 +566,18 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); - struct drm_crtc *crtc = encoder->crtc; + struct drm_connector *connector = &vc4_hdmi->connector; + struct drm_connector_state *cstate = connector->state; /* - * At boot, encoder->crtc will be NULL. Since we don't know the + * At boot, connector->state will be NULL. Since we don't know the * state of the scrambler and in order to avoid any * inconsistency, let's disable it all the time. 
*/ - if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode)) + if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode)) return; - if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode)) + if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode)) return; if (delayed_work_pending(&vc4_hdmi->scrambling_work)) @@ -613,12 +613,12 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder, HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, 0); - HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | - VC4_HD_VID_CTL_CLRRGB | VC4_HD_VID_CTL_CLRSYNC); + HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_CLRRGB); - HDMI_WRITE(HDMI_VID_CTL, - HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX); + mdelay(1); + HDMI_WRITE(HDMI_VID_CTL, + HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE); vc4_hdmi_disable_scrambling(encoder); } @@ -628,12 +628,12 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder, struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); int ret; + HDMI_WRITE(HDMI_VID_CTL, + HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX); + if (vc4_hdmi->variant->phy_disable) vc4_hdmi->variant->phy_disable(vc4_hdmi); - HDMI_WRITE(HDMI_VID_CTL, - HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE); - clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock); clk_disable_unprepare(vc4_hdmi->pixel_clock); @@ -898,7 +898,9 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, vc4_hdmi_encoder_get_connector_state(encoder, state); struct vc4_hdmi_connector_state *vc4_conn_state = conn_state_to_vc4_hdmi_conn_state(conn_state); - struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, conn_state->crtc); + struct drm_display_mode *mode = &crtc_state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); unsigned long bvb_rate, pixel_rate, hsm_rate; int ret; @@ -983,7 +985,11 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; + struct drm_connector_state *conn_state = + vc4_hdmi_encoder_get_connector_state(encoder, state); + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, conn_state->crtc); + struct drm_display_mode *mode = &crtc_state->adjusted_mode; struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); @@ -1006,7 +1012,11 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder, static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; + struct drm_connector_state *conn_state = + vc4_hdmi_encoder_get_connector_state(encoder, state); + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, conn_state->crtc); + struct drm_display_mode *mode = &crtc_state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; @@ -1015,6 +1025,7 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, HDMI_WRITE(HDMI_VID_CTL, VC4_HD_VID_CTL_ENABLE | + VC4_HD_VID_CTL_CLRRGB | 
VC4_HD_VID_CTL_UNDERFLOW_ENABLE | VC4_HD_VID_CTL_FRAME_COUNTER_RESET | (vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) | @@ -1173,12 +1184,13 @@ static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask) } /* HDMI audio codec callbacks */ -static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi) +static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi, + unsigned int samplerate) { u32 hsm_clock = clk_get_rate(vc4_hdmi->audio_clock); unsigned long n, m; - rational_best_approximation(hsm_clock, vc4_hdmi->audio.samplerate, + rational_best_approximation(hsm_clock, samplerate, VC4_HD_MAI_SMP_N_MASK >> VC4_HD_MAI_SMP_N_SHIFT, (VC4_HD_MAI_SMP_M_MASK >> @@ -1190,12 +1202,11 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi) VC4_SET_FIELD(m - 1, VC4_HD_MAI_SMP_M)); } -static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi) +static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate) { - struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; - struct drm_crtc *crtc = encoder->crtc; + struct drm_connector *connector = &vc4_hdmi->connector; + struct drm_crtc *crtc = connector->state->crtc; const struct drm_display_mode *mode = &crtc->state->adjusted_mode; - u32 samplerate = vc4_hdmi->audio.samplerate; u32 n, cts; u64 tmp; @@ -1224,36 +1235,31 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai) return snd_soc_card_get_drvdata(card); } -static int vc4_hdmi_audio_startup(struct snd_pcm_substream *substream, - struct snd_soc_dai *dai) +static int vc4_hdmi_audio_startup(struct device *dev, void *data) { - struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai); - struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; + struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); struct drm_connector *connector = &vc4_hdmi->connector; - int ret; - - if (vc4_hdmi->audio.substream && vc4_hdmi->audio.substream != substream) - return -EINVAL; - - vc4_hdmi->audio.substream = substream; /* * If the HDMI encoder hasn't probed, or the encoder is * currently in DVI mode, treat the codec dai as missing. 
*/ - if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & + if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & VC4_HDMI_RAM_PACKET_ENABLE)) return -ENODEV; - ret = snd_pcm_hw_constraint_eld(substream->runtime, connector->eld); - if (ret) - return ret; + vc4_hdmi->audio.streaming = true; - return 0; -} + HDMI_WRITE(HDMI_MAI_CTL, + VC4_HD_MAI_CTL_RESET | + VC4_HD_MAI_CTL_FLUSH | + VC4_HD_MAI_CTL_DLATE | + VC4_HD_MAI_CTL_ERRORE | + VC4_HD_MAI_CTL_ERRORF); + + if (vc4_hdmi->variant->phy_rng_enable) + vc4_hdmi->variant->phy_rng_enable(vc4_hdmi); -static int vc4_hdmi_audio_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) -{ return 0; } @@ -1273,48 +1279,96 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi) HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_FLUSH); } -static void vc4_hdmi_audio_shutdown(struct snd_pcm_substream *substream, - struct snd_soc_dai *dai) +static void vc4_hdmi_audio_shutdown(struct device *dev, void *data) { - struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai); + struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); - if (substream != vc4_hdmi->audio.substream) - return; + HDMI_WRITE(HDMI_MAI_CTL, + VC4_HD_MAI_CTL_DLATE | + VC4_HD_MAI_CTL_ERRORE | + VC4_HD_MAI_CTL_ERRORF); + + if (vc4_hdmi->variant->phy_rng_disable) + vc4_hdmi->variant->phy_rng_disable(vc4_hdmi); + vc4_hdmi->audio.streaming = false; vc4_hdmi_audio_reset(vc4_hdmi); +} - vc4_hdmi->audio.substream = NULL; +static int sample_rate_to_mai_fmt(int samplerate) +{ + switch (samplerate) { + case 8000: + return VC4_HDMI_MAI_SAMPLE_RATE_8000; + case 11025: + return VC4_HDMI_MAI_SAMPLE_RATE_11025; + case 12000: + return VC4_HDMI_MAI_SAMPLE_RATE_12000; + case 16000: + return VC4_HDMI_MAI_SAMPLE_RATE_16000; + case 22050: + return VC4_HDMI_MAI_SAMPLE_RATE_22050; + case 24000: + return VC4_HDMI_MAI_SAMPLE_RATE_24000; + case 32000: + return VC4_HDMI_MAI_SAMPLE_RATE_32000; + case 44100: + return VC4_HDMI_MAI_SAMPLE_RATE_44100; + case 48000: + return VC4_HDMI_MAI_SAMPLE_RATE_48000; + case 64000: + return VC4_HDMI_MAI_SAMPLE_RATE_64000; + case 88200: + return VC4_HDMI_MAI_SAMPLE_RATE_88200; + case 96000: + return VC4_HDMI_MAI_SAMPLE_RATE_96000; + case 128000: + return VC4_HDMI_MAI_SAMPLE_RATE_128000; + case 176400: + return VC4_HDMI_MAI_SAMPLE_RATE_176400; + case 192000: + return VC4_HDMI_MAI_SAMPLE_RATE_192000; + default: + return VC4_HDMI_MAI_SAMPLE_RATE_NOT_INDICATED; + } } /* HDMI audio codec callbacks */ -static int vc4_hdmi_audio_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params, - struct snd_soc_dai *dai) +static int vc4_hdmi_audio_prepare(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) { - struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai); + struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; - struct device *dev = &vc4_hdmi->pdev->dev; + unsigned int sample_rate = params->sample_rate; + unsigned int channels = params->channels; u32 audio_packet_config, channel_mask; u32 channel_map; - - if (substream != vc4_hdmi->audio.substream) - return -EINVAL; + u32 mai_audio_format; + u32 mai_sample_rate; dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__, - params_rate(params), params_width(params), - params_channels(params)); - - vc4_hdmi->audio.channels = params_channels(params); - vc4_hdmi->audio.samplerate = params_rate(params); + sample_rate, params->sample_width, channels); HDMI_WRITE(HDMI_MAI_CTL, - VC4_HD_MAI_CTL_RESET | - VC4_HD_MAI_CTL_FLUSH | - 
VC4_HD_MAI_CTL_DLATE | - VC4_HD_MAI_CTL_ERRORE | - VC4_HD_MAI_CTL_ERRORF); + VC4_SET_FIELD(channels, VC4_HD_MAI_CTL_CHNUM) | + VC4_HD_MAI_CTL_WHOLSMP | + VC4_HD_MAI_CTL_CHALIGN | + VC4_HD_MAI_CTL_ENABLE); + + vc4_hdmi_audio_set_mai_clock(vc4_hdmi, sample_rate); - vc4_hdmi_audio_set_mai_clock(vc4_hdmi); + mai_sample_rate = sample_rate_to_mai_fmt(sample_rate); + if (params->iec.status[0] & IEC958_AES0_NONAUDIO && + params->channels == 8) + mai_audio_format = VC4_HDMI_MAI_FORMAT_HBR; + else + mai_audio_format = VC4_HDMI_MAI_FORMAT_PCM; + HDMI_WRITE(HDMI_MAI_FMT, + VC4_SET_FIELD(mai_sample_rate, + VC4_HDMI_MAI_FORMAT_SAMPLE_RATE) | + VC4_SET_FIELD(mai_audio_format, + VC4_HDMI_MAI_FORMAT_AUDIO_FORMAT)); /* The B frame identifier should match the value used by alsa-lib (8) */ audio_packet_config = @@ -1322,122 +1376,33 @@ static int vc4_hdmi_audio_hw_params(struct snd_pcm_substream *substream, VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_INACTIVE_CHANNELS | VC4_SET_FIELD(0x8, VC4_HDMI_AUDIO_PACKET_B_FRAME_IDENTIFIER); - channel_mask = GENMASK(vc4_hdmi->audio.channels - 1, 0); + channel_mask = GENMASK(channels - 1, 0); audio_packet_config |= VC4_SET_FIELD(channel_mask, VC4_HDMI_AUDIO_PACKET_CEA_MASK); - /* Set the MAI threshold. This logic mimics the firmware's. */ - if (vc4_hdmi->audio.samplerate > 96000) { - HDMI_WRITE(HDMI_MAI_THR, - VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW)); - } else if (vc4_hdmi->audio.samplerate > 48000) { - HDMI_WRITE(HDMI_MAI_THR, - VC4_SET_FIELD(0x14, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW)); - } else { - HDMI_WRITE(HDMI_MAI_THR, - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) | - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) | - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQLOW)); - } + /* Set the MAI threshold */ + HDMI_WRITE(HDMI_MAI_THR, + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) | + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) | + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQHIGH) | + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQLOW)); HDMI_WRITE(HDMI_MAI_CONFIG, VC4_HDMI_MAI_CONFIG_BIT_REVERSE | + VC4_HDMI_MAI_CONFIG_FORMAT_REVERSE | VC4_SET_FIELD(channel_mask, VC4_HDMI_MAI_CHANNEL_MASK)); channel_map = vc4_hdmi->variant->channel_map(vc4_hdmi, channel_mask); HDMI_WRITE(HDMI_MAI_CHANNEL_MAP, channel_map); HDMI_WRITE(HDMI_AUDIO_PACKET_CONFIG, audio_packet_config); - vc4_hdmi_set_n_cts(vc4_hdmi); + vc4_hdmi_set_n_cts(vc4_hdmi, sample_rate); + memcpy(&vc4_hdmi->audio.infoframe, ¶ms->cea, sizeof(params->cea)); vc4_hdmi_set_audio_infoframe(encoder); return 0; } -static int vc4_hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd, - struct snd_soc_dai *dai) -{ - struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai); - - switch (cmd) { - case SNDRV_PCM_TRIGGER_START: - vc4_hdmi->audio.streaming = true; - - if (vc4_hdmi->variant->phy_rng_enable) - vc4_hdmi->variant->phy_rng_enable(vc4_hdmi); - - HDMI_WRITE(HDMI_MAI_CTL, - VC4_SET_FIELD(vc4_hdmi->audio.channels, - VC4_HD_MAI_CTL_CHNUM) | - VC4_HD_MAI_CTL_ENABLE); - break; - case SNDRV_PCM_TRIGGER_STOP: - HDMI_WRITE(HDMI_MAI_CTL, - VC4_HD_MAI_CTL_DLATE | - VC4_HD_MAI_CTL_ERRORE | - VC4_HD_MAI_CTL_ERRORF); - - if (vc4_hdmi->variant->phy_rng_disable) - vc4_hdmi->variant->phy_rng_disable(vc4_hdmi); - - vc4_hdmi->audio.streaming = false; - - break; - default: - break; - } - - return 0; -} - -static inline struct vc4_hdmi * -snd_component_to_hdmi(struct snd_soc_component *component) -{ - struct snd_soc_card *card = 
snd_soc_component_get_drvdata(component); - - return snd_soc_card_get_drvdata(card); -} - -static int vc4_hdmi_audio_eld_ctl_info(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *uinfo) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct vc4_hdmi *vc4_hdmi = snd_component_to_hdmi(component); - struct drm_connector *connector = &vc4_hdmi->connector; - - uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES; - uinfo->count = sizeof(connector->eld); - - return 0; -} - -static int vc4_hdmi_audio_eld_ctl_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct vc4_hdmi *vc4_hdmi = snd_component_to_hdmi(component); - struct drm_connector *connector = &vc4_hdmi->connector; - - memcpy(ucontrol->value.bytes.data, connector->eld, - sizeof(connector->eld)); - - return 0; -} - -static const struct snd_kcontrol_new vc4_hdmi_audio_controls[] = { - { - .access = SNDRV_CTL_ELEM_ACCESS_READ | - SNDRV_CTL_ELEM_ACCESS_VOLATILE, - .iface = SNDRV_CTL_ELEM_IFACE_PCM, - .name = "ELD", - .info = vc4_hdmi_audio_eld_ctl_info, - .get = vc4_hdmi_audio_eld_ctl_get, - }, -}; - static const struct snd_soc_dapm_widget vc4_hdmi_audio_widgets[] = { SND_SOC_DAPM_OUTPUT("TX"), }; @@ -1446,42 +1411,6 @@ static const struct snd_soc_dapm_route vc4_hdmi_audio_routes[] = { { "TX", NULL, "Playback" }, }; -static const struct snd_soc_component_driver vc4_hdmi_audio_component_drv = { - .name = "vc4-hdmi-codec-dai-component", - .controls = vc4_hdmi_audio_controls, - .num_controls = ARRAY_SIZE(vc4_hdmi_audio_controls), - .dapm_widgets = vc4_hdmi_audio_widgets, - .num_dapm_widgets = ARRAY_SIZE(vc4_hdmi_audio_widgets), - .dapm_routes = vc4_hdmi_audio_routes, - .num_dapm_routes = ARRAY_SIZE(vc4_hdmi_audio_routes), - .idle_bias_on = 1, - .use_pmdown_time = 1, - .endianness = 1, - .non_legacy_dai_naming = 1, -}; - -static const struct snd_soc_dai_ops vc4_hdmi_audio_dai_ops = { - .startup = vc4_hdmi_audio_startup, - .shutdown = vc4_hdmi_audio_shutdown, - .hw_params = vc4_hdmi_audio_hw_params, - .set_fmt = vc4_hdmi_audio_set_fmt, - .trigger = vc4_hdmi_audio_trigger, -}; - -static struct snd_soc_dai_driver vc4_hdmi_audio_codec_dai_drv = { - .name = "vc4-hdmi-hifi", - .playback = { - .stream_name = "Playback", - .channels_min = 2, - .channels_max = 8, - .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | - SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | - SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 | - SNDRV_PCM_RATE_192000, - .formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE, - }, -}; - static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = { .name = "vc4-hdmi-cpu-dai-component", }; @@ -1508,7 +1437,6 @@ static struct snd_soc_dai_driver vc4_hdmi_audio_cpu_dai_drv = { SNDRV_PCM_RATE_192000, .formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE, }, - .ops = &vc4_hdmi_audio_dai_ops, }; static const struct snd_dmaengine_pcm_config pcm_conf = { @@ -1516,6 +1444,30 @@ static const struct snd_dmaengine_pcm_config pcm_conf = { .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config, }; +static int vc4_hdmi_audio_get_eld(struct device *dev, void *data, + uint8_t *buf, size_t len) +{ + struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); + struct drm_connector *connector = &vc4_hdmi->connector; + + memcpy(buf, connector->eld, min(sizeof(connector->eld), len)); + + return 0; +} + +static const struct hdmi_codec_ops vc4_hdmi_codec_ops = { + .get_eld = vc4_hdmi_audio_get_eld, + .prepare = vc4_hdmi_audio_prepare, + 
.audio_shutdown = vc4_hdmi_audio_shutdown, + .audio_startup = vc4_hdmi_audio_startup, +}; + +struct hdmi_codec_pdata vc4_hdmi_codec_pdata = { + .ops = &vc4_hdmi_codec_ops, + .max_i2s_channels = 8, + .i2s = 1, +}; + static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) { const struct vc4_hdmi_register *mai_data = @@ -1523,6 +1475,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) struct snd_soc_dai_link *dai_link = &vc4_hdmi->audio.link; struct snd_soc_card *card = &vc4_hdmi->audio.card; struct device *dev = &vc4_hdmi->pdev->dev; + struct platform_device *codec_pdev; const __be32 *addr; int index; int ret; @@ -1569,12 +1522,13 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) return ret; } - /* register component and codec dai */ - ret = devm_snd_soc_register_component(dev, &vc4_hdmi_audio_component_drv, - &vc4_hdmi_audio_codec_dai_drv, 1); - if (ret) { - dev_err(dev, "Could not register component: %d\n", ret); - return ret; + codec_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &vc4_hdmi_codec_pdata, + sizeof(vc4_hdmi_codec_pdata)); + if (IS_ERR(codec_pdev)) { + dev_err(dev, "Couldn't register the HDMI codec: %ld\n", PTR_ERR(codec_pdev)); + return PTR_ERR(codec_pdev); } dai_link->cpus = &vc4_hdmi->audio.cpu; @@ -1587,9 +1541,9 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) dai_link->name = "MAI"; dai_link->stream_name = "MAI PCM"; - dai_link->codecs->dai_name = vc4_hdmi_audio_codec_dai_drv.name; + dai_link->codecs->dai_name = "i2s-hifi"; dai_link->cpus->dai_name = dev_name(dev); - dai_link->codecs->name = dev_name(dev); + dai_link->codecs->name = dev_name(&codec_pdev->dev); dai_link->platforms->name = dev_name(dev); card->dai_link = dai_link; @@ -1609,12 +1563,65 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) snd_soc_card_set_drvdata(card, vc4_hdmi); ret = devm_snd_soc_register_card(dev, card); if (ret) - dev_err(dev, "Could not register sound card: %d\n", ret); + dev_err_probe(dev, ret, "Could not register sound card\n"); return ret; } +static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv) +{ + struct vc4_hdmi *vc4_hdmi = priv; + struct drm_device *dev = vc4_hdmi->connector.dev; + + if (dev && dev->registered) + drm_kms_helper_hotplug_event(dev); + + return IRQ_HANDLED; +} + +static int vc4_hdmi_hotplug_init(struct vc4_hdmi *vc4_hdmi) +{ + struct drm_connector *connector = &vc4_hdmi->connector; + struct platform_device *pdev = vc4_hdmi->pdev; + int ret; + + if (vc4_hdmi->variant->external_irq_controller) { + unsigned int hpd_con = platform_get_irq_byname(pdev, "hpd-connected"); + unsigned int hpd_rm = platform_get_irq_byname(pdev, "hpd-removed"); + + ret = request_threaded_irq(hpd_con, + NULL, + vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT, + "vc4 hdmi hpd connected", vc4_hdmi); + if (ret) + return ret; + + ret = request_threaded_irq(hpd_rm, + NULL, + vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT, + "vc4 hdmi hpd disconnected", vc4_hdmi); + if (ret) { + free_irq(hpd_con, vc4_hdmi); + return ret; + } + + connector->polled = DRM_CONNECTOR_POLL_HPD; + } + + return 0; +} + +static void vc4_hdmi_hotplug_exit(struct vc4_hdmi *vc4_hdmi) +{ + struct platform_device *pdev = vc4_hdmi->pdev; + + if (vc4_hdmi->variant->external_irq_controller) { + free_irq(platform_get_irq_byname(pdev, "hpd-connected"), vc4_hdmi); + free_irq(platform_get_irq_byname(pdev, "hpd-removed"), vc4_hdmi); + } +} + #ifdef CONFIG_DRM_VC4_HDMI_CEC static irqreturn_t vc4_cec_irq_handler_rx_thread(int irq, void *priv) { @@ -2213,10 +2220,14 
@@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) if (ret) goto err_destroy_encoder; - ret = vc4_hdmi_cec_init(vc4_hdmi); + ret = vc4_hdmi_hotplug_init(vc4_hdmi); if (ret) goto err_destroy_conn; + ret = vc4_hdmi_cec_init(vc4_hdmi); + if (ret) + goto err_free_hotplug; + ret = vc4_hdmi_audio_init(vc4_hdmi); if (ret) goto err_free_cec; @@ -2229,6 +2240,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) err_free_cec: vc4_hdmi_cec_exit(vc4_hdmi); +err_free_hotplug: + vc4_hdmi_hotplug_exit(vc4_hdmi); err_destroy_conn: vc4_hdmi_connector_destroy(&vc4_hdmi->connector); err_destroy_encoder: @@ -2270,6 +2283,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master, kfree(vc4_hdmi->hd_regset.regs); vc4_hdmi_cec_exit(vc4_hdmi); + vc4_hdmi_hotplug_exit(vc4_hdmi); vc4_hdmi_connector_destroy(&vc4_hdmi->connector); drm_encoder_cleanup(&vc4_hdmi->encoder.base.base); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index 884d245507a9..33e9f665ab8e 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -111,11 +111,8 @@ struct vc4_hdmi_audio { struct snd_soc_dai_link_component cpu; struct snd_soc_dai_link_component codec; struct snd_soc_dai_link_component platform; - int samplerate; - int channels; struct snd_dmaengine_dai_dma_data dma_data; - struct snd_pcm_substream *substream; - + struct hdmi_audio_infoframe infoframe; bool streaming; }; diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 6a1a9e1d72ce..f0b3e4cf5bce 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -880,7 +880,6 @@ int vc4_kms_load(struct drm_device *dev) /* Set support for vblank irq fast disable, before drm_vblank_init() */ dev->vblank_disable_immediate = true; - dev->irq_enabled = true; ret = drm_vblank_init(dev, dev->mode_config.num_crtc); if (ret < 0) { dev_err(dev->dev, "failed to initialize vblank\n"); diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index be2c32a519b3..489f921ef44d 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -516,6 +516,36 @@ # define VC4_HDMI_AUDIO_PACKET_CEA_MASK_MASK VC4_MASK(7, 0) # define VC4_HDMI_AUDIO_PACKET_CEA_MASK_SHIFT 0 +# define VC4_HDMI_MAI_FORMAT_AUDIO_FORMAT_MASK VC4_MASK(23, 16) +# define VC4_HDMI_MAI_FORMAT_AUDIO_FORMAT_SHIFT 16 + +enum { + VC4_HDMI_MAI_FORMAT_PCM = 2, + VC4_HDMI_MAI_FORMAT_HBR = 200, +}; + +# define VC4_HDMI_MAI_FORMAT_SAMPLE_RATE_MASK VC4_MASK(15, 8) +# define VC4_HDMI_MAI_FORMAT_SAMPLE_RATE_SHIFT 8 + +enum { + VC4_HDMI_MAI_SAMPLE_RATE_NOT_INDICATED = 0, + VC4_HDMI_MAI_SAMPLE_RATE_8000 = 1, + VC4_HDMI_MAI_SAMPLE_RATE_11025 = 2, + VC4_HDMI_MAI_SAMPLE_RATE_12000 = 3, + VC4_HDMI_MAI_SAMPLE_RATE_16000 = 4, + VC4_HDMI_MAI_SAMPLE_RATE_22050 = 5, + VC4_HDMI_MAI_SAMPLE_RATE_24000 = 6, + VC4_HDMI_MAI_SAMPLE_RATE_32000 = 7, + VC4_HDMI_MAI_SAMPLE_RATE_44100 = 8, + VC4_HDMI_MAI_SAMPLE_RATE_48000 = 9, + VC4_HDMI_MAI_SAMPLE_RATE_64000 = 10, + VC4_HDMI_MAI_SAMPLE_RATE_88200 = 11, + VC4_HDMI_MAI_SAMPLE_RATE_96000 = 12, + VC4_HDMI_MAI_SAMPLE_RATE_128000 = 13, + VC4_HDMI_MAI_SAMPLE_RATE_176400 = 14, + VC4_HDMI_MAI_SAMPLE_RATE_192000 = 15, +}; + # define VC4_HDMI_RAM_PACKET_ENABLE BIT(16) /* When set, the CTS_PERIOD counts based on MAI bus sync pulse instead diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index ca77edbc5ea0..ed85a7863256 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ 
b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -57,7 +57,7 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd vga ? "virtio-vga" : "virtio-gpu-pci", pname); if (vga) { - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "virtiodrmfb"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); if (ret) return ret; } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index d9dbc4f258f3..d4e610a44e12 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -315,7 +315,9 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, uint32_t resource_id, uint32_t x, uint32_t y, - uint32_t width, uint32_t height); + uint32_t width, uint32_t height, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence); void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev, uint32_t scanout_id, uint32_t resource_id, uint32_t width, uint32_t height, diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 4e1b17548007..a49fd9480381 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -129,6 +129,40 @@ static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev, objs, NULL); } +static void virtio_gpu_resource_flush(struct drm_plane *plane, + uint32_t x, uint32_t y, + uint32_t width, uint32_t height) +{ + struct drm_device *dev = plane->dev; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_framebuffer *vgfb; + struct virtio_gpu_object *bo; + + vgfb = to_virtio_gpu_framebuffer(plane->state->fb); + bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); + if (vgfb->fence) { + struct virtio_gpu_object_array *objs; + + objs = virtio_gpu_array_alloc(1); + if (!objs) + return; + virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); + virtio_gpu_array_lock_resv(objs); + virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y, + width, height, objs, vgfb->fence); + virtio_gpu_notify(vgdev); + + dma_fence_wait_timeout(&vgfb->fence->f, true, + msecs_to_jiffies(50)); + dma_fence_put(&vgfb->fence->f); + vgfb->fence = NULL; + } else { + virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y, + width, height, NULL, NULL); + virtio_gpu_notify(vgdev); + } +} + static void virtio_gpu_primary_plane_update(struct drm_plane *plane, struct drm_atomic_state *state) { @@ -198,16 +232,15 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane, } } - virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, - rect.x1, - rect.y1, - rect.x2 - rect.x1, - rect.y2 - rect.y1); - virtio_gpu_notify(vgdev); + virtio_gpu_resource_flush(plane, + rect.x1, + rect.y1, + rect.x2 - rect.x1, + rect.y2 - rect.y1); } -static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) +static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) { struct drm_device *dev = plane->dev; struct virtio_gpu_device *vgdev = dev->dev_private; @@ -219,7 +252,10 @@ static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane, vgfb = to_virtio_gpu_framebuffer(new_state->fb); bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); - if (bo && bo->dumb && (plane->state->fb != new_state->fb)) { + if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob)) + return 0; + + if (bo->dumb && (plane->state->fb != new_state->fb)) 
{ vgfb->fence = virtio_gpu_fence_alloc(vgdev); if (!vgfb->fence) return -ENOMEM; @@ -228,8 +264,8 @@ static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane, return 0; } -static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) +static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) { struct virtio_gpu_framebuffer *vgfb; @@ -321,13 +357,15 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = { + .prepare_fb = virtio_gpu_plane_prepare_fb, + .cleanup_fb = virtio_gpu_plane_cleanup_fb, .atomic_check = virtio_gpu_plane_atomic_check, .atomic_update = virtio_gpu_primary_plane_update, }; static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = { - .prepare_fb = virtio_gpu_cursor_prepare_fb, - .cleanup_fb = virtio_gpu_cursor_cleanup_fb, + .prepare_fb = virtio_gpu_plane_prepare_fb, + .cleanup_fb = virtio_gpu_plane_cleanup_fb, .atomic_check = virtio_gpu_plane_atomic_check, .atomic_update = virtio_gpu_cursor_plane_update, }; diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index cf84d382dd41..2e71e91278b4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -576,13 +576,16 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev, void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, uint32_t resource_id, uint32_t x, uint32_t y, - uint32_t width, uint32_t height) + uint32_t width, uint32_t height, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) { struct virtio_gpu_resource_flush *cmd_p; struct virtio_gpu_vbuffer *vbuf; cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH); cmd_p->resource_id = cpu_to_le32(resource_id); @@ -591,7 +594,7 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, cmd_p->r.x = cpu_to_le32(x); cmd_p->r.y = cpu_to_le32(y); - virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); } void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c index e49523866e1d..ead8fff81f30 100644 --- a/drivers/gpu/drm/vkms/vkms_composer.c +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -6,7 +6,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> -#include <drm/drm_gem_shmem_helper.h> #include <drm/drm_vblank.h> #include "vkms_drv.h" @@ -154,24 +153,21 @@ static void compose_plane(struct vkms_composer *primary_composer, struct vkms_composer *plane_composer, void *vaddr_out) { - struct drm_gem_object *plane_obj; - struct drm_gem_shmem_object *plane_shmem_obj; struct drm_framebuffer *fb = &plane_composer->fb; + void *vaddr; void (*pixel_blend)(const u8 *p_src, u8 *p_dst); - plane_obj = drm_gem_fb_get_obj(&plane_composer->fb, 0); - plane_shmem_obj = to_drm_gem_shmem_obj(plane_obj); - - if (WARN_ON(!plane_shmem_obj->vaddr)) + if (WARN_ON(dma_buf_map_is_null(&primary_composer->map[0]))) return; + vaddr = plane_composer->map[0].vaddr; + if (fb->format->format == DRM_FORMAT_ARGB8888) pixel_blend = &alpha_blend; else pixel_blend = &x_blend; - blend(vaddr_out, plane_shmem_obj->vaddr, primary_composer, - plane_composer, 
pixel_blend); + blend(vaddr_out, vaddr, primary_composer, plane_composer, pixel_blend); } static int compose_active_planes(void **vaddr_out, @@ -180,21 +176,23 @@ static int compose_active_planes(void **vaddr_out, { struct drm_framebuffer *fb = &primary_composer->fb; struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0); - struct drm_gem_shmem_object *shmem_obj = to_drm_gem_shmem_obj(gem_obj); + const void *vaddr; int i; if (!*vaddr_out) { - *vaddr_out = kzalloc(shmem_obj->base.size, GFP_KERNEL); + *vaddr_out = kzalloc(gem_obj->size, GFP_KERNEL); if (!*vaddr_out) { DRM_ERROR("Cannot allocate memory for output frame."); return -ENOMEM; } } - if (WARN_ON(!shmem_obj->vaddr)) + if (WARN_ON(dma_buf_map_is_null(&primary_composer->map[0]))) return -EINVAL; - memcpy(*vaddr_out, shmem_obj->vaddr, shmem_obj->base.size); + vaddr = primary_composer->map[0].vaddr; + + memcpy(*vaddr_out, vaddr, gem_obj->size); /* If there are other planes besides primary, we consider the active * planes should be in z-order and compose them associatively: @@ -251,7 +249,7 @@ void vkms_composer_worker(struct work_struct *work) if (crtc_state->num_active_planes >= 1) { act_plane = crtc_state->active_planes[0]; - if (act_plane->base.plane->type == DRM_PLANE_TYPE_PRIMARY) + if (act_plane->base.base.plane->type == DRM_PLANE_TYPE_PRIMARY) primary_composer = act_plane->composer; } diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 027ffe759440..0ffe5f0e33f7 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -28,6 +28,9 @@ #include "vkms_drv.h" +#include <drm/drm_print.h> +#include <drm/drm_debugfs.h> + #define DRIVER_NAME "vkms" #define DRIVER_DESC "Virtual Kernel Mode Setting" #define DRIVER_DATE "20180514" @@ -52,7 +55,7 @@ DEFINE_DRM_GEM_FOPS(vkms_driver_fops); static void vkms_release(struct drm_device *dev) { - struct vkms_device *vkms = container_of(dev, struct vkms_device, drm); + struct vkms_device *vkms = drm_device_to_vkms_device(dev); destroy_workqueue(vkms->output.composer_workq); } @@ -86,12 +89,37 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_cleanup_planes(dev, old_state); } +static int vkms_config_show(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev); + + seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback); + seq_printf(m, "cursor=%d\n", vkmsdev->config->cursor); + seq_printf(m, "overlay=%d\n", vkmsdev->config->overlay); + + return 0; +} + +static const struct drm_info_list vkms_config_debugfs_list[] = { + { "vkms_config", vkms_config_show, 0 }, +}; + +static void vkms_config_debugfs_init(struct drm_minor *minor) +{ + drm_debugfs_create_files(vkms_config_debugfs_list, ARRAY_SIZE(vkms_config_debugfs_list), + minor->debugfs_root, minor); +} + static const struct drm_driver vkms_driver = { .driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM, .release = vkms_release, .fops = &vkms_driver_fops, DRM_GEM_SHMEM_DRIVER_OPS, + .debugfs_init = vkms_config_debugfs_init, + .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, @@ -163,8 +191,6 @@ static int vkms_create(struct vkms_config *config) goto out_devres; } - vkms_device->drm.irq_enabled = true; - ret = drm_vblank_init(&vkms_device->drm, 1); if (ret) { DRM_ERROR("Failed to vblank\n"); diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h 
index ac8c9c2fa4ed..8c731b6dcba7 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -7,6 +7,7 @@ #include <drm/drm.h> #include <drm/drm_gem.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_encoder.h> #include <drm/drm_writeback.h> @@ -22,6 +23,7 @@ struct vkms_composer { struct drm_framebuffer fb; struct drm_rect src, dst; + struct dma_buf_map map[4]; unsigned int offset; unsigned int pitch; unsigned int cpp; @@ -33,7 +35,7 @@ struct vkms_composer { * @composer: data required for composing computation */ struct vkms_plane_state { - struct drm_plane_state base; + struct drm_shadow_plane_state base; struct vkms_composer *composer; }; @@ -111,7 +113,7 @@ struct vkms_device { container_of(target, struct vkms_crtc_state, base) #define to_vkms_plane_state(target)\ - container_of(target, struct vkms_plane_state, base) + container_of(target, struct vkms_plane_state, base.base) /* CRTC */ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 107521ace597..8a56fbf572b0 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -8,7 +8,6 @@ #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> -#include <drm/drm_gem_shmem_helper.h> #include "vkms_drv.h" @@ -40,17 +39,16 @@ vkms_plane_duplicate_state(struct drm_plane *plane) vkms_state->composer = composer; - __drm_atomic_helper_plane_duplicate_state(plane, - &vkms_state->base); + __drm_gem_duplicate_shadow_plane_state(plane, &vkms_state->base); - return &vkms_state->base; + return &vkms_state->base.base; } static void vkms_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *old_state) { struct vkms_plane_state *vkms_state = to_vkms_plane_state(old_state); - struct drm_crtc *crtc = vkms_state->base.crtc; + struct drm_crtc *crtc = vkms_state->base.base.crtc; if (crtc) { /* dropping the reference we acquired in @@ -63,7 +61,7 @@ static void vkms_plane_destroy_state(struct drm_plane *plane, kfree(vkms_state->composer); vkms_state->composer = NULL; - __drm_atomic_helper_plane_destroy_state(old_state); + __drm_gem_destroy_shadow_plane_state(&vkms_state->base); kfree(vkms_state); } @@ -71,8 +69,10 @@ static void vkms_plane_reset(struct drm_plane *plane) { struct vkms_plane_state *vkms_state; - if (plane->state) + if (plane->state) { vkms_plane_destroy_state(plane, plane->state); + plane->state = NULL; /* must be set to NULL here */ + } vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL); if (!vkms_state) { @@ -80,8 +80,7 @@ static void vkms_plane_reset(struct drm_plane *plane) return; } - plane->state = &vkms_state->base; - plane->state->plane = plane; + __drm_gem_reset_shadow_plane(plane, &vkms_state->base); } static const struct drm_plane_funcs vkms_plane_funcs = { @@ -98,6 +97,7 @@ static void vkms_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct vkms_plane_state *vkms_plane_state; + struct drm_shadow_plane_state *shadow_plane_state; struct drm_framebuffer *fb = new_state->fb; struct vkms_composer *composer; @@ -105,11 +105,13 @@ static void vkms_plane_atomic_update(struct drm_plane *plane, return; vkms_plane_state = to_vkms_plane_state(new_state); + shadow_plane_state = &vkms_plane_state->base; composer = vkms_plane_state->composer; memcpy(&composer->src, &new_state->src, sizeof(struct drm_rect)); 
memcpy(&composer->dst, &new_state->dst, sizeof(struct drm_rect)); memcpy(&composer->fb, fb, sizeof(struct drm_framebuffer)); + memcpy(&composer->map, &shadow_plane_state->map, sizeof(composer->map)); drm_framebuffer_get(&composer->fb); composer->offset = fb->offsets[0]; composer->pitch = fb->pitches[0]; @@ -150,45 +152,10 @@ static int vkms_plane_atomic_check(struct drm_plane *plane, return 0; } -static int vkms_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *state) -{ - struct drm_gem_object *gem_obj; - struct dma_buf_map map; - int ret; - - if (!state->fb) - return 0; - - gem_obj = drm_gem_fb_get_obj(state->fb, 0); - ret = drm_gem_shmem_vmap(gem_obj, &map); - if (ret) - DRM_ERROR("vmap failed: %d\n", ret); - - return drm_gem_plane_helper_prepare_fb(plane, state); -} - -static void vkms_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct drm_gem_object *gem_obj; - struct drm_gem_shmem_object *shmem_obj; - struct dma_buf_map map; - - if (!old_state->fb) - return; - - gem_obj = drm_gem_fb_get_obj(old_state->fb, 0); - shmem_obj = to_drm_gem_shmem_obj(drm_gem_fb_get_obj(old_state->fb, 0)); - dma_buf_map_set_vaddr(&map, shmem_obj->vaddr); - drm_gem_shmem_vunmap(gem_obj, &map); -} - static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = { .atomic_update = vkms_plane_atomic_update, .atomic_check = vkms_plane_atomic_check, - .prepare_fb = vkms_prepare_fb, - .cleanup_fb = vkms_cleanup_fb, + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, }; struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev, diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index 0060ef842b5a..c9ce47c448e0 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig @@ -22,3 +22,11 @@ config DRM_VMWGFX_FBCON Choose this option if you are shipping a new vmwgfx userspace driver that supports using the kernel driver. +config DRM_VMWGFX_MKSSTATS + bool "Enable mksGuestStats instrumentation of vmwgfx by default" + depends on DRM_VMWGFX + depends on X86 + default n + help + Choose this option to instrument the kernel driver for mksGuestStats. + diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 09f6dcac768b..bc323f7d4032 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -9,7 +9,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ - ttm_object.o ttm_memory.o + vmwgfx_devcaps.o ttm_object.o ttm_memory.o vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o diff --git a/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h deleted file mode 100644 index 8cce7f15b6eb..000000000000 --- a/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h +++ /dev/null @@ -1,3 +0,0 @@ -/* - * Intentionally empty file. - */ diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h deleted file mode 100644 index 69c4253fbfbb..000000000000 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h +++ /dev/null @@ -1,111 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -/********************************************************** - * Copyright 2007-2015 VMware, Inc. 
- * - * Permission is hereby granted, free of charge, to any person - * obtaining a copy of this software and associated documentation - * files (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, - * modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - **********************************************************/ - -/* - * svga3d_caps.h -- - * - * Definitions for SVGA3D hardware capabilities. Capabilities - * are used to query for optional rendering features during - * driver initialization. The capability data is stored as very - * basic key/value dictionary within the "FIFO register" memory - * area at the beginning of BAR2. - * - * Note that these definitions are only for 3D capabilities. - * The SVGA device also has "device capabilities" and "FIFO - * capabilities", which are non-3D-specific and are stored as - * bitfields rather than key/value pairs. - */ - -#ifndef _SVGA3D_CAPS_H_ -#define _SVGA3D_CAPS_H_ - -#define INCLUDE_ALLOW_MODULE -#define INCLUDE_ALLOW_USERLEVEL - -#include "includeCheck.h" - -#include "svga_reg.h" - -#define SVGA_FIFO_3D_CAPS_SIZE (SVGA_FIFO_3D_CAPS_LAST - \ - SVGA_FIFO_3D_CAPS + 1) - - -/* - * SVGA3dCapsRecordType - * - * Record types that can be found in the caps block. - * Related record types are grouped together numerically so that - * SVGA3dCaps_FindRecord() can be applied on a range of record - * types. - */ - -typedef enum { - SVGA3DCAPS_RECORD_UNKNOWN = 0, - SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100, - SVGA3DCAPS_RECORD_DEVCAPS = 0x100, - SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff, -} SVGA3dCapsRecordType; - - -/* - * SVGA3dCapsRecordHeader - * - * Header field leading each caps block record. Contains the offset (in - * register words, NOT bytes) to the next caps block record (or the end - * of caps block records which will be a zero word) and the record type - * as defined above. 
- */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCapsRecordHeader { - uint32 length; - SVGA3dCapsRecordType type; -} -#include "vmware_pack_end.h" -SVGA3dCapsRecordHeader; - - -/* - * SVGA3dCapsRecord - * - * Caps block record; "data" is a placeholder for the actual data structure - * contained within the record; - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCapsRecord { - SVGA3dCapsRecordHeader header; - uint32 data[1]; -} -#include "vmware_pack_end.h" -SVGA3dCapsRecord; - - -typedef uint32 SVGA3dCapPair[2]; - -#endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h index 799bc0963f7a..945c84b27e81 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h @@ -1,6 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /********************************************************** - * Copyright 1998-2020 VMware, Inc. + * Copyright 2012-2021 VMware, Inc. + * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -27,2249 +27,1487 @@ /* * svga3d_cmd.h -- * - * SVGA 3d hardware cmd definitions + * SVGA 3d hardware cmd definitions */ + + #ifndef _SVGA3D_CMD_H_ #define _SVGA3D_CMD_H_ -#define INCLUDE_ALLOW_MODULE -#define INCLUDE_ALLOW_USERLEVEL -#define INCLUDE_ALLOW_VMCORE - -#include "includeCheck.h" #include "svga3d_types.h" - -/* - * Identifiers for commands in the command FIFO. - * - * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of - * the SVGA3D protocol and remain reserved; they should not be used in the - * future. - * - * IDs between 1040 and 2999 (inclusive) are available for use by the - * current SVGA3D protocol. - * - * FIFO clients other than SVGA3D should stay below 1000, or at 3000 - * and up. 
- */ - -typedef enum { - SVGA_3D_CMD_LEGACY_BASE = 1000, - SVGA_3D_CMD_BASE = 1040, - - SVGA_3D_CMD_SURFACE_DEFINE = 1040, - SVGA_3D_CMD_SURFACE_DESTROY = 1041, - SVGA_3D_CMD_SURFACE_COPY = 1042, - SVGA_3D_CMD_SURFACE_STRETCHBLT = 1043, - SVGA_3D_CMD_SURFACE_DMA = 1044, - SVGA_3D_CMD_CONTEXT_DEFINE = 1045, - SVGA_3D_CMD_CONTEXT_DESTROY = 1046, - SVGA_3D_CMD_SETTRANSFORM = 1047, - SVGA_3D_CMD_SETZRANGE = 1048, - SVGA_3D_CMD_SETRENDERSTATE = 1049, - SVGA_3D_CMD_SETRENDERTARGET = 1050, - SVGA_3D_CMD_SETTEXTURESTATE = 1051, - SVGA_3D_CMD_SETMATERIAL = 1052, - SVGA_3D_CMD_SETLIGHTDATA = 1053, - SVGA_3D_CMD_SETLIGHTENABLED = 1054, - SVGA_3D_CMD_SETVIEWPORT = 1055, - SVGA_3D_CMD_SETCLIPPLANE = 1056, - SVGA_3D_CMD_CLEAR = 1057, - SVGA_3D_CMD_PRESENT = 1058, - SVGA_3D_CMD_SHADER_DEFINE = 1059, - SVGA_3D_CMD_SHADER_DESTROY = 1060, - SVGA_3D_CMD_SET_SHADER = 1061, - SVGA_3D_CMD_SET_SHADER_CONST = 1062, - SVGA_3D_CMD_DRAW_PRIMITIVES = 1063, - SVGA_3D_CMD_SETSCISSORRECT = 1064, - SVGA_3D_CMD_BEGIN_QUERY = 1065, - SVGA_3D_CMD_END_QUERY = 1066, - SVGA_3D_CMD_WAIT_FOR_QUERY = 1067, - SVGA_3D_CMD_PRESENT_READBACK = 1068, - SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN = 1069, - SVGA_3D_CMD_SURFACE_DEFINE_V2 = 1070, - SVGA_3D_CMD_GENERATE_MIPMAPS = 1071, - SVGA_3D_CMD_DEAD4 = 1072, - SVGA_3D_CMD_DEAD5 = 1073, - SVGA_3D_CMD_DEAD6 = 1074, - SVGA_3D_CMD_DEAD7 = 1075, - SVGA_3D_CMD_DEAD8 = 1076, - SVGA_3D_CMD_DEAD9 = 1077, - SVGA_3D_CMD_DEAD10 = 1078, - SVGA_3D_CMD_DEAD11 = 1079, - SVGA_3D_CMD_ACTIVATE_SURFACE = 1080, - SVGA_3D_CMD_DEACTIVATE_SURFACE = 1081, - SVGA_3D_CMD_SCREEN_DMA = 1082, - SVGA_3D_CMD_DEAD1 = 1083, - SVGA_3D_CMD_DEAD2 = 1084, - - SVGA_3D_CMD_DEAD12 = 1085, - SVGA_3D_CMD_DEAD13 = 1086, - SVGA_3D_CMD_DEAD14 = 1087, - SVGA_3D_CMD_DEAD15 = 1088, - SVGA_3D_CMD_DEAD16 = 1089, - SVGA_3D_CMD_DEAD17 = 1090, - - SVGA_3D_CMD_SET_OTABLE_BASE = 1091, - SVGA_3D_CMD_READBACK_OTABLE = 1092, - - SVGA_3D_CMD_DEFINE_GB_MOB = 1093, - SVGA_3D_CMD_DESTROY_GB_MOB = 1094, - SVGA_3D_CMD_DEAD3 = 1095, - SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING = 1096, - - SVGA_3D_CMD_DEFINE_GB_SURFACE = 1097, - SVGA_3D_CMD_DESTROY_GB_SURFACE = 1098, - SVGA_3D_CMD_BIND_GB_SURFACE = 1099, - SVGA_3D_CMD_COND_BIND_GB_SURFACE = 1100, - SVGA_3D_CMD_UPDATE_GB_IMAGE = 1101, - SVGA_3D_CMD_UPDATE_GB_SURFACE = 1102, - SVGA_3D_CMD_READBACK_GB_IMAGE = 1103, - SVGA_3D_CMD_READBACK_GB_SURFACE = 1104, - SVGA_3D_CMD_INVALIDATE_GB_IMAGE = 1105, - SVGA_3D_CMD_INVALIDATE_GB_SURFACE = 1106, - - SVGA_3D_CMD_DEFINE_GB_CONTEXT = 1107, - SVGA_3D_CMD_DESTROY_GB_CONTEXT = 1108, - SVGA_3D_CMD_BIND_GB_CONTEXT = 1109, - SVGA_3D_CMD_READBACK_GB_CONTEXT = 1110, - SVGA_3D_CMD_INVALIDATE_GB_CONTEXT = 1111, - - SVGA_3D_CMD_DEFINE_GB_SHADER = 1112, - SVGA_3D_CMD_DESTROY_GB_SHADER = 1113, - SVGA_3D_CMD_BIND_GB_SHADER = 1114, - - SVGA_3D_CMD_SET_OTABLE_BASE64 = 1115, - - SVGA_3D_CMD_BEGIN_GB_QUERY = 1116, - SVGA_3D_CMD_END_GB_QUERY = 1117, - SVGA_3D_CMD_WAIT_FOR_GB_QUERY = 1118, - - SVGA_3D_CMD_NOP = 1119, - - SVGA_3D_CMD_ENABLE_GART = 1120, - SVGA_3D_CMD_DISABLE_GART = 1121, - SVGA_3D_CMD_MAP_MOB_INTO_GART = 1122, - SVGA_3D_CMD_UNMAP_GART_RANGE = 1123, - - SVGA_3D_CMD_DEFINE_GB_SCREENTARGET = 1124, - SVGA_3D_CMD_DESTROY_GB_SCREENTARGET = 1125, - SVGA_3D_CMD_BIND_GB_SCREENTARGET = 1126, - SVGA_3D_CMD_UPDATE_GB_SCREENTARGET = 1127, - - SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL = 1128, - SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL = 1129, - - SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE = 1130, - - SVGA_3D_CMD_GB_SCREEN_DMA = 1131, - SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH = 1132, - 
SVGA_3D_CMD_GB_MOB_FENCE = 1133, - SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 = 1134, - SVGA_3D_CMD_DEFINE_GB_MOB64 = 1135, - SVGA_3D_CMD_REDEFINE_GB_MOB64 = 1136, - SVGA_3D_CMD_NOP_ERROR = 1137, - - SVGA_3D_CMD_SET_VERTEX_STREAMS = 1138, - SVGA_3D_CMD_SET_VERTEX_DECLS = 1139, - SVGA_3D_CMD_SET_VERTEX_DIVISORS = 1140, - SVGA_3D_CMD_DRAW = 1141, - SVGA_3D_CMD_DRAW_INDEXED = 1142, - - /* - * DX10 Commands - */ - SVGA_3D_CMD_DX_MIN = 1143, - SVGA_3D_CMD_DX_DEFINE_CONTEXT = 1143, - SVGA_3D_CMD_DX_DESTROY_CONTEXT = 1144, - SVGA_3D_CMD_DX_BIND_CONTEXT = 1145, - SVGA_3D_CMD_DX_READBACK_CONTEXT = 1146, - SVGA_3D_CMD_DX_INVALIDATE_CONTEXT = 1147, - SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER = 1148, - SVGA_3D_CMD_DX_SET_SHADER_RESOURCES = 1149, - SVGA_3D_CMD_DX_SET_SHADER = 1150, - SVGA_3D_CMD_DX_SET_SAMPLERS = 1151, - SVGA_3D_CMD_DX_DRAW = 1152, - SVGA_3D_CMD_DX_DRAW_INDEXED = 1153, - SVGA_3D_CMD_DX_DRAW_INSTANCED = 1154, - SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED = 1155, - SVGA_3D_CMD_DX_DRAW_AUTO = 1156, - SVGA_3D_CMD_DX_SET_INPUT_LAYOUT = 1157, - SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS = 1158, - SVGA_3D_CMD_DX_SET_INDEX_BUFFER = 1159, - SVGA_3D_CMD_DX_SET_TOPOLOGY = 1160, - SVGA_3D_CMD_DX_SET_RENDERTARGETS = 1161, - SVGA_3D_CMD_DX_SET_BLEND_STATE = 1162, - SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE = 1163, - SVGA_3D_CMD_DX_SET_RASTERIZER_STATE = 1164, - SVGA_3D_CMD_DX_DEFINE_QUERY = 1165, - SVGA_3D_CMD_DX_DESTROY_QUERY = 1166, - SVGA_3D_CMD_DX_BIND_QUERY = 1167, - SVGA_3D_CMD_DX_SET_QUERY_OFFSET = 1168, - SVGA_3D_CMD_DX_BEGIN_QUERY = 1169, - SVGA_3D_CMD_DX_END_QUERY = 1170, - SVGA_3D_CMD_DX_READBACK_QUERY = 1171, - SVGA_3D_CMD_DX_SET_PREDICATION = 1172, - SVGA_3D_CMD_DX_SET_SOTARGETS = 1173, - SVGA_3D_CMD_DX_SET_VIEWPORTS = 1174, - SVGA_3D_CMD_DX_SET_SCISSORRECTS = 1175, - SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW = 1176, - SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW = 1177, - SVGA_3D_CMD_DX_PRED_COPY_REGION = 1178, - SVGA_3D_CMD_DX_PRED_COPY = 1179, - SVGA_3D_CMD_DX_PRESENTBLT = 1180, - SVGA_3D_CMD_DX_GENMIPS = 1181, - SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE = 1182, - SVGA_3D_CMD_DX_READBACK_SUBRESOURCE = 1183, - SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE = 1184, - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW = 1185, - SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW = 1186, - SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW = 1187, - SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW = 1188, - SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW = 1189, - SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW = 1190, - SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT = 1191, - SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT = 1192, - SVGA_3D_CMD_DX_DEFINE_BLEND_STATE = 1193, - SVGA_3D_CMD_DX_DESTROY_BLEND_STATE = 1194, - SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE = 1195, - SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE = 1196, - SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE = 1197, - SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE = 1198, - SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE = 1199, - SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE = 1200, - SVGA_3D_CMD_DX_DEFINE_SHADER = 1201, - SVGA_3D_CMD_DX_DESTROY_SHADER = 1202, - SVGA_3D_CMD_DX_BIND_SHADER = 1203, - SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT = 1204, - SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT = 1205, - SVGA_3D_CMD_DX_SET_STREAMOUTPUT = 1206, - SVGA_3D_CMD_DX_SET_COTABLE = 1207, - SVGA_3D_CMD_DX_READBACK_COTABLE = 1208, - SVGA_3D_CMD_DX_BUFFER_COPY = 1209, - SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER = 1210, - SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK = 1211, - SVGA_3D_CMD_DX_MOVE_QUERY = 1212, - SVGA_3D_CMD_DX_BIND_ALL_QUERY = 1213, - SVGA_3D_CMD_DX_READBACK_ALL_QUERY = 1214, - 
SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER = 1215, - SVGA_3D_CMD_DX_MOB_FENCE_64 = 1216, - SVGA_3D_CMD_DX_BIND_ALL_SHADER = 1217, - SVGA_3D_CMD_DX_HINT = 1218, - SVGA_3D_CMD_DX_BUFFER_UPDATE = 1219, - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET = 1220, - SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET = 1221, - SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET = 1222, - SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET = 1223, - SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET = 1224, - SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET = 1225, - - SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER = 1226, - SVGA_3D_CMD_DX_MAX = 1227, - - SVGA_3D_CMD_SCREEN_COPY = 1227, - - SVGA_3D_CMD_RESERVED1 = 1228, - SVGA_3D_CMD_RESERVED2 = 1229, - SVGA_3D_CMD_RESERVED3 = 1230, - SVGA_3D_CMD_RESERVED4 = 1231, - SVGA_3D_CMD_RESERVED5 = 1232, - SVGA_3D_CMD_RESERVED6 = 1233, - SVGA_3D_CMD_RESERVED7 = 1234, - SVGA_3D_CMD_RESERVED8 = 1235, - - SVGA_3D_CMD_GROW_OTABLE = 1236, - SVGA_3D_CMD_DX_GROW_COTABLE = 1237, - SVGA_3D_CMD_INTRA_SURFACE_COPY = 1238, - - SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 = 1239, - - SVGA_3D_CMD_DX_RESOLVE_COPY = 1240, - SVGA_3D_CMD_DX_PRED_RESOLVE_COPY = 1241, - SVGA_3D_CMD_DX_PRED_CONVERT_REGION = 1242, - SVGA_3D_CMD_DX_PRED_CONVERT = 1243, - SVGA_3D_CMD_WHOLE_SURFACE_COPY = 1244, - - SVGA_3D_CMD_DX_DEFINE_UA_VIEW = 1245, - SVGA_3D_CMD_DX_DESTROY_UA_VIEW = 1246, - SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT = 1247, - SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT = 1248, - SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT = 1249, - SVGA_3D_CMD_DX_SET_UA_VIEWS = 1250, - - SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT = 1251, - SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT = 1252, - SVGA_3D_CMD_DX_DISPATCH = 1253, - SVGA_3D_CMD_DX_DISPATCH_INDIRECT = 1254, - - SVGA_3D_CMD_WRITE_ZERO_SURFACE = 1255, - SVGA_3D_CMD_HINT_ZERO_SURFACE = 1256, - SVGA_3D_CMD_DX_TRANSFER_TO_BUFFER = 1257, - SVGA_3D_CMD_DX_SET_STRUCTURE_COUNT = 1258, - - SVGA_3D_CMD_LOGICOPS_BITBLT = 1259, - SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1260, - SVGA_3D_CMD_LOGICOPS_STRETCHBLT = 1261, - SVGA_3D_CMD_LOGICOPS_COLORFILL = 1262, - SVGA_3D_CMD_LOGICOPS_ALPHABLEND = 1263, - SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND = 1264, - - SVGA_3D_CMD_RESERVED2_1 = 1265, - - SVGA_3D_CMD_RESERVED2_2 = 1266, - SVGA_3D_CMD_DEFINE_GB_SURFACE_V4 = 1267, - SVGA_3D_CMD_DX_SET_CS_UA_VIEWS = 1268, - SVGA_3D_CMD_DX_SET_MIN_LOD = 1269, - SVGA_3D_CMD_RESERVED2_3 = 1270, - SVGA_3D_CMD_RESERVED2_4 = 1271, - SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2 = 1272, - SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB = 1273, - SVGA_3D_CMD_DX_SET_SHADER_IFACE = 1274, - SVGA_3D_CMD_DX_BIND_STREAMOUTPUT = 1275, - SVGA_3D_CMD_SURFACE_STRETCHBLT_NON_MS_TO_MS = 1276, - SVGA_3D_CMD_DX_BIND_SHADER_IFACE = 1277, - - SVGA_3D_CMD_MAX = 1278, - SVGA_3D_CMD_FUTURE_MAX = 3000 +#include "svga3d_limits.h" +#include "svga_reg.h" + +typedef enum SVGAFifo3dCmdId { + SVGA_3D_CMD_LEGACY_BASE = 1000, + SVGA_3D_CMD_BASE = 1040, + + SVGA_3D_CMD_SURFACE_DEFINE = 1040, + SVGA_3D_CMD_SURFACE_DESTROY = 1041, + SVGA_3D_CMD_SURFACE_COPY = 1042, + SVGA_3D_CMD_SURFACE_STRETCHBLT = 1043, + SVGA_3D_CMD_SURFACE_DMA = 1044, + SVGA_3D_CMD_CONTEXT_DEFINE = 1045, + SVGA_3D_CMD_CONTEXT_DESTROY = 1046, + SVGA_3D_CMD_SETTRANSFORM = 1047, + SVGA_3D_CMD_SETZRANGE = 1048, + SVGA_3D_CMD_SETRENDERSTATE = 1049, + SVGA_3D_CMD_SETRENDERTARGET = 1050, + SVGA_3D_CMD_SETTEXTURESTATE = 1051, + SVGA_3D_CMD_SETMATERIAL = 1052, + SVGA_3D_CMD_SETLIGHTDATA = 1053, + SVGA_3D_CMD_SETLIGHTENABLED = 1054, + SVGA_3D_CMD_SETVIEWPORT = 1055, + SVGA_3D_CMD_SETCLIPPLANE = 1056, + SVGA_3D_CMD_CLEAR = 1057, + SVGA_3D_CMD_PRESENT = 
1058, + SVGA_3D_CMD_SHADER_DEFINE = 1059, + SVGA_3D_CMD_SHADER_DESTROY = 1060, + SVGA_3D_CMD_SET_SHADER = 1061, + SVGA_3D_CMD_SET_SHADER_CONST = 1062, + SVGA_3D_CMD_DRAW_PRIMITIVES = 1063, + SVGA_3D_CMD_SETSCISSORRECT = 1064, + SVGA_3D_CMD_BEGIN_QUERY = 1065, + SVGA_3D_CMD_END_QUERY = 1066, + SVGA_3D_CMD_WAIT_FOR_QUERY = 1067, + SVGA_3D_CMD_PRESENT_READBACK = 1068, + SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN = 1069, + SVGA_3D_CMD_SURFACE_DEFINE_V2 = 1070, + SVGA_3D_CMD_GENERATE_MIPMAPS = 1071, + SVGA_3D_CMD_DEAD4 = 1072, + SVGA_3D_CMD_DEAD5 = 1073, + SVGA_3D_CMD_DEAD6 = 1074, + SVGA_3D_CMD_DEAD7 = 1075, + SVGA_3D_CMD_DEAD8 = 1076, + SVGA_3D_CMD_DEAD9 = 1077, + SVGA_3D_CMD_DEAD10 = 1078, + SVGA_3D_CMD_DEAD11 = 1079, + SVGA_3D_CMD_ACTIVATE_SURFACE = 1080, + SVGA_3D_CMD_DEACTIVATE_SURFACE = 1081, + SVGA_3D_CMD_SCREEN_DMA = 1082, + SVGA_3D_CMD_DEAD1 = 1083, + SVGA_3D_CMD_DEAD2 = 1084, + + SVGA_3D_CMD_DEAD12 = 1085, + SVGA_3D_CMD_DEAD13 = 1086, + SVGA_3D_CMD_DEAD14 = 1087, + SVGA_3D_CMD_DEAD15 = 1088, + SVGA_3D_CMD_DEAD16 = 1089, + SVGA_3D_CMD_DEAD17 = 1090, + + SVGA_3D_CMD_SET_OTABLE_BASE = 1091, + SVGA_3D_CMD_READBACK_OTABLE = 1092, + + SVGA_3D_CMD_DEFINE_GB_MOB = 1093, + SVGA_3D_CMD_DESTROY_GB_MOB = 1094, + SVGA_3D_CMD_DEAD3 = 1095, + SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING = 1096, + + SVGA_3D_CMD_DEFINE_GB_SURFACE = 1097, + SVGA_3D_CMD_DESTROY_GB_SURFACE = 1098, + SVGA_3D_CMD_BIND_GB_SURFACE = 1099, + SVGA_3D_CMD_COND_BIND_GB_SURFACE = 1100, + SVGA_3D_CMD_UPDATE_GB_IMAGE = 1101, + SVGA_3D_CMD_UPDATE_GB_SURFACE = 1102, + SVGA_3D_CMD_READBACK_GB_IMAGE = 1103, + SVGA_3D_CMD_READBACK_GB_SURFACE = 1104, + SVGA_3D_CMD_INVALIDATE_GB_IMAGE = 1105, + SVGA_3D_CMD_INVALIDATE_GB_SURFACE = 1106, + + SVGA_3D_CMD_DEFINE_GB_CONTEXT = 1107, + SVGA_3D_CMD_DESTROY_GB_CONTEXT = 1108, + SVGA_3D_CMD_BIND_GB_CONTEXT = 1109, + SVGA_3D_CMD_READBACK_GB_CONTEXT = 1110, + SVGA_3D_CMD_INVALIDATE_GB_CONTEXT = 1111, + + SVGA_3D_CMD_DEFINE_GB_SHADER = 1112, + SVGA_3D_CMD_DESTROY_GB_SHADER = 1113, + SVGA_3D_CMD_BIND_GB_SHADER = 1114, + + SVGA_3D_CMD_SET_OTABLE_BASE64 = 1115, + + SVGA_3D_CMD_BEGIN_GB_QUERY = 1116, + SVGA_3D_CMD_END_GB_QUERY = 1117, + SVGA_3D_CMD_WAIT_FOR_GB_QUERY = 1118, + + SVGA_3D_CMD_NOP = 1119, + + SVGA_3D_CMD_ENABLE_GART = 1120, + SVGA_3D_CMD_DISABLE_GART = 1121, + SVGA_3D_CMD_MAP_MOB_INTO_GART = 1122, + SVGA_3D_CMD_UNMAP_GART_RANGE = 1123, + + SVGA_3D_CMD_DEFINE_GB_SCREENTARGET = 1124, + SVGA_3D_CMD_DESTROY_GB_SCREENTARGET = 1125, + SVGA_3D_CMD_BIND_GB_SCREENTARGET = 1126, + SVGA_3D_CMD_UPDATE_GB_SCREENTARGET = 1127, + + SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL = 1128, + SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL = 1129, + + SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE = 1130, + + SVGA_3D_CMD_GB_SCREEN_DMA = 1131, + SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH = 1132, + SVGA_3D_CMD_GB_MOB_FENCE = 1133, + SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 = 1134, + SVGA_3D_CMD_DEFINE_GB_MOB64 = 1135, + SVGA_3D_CMD_REDEFINE_GB_MOB64 = 1136, + SVGA_3D_CMD_NOP_ERROR = 1137, + + SVGA_3D_CMD_SET_VERTEX_STREAMS = 1138, + SVGA_3D_CMD_SET_VERTEX_DECLS = 1139, + SVGA_3D_CMD_SET_VERTEX_DIVISORS = 1140, + SVGA_3D_CMD_DRAW = 1141, + SVGA_3D_CMD_DRAW_INDEXED = 1142, + + SVGA_3D_CMD_DX_MIN = 1143, + SVGA_3D_CMD_DX_DEFINE_CONTEXT = 1143, + SVGA_3D_CMD_DX_DESTROY_CONTEXT = 1144, + SVGA_3D_CMD_DX_BIND_CONTEXT = 1145, + SVGA_3D_CMD_DX_READBACK_CONTEXT = 1146, + SVGA_3D_CMD_DX_INVALIDATE_CONTEXT = 1147, + SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER = 1148, + SVGA_3D_CMD_DX_SET_SHADER_RESOURCES = 1149, + SVGA_3D_CMD_DX_SET_SHADER = 1150, + SVGA_3D_CMD_DX_SET_SAMPLERS = 
1151, + SVGA_3D_CMD_DX_DRAW = 1152, + SVGA_3D_CMD_DX_DRAW_INDEXED = 1153, + SVGA_3D_CMD_DX_DRAW_INSTANCED = 1154, + SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED = 1155, + SVGA_3D_CMD_DX_DRAW_AUTO = 1156, + SVGA_3D_CMD_DX_SET_INPUT_LAYOUT = 1157, + SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS = 1158, + SVGA_3D_CMD_DX_SET_INDEX_BUFFER = 1159, + SVGA_3D_CMD_DX_SET_TOPOLOGY = 1160, + SVGA_3D_CMD_DX_SET_RENDERTARGETS = 1161, + SVGA_3D_CMD_DX_SET_BLEND_STATE = 1162, + SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE = 1163, + SVGA_3D_CMD_DX_SET_RASTERIZER_STATE = 1164, + SVGA_3D_CMD_DX_DEFINE_QUERY = 1165, + SVGA_3D_CMD_DX_DESTROY_QUERY = 1166, + SVGA_3D_CMD_DX_BIND_QUERY = 1167, + SVGA_3D_CMD_DX_SET_QUERY_OFFSET = 1168, + SVGA_3D_CMD_DX_BEGIN_QUERY = 1169, + SVGA_3D_CMD_DX_END_QUERY = 1170, + SVGA_3D_CMD_DX_READBACK_QUERY = 1171, + SVGA_3D_CMD_DX_SET_PREDICATION = 1172, + SVGA_3D_CMD_DX_SET_SOTARGETS = 1173, + SVGA_3D_CMD_DX_SET_VIEWPORTS = 1174, + SVGA_3D_CMD_DX_SET_SCISSORRECTS = 1175, + SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW = 1176, + SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW = 1177, + SVGA_3D_CMD_DX_PRED_COPY_REGION = 1178, + SVGA_3D_CMD_DX_PRED_COPY = 1179, + SVGA_3D_CMD_DX_PRESENTBLT = 1180, + SVGA_3D_CMD_DX_GENMIPS = 1181, + SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE = 1182, + SVGA_3D_CMD_DX_READBACK_SUBRESOURCE = 1183, + SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE = 1184, + SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW = 1185, + SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW = 1186, + SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW = 1187, + SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW = 1188, + SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW = 1189, + SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW = 1190, + SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT = 1191, + SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT = 1192, + SVGA_3D_CMD_DX_DEFINE_BLEND_STATE = 1193, + SVGA_3D_CMD_DX_DESTROY_BLEND_STATE = 1194, + SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE = 1195, + SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE = 1196, + SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE = 1197, + SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE = 1198, + SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE = 1199, + SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE = 1200, + SVGA_3D_CMD_DX_DEFINE_SHADER = 1201, + SVGA_3D_CMD_DX_DESTROY_SHADER = 1202, + SVGA_3D_CMD_DX_BIND_SHADER = 1203, + SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT = 1204, + SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT = 1205, + SVGA_3D_CMD_DX_SET_STREAMOUTPUT = 1206, + SVGA_3D_CMD_DX_SET_COTABLE = 1207, + SVGA_3D_CMD_DX_READBACK_COTABLE = 1208, + SVGA_3D_CMD_DX_BUFFER_COPY = 1209, + SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER = 1210, + SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK = 1211, + SVGA_3D_CMD_DX_MOVE_QUERY = 1212, + SVGA_3D_CMD_DX_BIND_ALL_QUERY = 1213, + SVGA_3D_CMD_DX_READBACK_ALL_QUERY = 1214, + SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER = 1215, + SVGA_3D_CMD_DX_MOB_FENCE_64 = 1216, + SVGA_3D_CMD_DX_BIND_ALL_SHADER = 1217, + SVGA_3D_CMD_DX_HINT = 1218, + SVGA_3D_CMD_DX_BUFFER_UPDATE = 1219, + SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET = 1220, + SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET = 1221, + SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET = 1222, + SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET = 1223, + SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET = 1224, + SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET = 1225, + + SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER = 1226, + SVGA_3D_CMD_DX_MAX = 1227, + + SVGA_3D_CMD_SCREEN_COPY = 1227, + + SVGA_3D_CMD_RESERVED1 = 1228, + SVGA_3D_CMD_RESERVED2 = 1229, + SVGA_3D_CMD_RESERVED3 = 1230, + SVGA_3D_CMD_RESERVED4 = 1231, + SVGA_3D_CMD_RESERVED5 = 1232, + SVGA_3D_CMD_RESERVED6 = 1233, + 
SVGA_3D_CMD_RESERVED7 = 1234, + SVGA_3D_CMD_RESERVED8 = 1235, + + SVGA_3D_CMD_GROW_OTABLE = 1236, + SVGA_3D_CMD_DX_GROW_COTABLE = 1237, + SVGA_3D_CMD_INTRA_SURFACE_COPY = 1238, + + SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 = 1239, + + SVGA_3D_CMD_DX_RESOLVE_COPY = 1240, + SVGA_3D_CMD_DX_PRED_RESOLVE_COPY = 1241, + SVGA_3D_CMD_DX_PRED_CONVERT_REGION = 1242, + SVGA_3D_CMD_DX_PRED_CONVERT = 1243, + SVGA_3D_CMD_WHOLE_SURFACE_COPY = 1244, + + SVGA_3D_CMD_DX_DEFINE_UA_VIEW = 1245, + SVGA_3D_CMD_DX_DESTROY_UA_VIEW = 1246, + SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT = 1247, + SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT = 1248, + SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT = 1249, + SVGA_3D_CMD_DX_SET_UA_VIEWS = 1250, + + SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT = 1251, + SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT = 1252, + SVGA_3D_CMD_DX_DISPATCH = 1253, + SVGA_3D_CMD_DX_DISPATCH_INDIRECT = 1254, + + SVGA_3D_CMD_WRITE_ZERO_SURFACE = 1255, + SVGA_3D_CMD_UPDATE_ZERO_SURFACE = 1256, + SVGA_3D_CMD_DX_TRANSFER_TO_BUFFER = 1257, + SVGA_3D_CMD_DX_SET_STRUCTURE_COUNT = 1258, + + SVGA_3D_CMD_LOGICOPS_BITBLT = 1259, + SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1260, + SVGA_3D_CMD_LOGICOPS_STRETCHBLT = 1261, + SVGA_3D_CMD_LOGICOPS_COLORFILL = 1262, + SVGA_3D_CMD_LOGICOPS_ALPHABLEND = 1263, + SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND = 1264, + + SVGA_3D_CMD_DX_COPY_COTABLE_INTO_MOB = 1265, + + SVGA_3D_CMD_UPDATE_GB_SCREENTARGET_V2 = 1266, + + SVGA_3D_CMD_DEFINE_GB_SURFACE_V4 = 1267, + SVGA_3D_CMD_DX_SET_CS_UA_VIEWS = 1268, + SVGA_3D_CMD_DX_SET_MIN_LOD = 1269, + + SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2 = 1272, + SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB = 1273, + SVGA_3D_CMD_DX_SET_SHADER_IFACE = 1274, + SVGA_3D_CMD_DX_BIND_STREAMOUTPUT = 1275, + SVGA_3D_CMD_SURFACE_STRETCHBLT_NON_MS_TO_MS = 1276, + SVGA_3D_CMD_DX_BIND_SHADER_IFACE = 1277, + + SVGA_3D_CMD_UPDATE_GB_SCREENTARGET_MOVE = 1278, + + SVGA_3D_CMD_DX_PRED_STAGING_COPY = 1281, + SVGA_3D_CMD_DX_STAGING_COPY = 1282, + SVGA_3D_CMD_DX_PRED_STAGING_COPY_REGION = 1283, + SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS_V2 = 1284, + SVGA_3D_CMD_DX_SET_INDEX_BUFFER_V2 = 1285, + SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS_OFFSET_AND_SIZE = 1286, + SVGA_3D_CMD_DX_SET_INDEX_BUFFER_OFFSET_AND_SIZE = 1287, + SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2 = 1288, + SVGA_3D_CMD_DX_PRED_STAGING_CONVERT_REGION = 1289, + SVGA_3D_CMD_DX_PRED_STAGING_CONVERT = 1290, + SVGA_3D_CMD_DX_STAGING_BUFFER_COPY = 1291, + + SVGA_3D_CMD_MAX = 1303, + SVGA_3D_CMD_FUTURE_MAX = 3000 } SVGAFifo3dCmdId; #define SVGA_NUM_3D_CMD (SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE) -/* - * FIFO command format definitions: - */ - -/* - * The data size header following cmdNum for every 3d command - */ -typedef -#include "vmware_pack_begin.h" -struct { - uint32 id; - uint32 size; -} -#include "vmware_pack_end.h" -SVGA3dCmdHeader; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 numMipLevels; -} -#include "vmware_pack_end.h" -SVGA3dSurfaceFace; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 sid; - SVGA3dSurface1Flags surfaceFlags; - SVGA3dSurfaceFormat format; - - /* - * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace - * structures must have the same value of numMipLevels field. - * Otherwise, all but the first SVGA3dSurfaceFace structures must have the - * numMipLevels set to 0. - */ - SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES]; - - /* - * Followed by an SVGA3dSize structure for each mip level in each face. 
- * - * A note on surface sizes: Sizes are always specified in pixels, - * even if the true surface size is not a multiple of the minimum - * block size of the surface's format. For example, a 3x3x1 DXT1 - * compressed texture would actually be stored as a 4x4x1 image in - * memory. - */ -} -#include "vmware_pack_end.h" -SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 sid; - SVGA3dSurface1Flags surfaceFlags; - SVGA3dSurfaceFormat format; - - /* - * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace - * structures must have the same value of numMipLevels field. - * Otherwise, all but the first SVGA3dSurfaceFace structures must have the - * numMipLevels set to 0. - */ - SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES]; - uint32 multisampleCount; - SVGA3dTextureFilter autogenFilter; - - /* - * Followed by an SVGA3dSize structure for each mip level in each face. - * - * A note on surface sizes: Sizes are always specified in pixels, - * even if the true surface size is not a multiple of the minimum - * block size of the surface's format. For example, a 3x3x1 DXT1 - * compressed texture would actually be stored as a 4x4x1 image in - * memory. - */ -} -#include "vmware_pack_end.h" -SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 sid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGA3dClearFlag clearFlag; - uint32 color; - float depth; - uint32 stencil; - /* Followed by variable number of SVGA3dRect structures */ -} -#include "vmware_pack_end.h" -SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dLightType type; - SVGA3dBool inWorldSpace; - float diffuse[4]; - float specular[4]; - float ambient[4]; - float position[4]; - float direction[4]; - float range; - float falloff; - float attenuation0; - float attenuation1; - float attenuation2; - float theta; - float phi; -} -#include "vmware_pack_end.h" -SVGA3dLightData; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 sid; - /* Followed by variable number of SVGA3dCopyRect structures */ -} -#include "vmware_pack_end.h" -SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dRenderStateName state; - union { - uint32 uintValue; - float floatValue; - }; -} -#include "vmware_pack_end.h" -SVGA3dRenderState; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - /* Followed by variable number of SVGA3dRenderState structures */ -} -#include "vmware_pack_end.h" -SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGA3dRenderTargetType type; - SVGA3dSurfaceImageId target; -} -#include "vmware_pack_end.h" -SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dSurfaceImageId src; - SVGA3dSurfaceImageId dest; - /* Followed by variable number of SVGA3dCopyBox structures */ -} -#include "vmware_pack_end.h" 
-SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */ +#pragma pack(push, 1) +typedef struct { + uint32 id; + uint32 size; +} SVGA3dCmdHeader; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 numMipLevels; +} SVGA3dSurfaceFace; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 sid; + SVGA3dSurface1Flags surfaceFlags; + SVGA3dSurfaceFormat format; + + SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES]; + +} SVGA3dCmdDefineSurface; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 sid; + SVGA3dSurface1Flags surfaceFlags; + SVGA3dSurfaceFormat format; + + SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES]; + uint32 multisampleCount; + SVGA3dTextureFilter autogenFilter; + +} SVGA3dCmdDefineSurface_v2; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 sid; +} SVGA3dCmdDestroySurface; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; +} SVGA3dCmdDefineContext; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; +} SVGA3dCmdDestroyContext; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGA3dClearFlag clearFlag; + uint32 color; + float depth; + uint32 stencil; + +} SVGA3dCmdClear; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dLightType type; + SVGA3dBool inWorldSpace; + float diffuse[4]; + float specular[4]; + float ambient[4]; + float position[4]; + float direction[4]; + float range; + float falloff; + float attenuation0; + float attenuation1; + float attenuation2; + float theta; + float phi; +} SVGA3dLightData; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 sid; + +} SVGA3dCmdPresent; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dRenderStateName state; + union { + uint32 uintValue; + float floatValue; + }; +} SVGA3dRenderState; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + +} SVGA3dCmdSetRenderState; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGA3dRenderTargetType type; + SVGA3dSurfaceImageId target; +} SVGA3dCmdSetRenderTarget; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dSurfaceImageId src; + SVGA3dSurfaceImageId dest; + +} SVGA3dCmdSurfaceCopy; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dSurfaceImageId surface; + SVGA3dCopyBox box; +} SVGA3dCmdIntraSurfaceCopy; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 srcSid; + uint32 destSid; +} SVGA3dCmdWholeSurfaceCopy; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dSurfaceImageId src; + SVGA3dSurfaceImageId dest; + SVGA3dBox boxSrc; + SVGA3dBox boxDest; +} SVGA3dCmdSurfaceStretchBltNonMSToMS; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dSurfaceImageId src; + SVGA3dSurfaceImageId dest; + SVGA3dBox boxSrc; + SVGA3dBox boxDest; + SVGA3dStretchBltMode mode; +} SVGA3dCmdSurfaceStretchBlt; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 discard : 1; + + uint32 unsynchronized : 1; + + uint32 reserved : 30; +} SVGA3dSurfaceDMAFlags; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGAGuestImage guest; + SVGA3dSurfaceImageId host; + SVGA3dTransferType transfer; + +} SVGA3dCmdSurfaceDMA; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 suffixSize; + + uint32 maximumOffset; + + SVGA3dSurfaceDMAFlags flags; +} SVGA3dCmdSurfaceDMASuffix; +#pragma pack(pop) + +#pragma pack(push, 1) 
+typedef struct { + uint32 first; + uint32 last; +} SVGA3dArrayRangeHint; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 surfaceId; + uint32 offset; + uint32 stride; +} SVGA3dArray; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dDeclType type; + SVGA3dDeclMethod method; + SVGA3dDeclUsage usage; + uint32 usageIndex; +} SVGA3dVertexArrayIdentity; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dVertexDecl { + SVGA3dVertexArrayIdentity identity; + SVGA3dArray array; + SVGA3dArrayRangeHint rangeHint; +} SVGA3dVertexDecl; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dPrimitiveRange { + SVGA3dPrimitiveType primType; + uint32 primitiveCount; + + SVGA3dArray indexArray; + uint32 indexWidth; + + int32 indexBias; +} SVGA3dPrimitiveRange; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + uint32 numVertexDecls; + uint32 numRanges; + +} SVGA3dCmdDrawPrimitives; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + + uint32 primitiveCount; + uint32 startVertexLocation; + + uint8 primitiveType; + uint8 padding[3]; +} SVGA3dCmdDraw; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + + uint8 primitiveType; + + uint32 indexBufferSid; + uint32 indexBufferOffset; + + uint8 indexBufferStride; + + int32 baseVertexLocation; + + uint32 primitiveCount; + uint32 pad0; + uint16 pad1; +} SVGA3dCmdDrawIndexed; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint16 streamOffset; + uint8 stream; + uint8 type; + uint8 method; + uint8 usage; + uint8 usageIndex; + uint8 padding; + +} SVGA3dVertexElement; +#pragma pack(pop) -/* - * Perform a surface copy within the same image. - * The src/dest boxes are allowed to overlap. - */ -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dSurfaceImageId surface; - SVGA3dCopyBox box; -} -#include "vmware_pack_end.h" -SVGA3dCmdIntraSurfaceCopy; /* SVGA_3D_CMD_INTRA_SURFACE_COPY */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 srcSid; - uint32 destSid; -} -#include "vmware_pack_end.h" -SVGA3dCmdWholeSurfaceCopy; /* SVGA_3D_CMD_WHOLE_SURFACE_COPY */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dSurfaceImageId src; - SVGA3dSurfaceImageId dest; - SVGA3dBox boxSrc; - SVGA3dBox boxDest; -} -#include "vmware_pack_end.h" -SVGA3dCmdSurfaceStretchBltNonMSToMS; -/* SVGA_3D_CMD_SURFACE_STRETCHBLT_NON_MS_TO_MS */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dSurfaceImageId src; - SVGA3dSurfaceImageId dest; - SVGA3dBox boxSrc; - SVGA3dBox boxDest; - SVGA3dStretchBltMode mode; -} -#include "vmware_pack_end.h" -SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */ - -typedef -#include "vmware_pack_begin.h" -struct { - /* - * If the discard flag is present in a surface DMA operation, the host may - * discard the contents of the current mipmap level and face of the target - * surface before applying the surface DMA contents. - */ - uint32 discard : 1; - - /* - * If the unsynchronized flag is present, the host may perform this upload - * without syncing to pending reads on this surface. - */ - uint32 unsynchronized : 1; - - /* - * Guests *MUST* set the reserved bits to 0 before submitting the command - * suffix as future flags may occupy these bits. 
- */ - uint32 reserved : 30; -} -#include "vmware_pack_end.h" -SVGA3dSurfaceDMAFlags; - -typedef -#include "vmware_pack_begin.h" -struct { - SVGAGuestImage guest; - SVGA3dSurfaceImageId host; - SVGA3dTransferType transfer; - - /* - * Followed by variable number of SVGA3dCopyBox structures. For consistency - * in all clipping logic and coordinate translation, we define the - * "source" in each copyBox as the guest image and the - * "destination" as the host image, regardless of transfer - * direction. - * - * For efficiency, the SVGA3D device is free to copy more data than - * specified. For example, it may round copy boxes outwards such - * that they lie on particular alignment boundaries. - */ -} -#include "vmware_pack_end.h" -SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */ - -/* - * SVGA3dCmdSurfaceDMASuffix -- - * - * This is a command suffix that will appear after a SurfaceDMA command in - * the FIFO. It contains some extra information that hosts may use to - * optimize performance or protect the guest. This suffix exists to preserve - * backwards compatibility while also allowing for new functionality to be - * implemented. - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 suffixSize; - - /* - * The maximum offset is used to determine the maximum offset from the - * guestPtr base address that will be accessed or written to during this - * surfaceDMA. If the suffix is supported, the host will respect this - * boundary while performing surface DMAs. - * - * Defaults to MAX_UINT32 - */ - uint32 maximumOffset; - - /* - * A set of flags that describes optimizations that the host may perform - * while performing this surface DMA operation. The guest should never rely - * on behaviour that is different when these flags are set for correctness. - * - * Defaults to 0 - */ - SVGA3dSurfaceDMAFlags flags; -} -#include "vmware_pack_end.h" -SVGA3dCmdSurfaceDMASuffix; - -/* - * SVGA_3D_CMD_DRAW_PRIMITIVES -- - * - * This command is the SVGA3D device's generic drawing entry point. - * It can draw multiple ranges of primitives, optionally using an - * index buffer, using an arbitrary collection of vertex buffers. - * - * Each SVGA3dVertexDecl defines a distinct vertex array to bind - * during this draw call. The declarations specify which surface - * the vertex data lives in, what that vertex data is used for, - * and how to interpret it. - * - * Each SVGA3dPrimitiveRange defines a collection of primitives - * to render using the same vertex arrays. An index buffer is - * optional. - */ - -typedef -#include "vmware_pack_begin.h" -struct { - /* - * A range hint is an optional specification for the range of indices - * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed - * that the entire array will be used. - * - * These are only hints. The SVGA3D device may use them for - * performance optimization if possible, but it's also allowed to - * ignore these values. - */ - uint32 first; - uint32 last; -} -#include "vmware_pack_end.h" -SVGA3dArrayRangeHint; - -typedef -#include "vmware_pack_begin.h" -struct { - /* - * Define the origin and shape of a vertex or index array. Both - * 'offset' and 'stride' are in bytes. The provided surface will be - * reinterpreted as a flat array of bytes in the same format used - * by surface DMA operations. To avoid unnecessary conversions, the - * surface should be created with the SVGA3D_BUFFER format. - * - * Index 0 in the array starts 'offset' bytes into the surface. - * Index 1 begins at byte 'offset + stride', etc. 
Array indices may - * not be negative. - */ - uint32 surfaceId; - uint32 offset; - uint32 stride; -} -#include "vmware_pack_end.h" -SVGA3dArray; - -typedef -#include "vmware_pack_begin.h" -struct { - /* - * Describe a vertex array's data type, and define how it is to be - * used by the fixed function pipeline or the vertex shader. It - * isn't useful to have two VertexDecls with the same - * VertexArrayIdentity in one draw call. - */ - SVGA3dDeclType type; - SVGA3dDeclMethod method; - SVGA3dDeclUsage usage; - uint32 usageIndex; -} -#include "vmware_pack_end.h" -SVGA3dVertexArrayIdentity; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dVertexDecl { - SVGA3dVertexArrayIdentity identity; - SVGA3dArray array; - SVGA3dArrayRangeHint rangeHint; -} -#include "vmware_pack_end.h" -SVGA3dVertexDecl; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dPrimitiveRange { - /* - * Define a group of primitives to render, from sequential indices. - * - * The value of 'primitiveType' and 'primitiveCount' imply the - * total number of vertices that will be rendered. - */ - SVGA3dPrimitiveType primType; - uint32 primitiveCount; - - /* - * Optional index buffer. If indexArray.surfaceId is - * SVGA3D_INVALID_ID, we render without an index buffer. Rendering - * without an index buffer is identical to rendering with an index - * buffer containing the sequence [0, 1, 2, 3, ...]. - * - * If an index buffer is in use, indexWidth specifies the width in - * bytes of each index value. It must be less than or equal to - * indexArray.stride. - * - * (Currently, the SVGA3D device requires index buffers to be tightly - * packed. In other words, indexWidth == indexArray.stride) - */ - SVGA3dArray indexArray; - uint32 indexWidth; - - /* - * Optional index bias. This number is added to all indices from - * indexArray before they are used as vertex array indices. This - * can be used in multiple ways: - * - * - When not using an indexArray, this bias can be used to - * specify where in the vertex arrays to begin rendering. - * - * - A positive number here is equivalent to increasing the - * offset in each vertex array. - * - * - A negative number can be used to render using a small - * vertex array and an index buffer that contains large - * values. This may be used by some applications that - * crop a vertex buffer without modifying their index - * buffer. - * - * Note that rendering with a negative bias value may be slower and - * use more memory than rendering with a positive or zero bias. - */ - int32 indexBias; -} -#include "vmware_pack_end.h" -SVGA3dPrimitiveRange; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - uint32 numVertexDecls; - uint32 numRanges; - - /* - * There are two variable size arrays after the - * SVGA3dCmdDrawPrimitives structure. In order, - * they are: - * - * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than - * SVGA3D_MAX_VERTEX_ARRAYS; - * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than - * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES; - * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains - * the frequency divisor for the corresponding vertex decl). - */ -} -#include "vmware_pack_end.h" -SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - - uint32 primitiveCount; /* How many primitives to render */ - uint32 startVertexLocation; /* Which vertex do we start rendering at. 
*/ - - uint8 primitiveType; /* SVGA3dPrimitiveType */ - uint8 padding[3]; -} -#include "vmware_pack_end.h" -SVGA3dCmdDraw; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - - uint8 primitiveType; /* SVGA3dPrimitiveType */ - - uint32 indexBufferSid; /* Valid index buffer sid. */ - uint32 indexBufferOffset; /* Byte offset into the vertex buffer, almost */ - /* always 0 for pre SM guests, non-zero for OpenGL */ - /* guests. We can't represent non-multiple of */ - /* stride offsets in D3D9Renderer... */ - uint8 indexBufferStride; /* Allowable values = 1, 2, or 4 */ - - int32 baseVertexLocation; /* Bias applied to the index when selecting a */ - /* vertex from the streams, may be negative */ - - uint32 primitiveCount; /* How many primitives to render */ - uint32 pad0; - uint16 pad1; -} -#include "vmware_pack_end.h" -SVGA3dCmdDrawIndexed; - -typedef -#include "vmware_pack_begin.h" -struct { - /* - * Describe a vertex array's data type, and define how it is to be - * used by the fixed function pipeline or the vertex shader. It - * isn't useful to have two VertexDecls with the same - * VertexArrayIdentity in one draw call. - */ - uint16 streamOffset; - uint8 stream; - uint8 type; /* SVGA3dDeclType */ - uint8 method; /* SVGA3dDeclMethod */ - uint8 usage; /* SVGA3dDeclUsage */ - uint8 usageIndex; - uint8 padding; - -} -#include "vmware_pack_end.h" -SVGA3dVertexElement; - -/* - * Should the vertex element respect the stream value? The high bit of the - * stream should be set to indicate that the stream should be respected. If - * the high bit is not set, the stream will be ignored and replaced by the index - * of the position of the currently considered vertex element. - * - * All guests should set this bit and correctly specify the stream going - * forward. - */ #define SVGA3D_VERTEX_ELEMENT_RESPECT_STREAM (1 << 7) -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - - uint32 numElements; - - /* - * Followed by numElements SVGA3dVertexElement structures. - * - * If numElements < SVGA3D_MAX_VERTEX_ARRAYS, the remaining elements - * are cleared and will not be used by following draws. - */ -} -#include "vmware_pack_end.h" -SVGA3dCmdSetVertexDecls; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 sid; - uint32 stride; - uint32 offset; -} -#include "vmware_pack_end.h" -SVGA3dVertexStream; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - - uint32 numStreams; - /* - * Followed by numStream SVGA3dVertexStream structures. - * - * If numStreams < SVGA3D_MAX_VERTEX_ARRAYS, the remaining streams - * are cleared and will not be used by following draws. 
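As a hedged illustration of the variable-length layout described above, a guest might size the SET_VERTEX_STREAMS payload as follows; the helper name is made up, and only the structure layout comes from this header:

static uint32 example_set_vertex_streams_cmd_size(uint32 numStreams)
{
	/*
	 * The fixed SVGA3dCmdSetVertexStreams body is followed by
	 * numStreams SVGA3dVertexStream entries (sid, stride, offset).
	 */
	return (uint32)(sizeof(SVGA3dCmdSetVertexStreams) +
			numStreams * sizeof(SVGA3dVertexStream));
}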
- */ -} -#include "vmware_pack_end.h" -SVGA3dCmdSetVertexStreams; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - uint32 numDivisors; -} -#include "vmware_pack_end.h" -SVGA3dCmdSetVertexDivisors; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 stage; - SVGA3dTextureStateName name; - union { - uint32 value; - float floatValue; - }; -} -#include "vmware_pack_end.h" -SVGA3dTextureState; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - /* Followed by variable number of SVGA3dTextureState structures */ -} -#include "vmware_pack_end.h" -SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGA3dTransformType type; - float matrix[16]; -} -#include "vmware_pack_end.h" -SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */ - -typedef -#include "vmware_pack_begin.h" -struct { - float min; - float max; -} -#include "vmware_pack_end.h" -SVGA3dZRange; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGA3dZRange zRange; -} -#include "vmware_pack_end.h" -SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */ - -typedef -#include "vmware_pack_begin.h" -struct { - float diffuse[4]; - float ambient[4]; - float specular[4]; - float emissive[4]; - float shininess; -} -#include "vmware_pack_end.h" -SVGA3dMaterial; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGA3dFace face; - SVGA3dMaterial material; -} -#include "vmware_pack_end.h" -SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - uint32 index; - SVGA3dLightData data; -} -#include "vmware_pack_end.h" -SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - uint32 index; - uint32 enabled; -} -#include "vmware_pack_end.h" -SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGA3dRect rect; -} -#include "vmware_pack_end.h" -SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGA3dRect rect; -} -#include "vmware_pack_end.h" -SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - uint32 index; - float plane[4]; -} -#include "vmware_pack_end.h" -SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - uint32 shid; - SVGA3dShaderType type; - /* Followed by variable number of DWORDs for shader bycode */ -} -#include "vmware_pack_end.h" -SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - uint32 shid; - SVGA3dShaderType type; -} -#include "vmware_pack_end.h" -SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - uint32 reg; /* register number */ - SVGA3dShaderType type; - SVGA3dShaderConstType ctype; - uint32 values[4]; - - /* - * Followed by a variable number of additional values. 
- */ -} -#include "vmware_pack_end.h" -SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGA3dShaderType type; - uint32 shid; -} -#include "vmware_pack_end.h" -SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGA3dQueryType type; -} -#include "vmware_pack_end.h" -SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGA3dQueryType type; - SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */ -} -#include "vmware_pack_end.h" -SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */ - - -/* - * SVGA3D_CMD_WAIT_FOR_QUERY -- - * - * Will read the SVGA3dQueryResult structure pointed to by guestResult, - * and if the state member is set to anything else than - * SVGA3D_QUERYSTATE_PENDING, this command will always be a no-op. - * - * Otherwise, in addition to the query explicitly waited for, - * All queries with the same type and issued with the same cid, for which - * an SVGA_3D_CMD_END_QUERY command has previously been sent, will - * be finished after execution of this command. - * - * A query will be identified by the gmrId and offset of the guestResult - * member. If the device can't find an SVGA_3D_CMD_END_QUERY that has - * been sent previously with an indentical gmrId and offset, it will - * effectively end all queries with an identical type issued with the - * same cid, and the SVGA3dQueryResult structure pointed to by - * guestResult will not be written to. This property can be used to - * implement a query barrier for a given cid and query type. - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; /* Same parameters passed to END_QUERY */ - SVGA3dQueryType type; - SVGAGuestPtr guestResult; -} -#include "vmware_pack_end.h" -SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 totalSize; /* Set by guest before query is ended. */ - SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */ - union { /* Set by host on exit from PENDING state */ - uint32 result32; - uint32 queryCookie; /* May be used to identify which QueryGetData this - result corresponds to. */ - }; -} -#include "vmware_pack_end.h" -SVGA3dQueryResult; - - -/* - * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN -- - * - * This is a blit from an SVGA3D surface to a Screen Object. - * This blit must be directed at a specific screen. - * - * The blit copies from a rectangular region of an SVGA3D surface - * image to a rectangular region of a screen. - * - * This command takes an optional variable-length list of clipping - * rectangles after the body of the command. If no rectangles are - * specified, there is no clipping region. The entire destRect is - * drawn to. If one or more rectangles are included, they describe - * a clipping region. The clip rectangle coordinates are measured - * relative to the top-left corner of destRect. - * - * The srcImage must be from mip=0 face=0. - * - * This supports scaling if the src and dest are of different sizes. 
- * - * Availability: - * SVGA_FIFO_CAP_SCREEN_OBJECT - */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dSurfaceImageId srcImage; - SVGASignedRect srcRect; - uint32 destScreenId; /* Screen Object ID */ - SVGASignedRect destRect; - /* Clipping: zero or more SVGASignedRects follow */ -} -#include "vmware_pack_end.h" -SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 sid; - SVGA3dTextureFilter filter; -} -#include "vmware_pack_end.h" -SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 sid; -} -#include "vmware_pack_end.h" -SVGA3dCmdActivateSurface; /* SVGA_3D_CMD_ACTIVATE_SURFACE */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 sid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDeactivateSurface; /* SVGA_3D_CMD_DEACTIVATE_SURFACE */ - -/* - * Screen DMA command - * - * Available with SVGA_FIFO_CAP_SCREEN_OBJECT_2. The SVGA_CAP_3D device - * cap bit is not required. - * - * - refBuffer and destBuffer are 32bit BGRX; refBuffer and destBuffer could - * be different, but it is required that guest makes sure refBuffer has - * exactly the same contents that were written to when last time screen DMA - * command is received by host. - * - * - changemap is generated by lib/blit, and it has the changes from last - * received screen DMA or more. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdScreenDMA { - uint32 screenId; - SVGAGuestImage refBuffer; - SVGAGuestImage destBuffer; - SVGAGuestImage changeMap; -} -#include "vmware_pack_end.h" -SVGA3dCmdScreenDMA; /* SVGA_3D_CMD_SCREEN_DMA */ - -/* - * Logic ops - */ - -#define SVGA3D_LOTRANSBLT_HONORALPHA (0x01) -#define SVGA3D_LOSTRETCHBLT_MIRRORX (0x01) -#define SVGA3D_LOSTRETCHBLT_MIRRORY (0x02) -#define SVGA3D_LOALPHABLEND_SRCHASALPHA (0x01) - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdLogicOpsBitBlt { - /* - * All LogicOps surfaces are one-level - * surfaces so mipmap & face should always - * be zero. - */ - SVGA3dSurfaceImageId src; - SVGA3dSurfaceImageId dst; - SVGA3dLogicOp logicOp; - SVGA3dLogicOpRop3 logicOpRop3; - /* Followed by variable number of SVGA3dCopyBox structures */ -} -#include "vmware_pack_end.h" -SVGA3dCmdLogicOpsBitBlt; /* SVGA_3D_CMD_LOGICOPS_BITBLT */ - - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdLogicOpsTransBlt { - /* - * All LogicOps surfaces are one-level - * surfaces so mipmap & face should always - * be zero. - */ - SVGA3dSurfaceImageId src; - SVGA3dSurfaceImageId dst; - uint32 color; - uint32 flags; - SVGA3dBox srcBox; - SVGA3dSignedBox dstBox; - SVGA3dBox clipBox; -} -#include "vmware_pack_end.h" -SVGA3dCmdLogicOpsTransBlt; /* SVGA_3D_CMD_LOGICOPS_TRANSBLT */ - - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdLogicOpsStretchBlt { - /* - * All LogicOps surfaces are one-level - * surfaces so mipmap & face should always - * be zero. - */ - SVGA3dSurfaceImageId src; - SVGA3dSurfaceImageId dst; - uint16 mode; - uint16 flags; - SVGA3dBox srcBox; - SVGA3dSignedBox dstBox; - SVGA3dBox clipBox; -} -#include "vmware_pack_end.h" -SVGA3dCmdLogicOpsStretchBlt; /* SVGA_3D_CMD_LOGICOPS_STRETCHBLT */ - - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdLogicOpsColorFill { - /* - * All LogicOps surfaces are one-level - * surfaces so mipmap & face should always - * be zero. 
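The hunks that follow replace the vmware_pack_begin.h / vmware_pack_end.h wrappers used above with inline #pragma pack(push, 1) / #pragma pack(pop) pairs. A minimal sketch of the intended equivalence, using a made-up structure rather than one of the SVGA types and assuming the old wrappers expanded to the same 1-byte packing:

#pragma pack(push, 1)
typedef struct {
	uint32 a; /* 4 bytes */
	uint8 b;  /* 1 byte; no tail padding under pack(1) */
} ExamplePacked;
#pragma pack(pop)

/* C11 static assert, illustrative only: the packed layout is 5 bytes, not 8. */
_Static_assert(sizeof(ExamplePacked) == 5, "ExamplePacked must be densely packed");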
- */ - SVGA3dSurfaceImageId dst; - uint32 color; - SVGA3dLogicOp logicOp; - SVGA3dLogicOpRop3 logicOpRop3; - /* Followed by variable number of SVGA3dRect structures. */ -} -#include "vmware_pack_end.h" -SVGA3dCmdLogicOpsColorFill; /* SVGA_3D_CMD_LOGICOPS_COLORFILL */ - - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdLogicOpsAlphaBlend { - /* - * All LogicOps surfaces are one-level - * surfaces so mipmap & face should always - * be zero. - */ - SVGA3dSurfaceImageId src; - SVGA3dSurfaceImageId dst; - uint32 alphaVal; - uint32 flags; - SVGA3dBox srcBox; - SVGA3dSignedBox dstBox; - SVGA3dBox clipBox; -} -#include "vmware_pack_end.h" -SVGA3dCmdLogicOpsAlphaBlend; /* SVGA_3D_CMD_LOGICOPS_ALPHABLEND */ +#pragma pack(push, 1) +typedef struct { + uint32 cid; + + uint32 numElements; + +} SVGA3dCmdSetVertexDecls; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 sid; + uint32 stride; + uint32 offset; +} SVGA3dVertexStream; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + + uint32 numStreams; + +} SVGA3dCmdSetVertexStreams; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + uint32 numDivisors; +} SVGA3dCmdSetVertexDivisors; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 stage; + SVGA3dTextureStateName name; + union { + uint32 value; + float floatValue; + }; +} SVGA3dTextureState; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + +} SVGA3dCmdSetTextureState; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGA3dTransformType type; + float matrix[16]; +} SVGA3dCmdSetTransform; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + float min; + float max; +} SVGA3dZRange; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGA3dZRange zRange; +} SVGA3dCmdSetZRange; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + float diffuse[4]; + float ambient[4]; + float specular[4]; + float emissive[4]; + float shininess; +} SVGA3dMaterial; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGA3dFace face; + SVGA3dMaterial material; +} SVGA3dCmdSetMaterial; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + uint32 index; + SVGA3dLightData data; +} SVGA3dCmdSetLightData; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + uint32 index; + uint32 enabled; +} SVGA3dCmdSetLightEnabled; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGA3dRect rect; +} SVGA3dCmdSetViewport; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGA3dRect rect; +} SVGA3dCmdSetScissorRect; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + uint32 index; + float plane[4]; +} SVGA3dCmdSetClipPlane; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + uint32 shid; + SVGA3dShaderType type; + +} SVGA3dCmdDefineShader; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + uint32 shid; + SVGA3dShaderType type; +} SVGA3dCmdDestroyShader; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + uint32 reg; + SVGA3dShaderType type; + SVGA3dShaderConstType ctype; + uint32 values[4]; + +} SVGA3dCmdSetShaderConst; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGA3dShaderType type; + uint32 shid; +} SVGA3dCmdSetShader; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef 
struct { + uint32 cid; + SVGA3dQueryType type; +} SVGA3dCmdBeginQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGA3dQueryType type; + SVGAGuestPtr guestResult; +} SVGA3dCmdEndQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGA3dQueryType type; + SVGAGuestPtr guestResult; +} SVGA3dCmdWaitForQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 totalSize; + SVGA3dQueryState state; + union { + uint32 result32; + uint32 queryCookie; + }; +} SVGA3dQueryResult; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dSurfaceImageId srcImage; + SVGASignedRect srcRect; + uint32 destScreenId; + SVGASignedRect destRect; + +} SVGA3dCmdBlitSurfaceToScreen; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 sid; + SVGA3dTextureFilter filter; +} SVGA3dCmdGenerateMipmaps; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 sid; +} SVGA3dCmdActivateSurface; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 sid; +} SVGA3dCmdDeactivateSurface; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdScreenDMA { + uint32 screenId; + SVGAGuestImage refBuffer; + SVGAGuestImage destBuffer; + SVGAGuestImage changeMap; +} SVGA3dCmdScreenDMA; +#pragma pack(pop) + +#define SVGA3D_LOTRANSBLT_HONORALPHA (0x01) +#define SVGA3D_LOSTRETCHBLT_MIRRORX (0x01) +#define SVGA3D_LOSTRETCHBLT_MIRRORY (0x02) +#define SVGA3D_LOALPHABLEND_SRCHASALPHA (0x01) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdLogicOpsBitBlt { + SVGA3dSurfaceImageId src; + SVGA3dSurfaceImageId dst; + SVGA3dLogicOp logicOp; + SVGA3dLogicOpRop3 logicOpRop3; + +} SVGA3dCmdLogicOpsBitBlt; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdLogicOpsTransBlt { + SVGA3dSurfaceImageId src; + SVGA3dSurfaceImageId dst; + uint32 color; + uint32 flags; + SVGA3dBox srcBox; + SVGA3dSignedBox dstBox; + SVGA3dBox clipBox; +} SVGA3dCmdLogicOpsTransBlt; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdLogicOpsStretchBlt { + SVGA3dSurfaceImageId src; + SVGA3dSurfaceImageId dst; + uint16 mode; + uint16 flags; + SVGA3dBox srcBox; + SVGA3dSignedBox dstBox; + SVGA3dBox clipBox; +} SVGA3dCmdLogicOpsStretchBlt; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdLogicOpsColorFill { + SVGA3dSurfaceImageId dst; + uint32 color; + SVGA3dLogicOp logicOp; + SVGA3dLogicOpRop3 logicOpRop3; + +} SVGA3dCmdLogicOpsColorFill; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdLogicOpsAlphaBlend { + SVGA3dSurfaceImageId src; + SVGA3dSurfaceImageId dst; + uint32 alphaVal; + uint32 flags; + SVGA3dBox srcBox; + SVGA3dSignedBox dstBox; + SVGA3dBox clipBox; +} SVGA3dCmdLogicOpsAlphaBlend; +#pragma pack(pop) #define SVGA3D_CLEARTYPE_INVALID_GAMMA_INDEX 0xFFFFFFFF -#define SVGA3D_CLEARTYPE_GAMMA_WIDTH 512 +#define SVGA3D_CLEARTYPE_GAMMA_WIDTH 512 #define SVGA3D_CLEARTYPE_GAMMA_HEIGHT 16 -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdLogicOpsClearTypeBlend { - /* - * All LogicOps surfaces are one-level - * surfaces so mipmap & face should always - * be zero. 
- */ - SVGA3dSurfaceImageId tmp; - SVGA3dSurfaceImageId dst; - SVGA3dSurfaceImageId gammaSurf; - SVGA3dSurfaceImageId alphaSurf; - uint32 gamma; - uint32 color; - uint32 color2; - int32 alphaOffsetX; - int32 alphaOffsetY; - /* Followed by variable number of SVGA3dBox structures */ -} -#include "vmware_pack_end.h" -SVGA3dCmdLogicOpsClearTypeBlend; /* SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND */ - - -/* - * Guest-backed objects definitions. - */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGAMobFormat ptDepth; - uint32 sizeInBytes; - PPN64 base; -} -#include "vmware_pack_end.h" -SVGAOTableMobEntry; -#define SVGA3D_OTABLE_MOB_ENTRY_SIZE (sizeof(SVGAOTableMobEntry)) - -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dSurfaceFormat format; - SVGA3dSurface1Flags surface1Flags; - uint32 numMipLevels; - uint32 multisampleCount; - SVGA3dTextureFilter autogenFilter; - SVGA3dSize size; - SVGAMobId mobid; - uint32 arraySize; - uint32 mobPitch; - SVGA3dSurface2Flags surface2Flags; - uint8 multisamplePattern; - uint8 qualityLevel; - uint16 bufferByteStride; - float minLOD; - uint32 pad0[2]; -} -#include "vmware_pack_end.h" -SVGAOTableSurfaceEntry; -#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE (sizeof(SVGAOTableSurfaceEntry)) - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGAMobId mobid; -} -#include "vmware_pack_end.h" -SVGAOTableContextEntry; -#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE (sizeof(SVGAOTableContextEntry)) - -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dShaderType type; - uint32 sizeInBytes; - uint32 offsetInBytes; - SVGAMobId mobid; -} -#include "vmware_pack_end.h" -SVGAOTableShaderEntry; -#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry)) - -#define SVGA_STFLAG_PRIMARY (1 << 0) -#define SVGA_STFLAG_RESERVED (1 << 1) /* Added with cap SVGA_CAP_HP_CMD_QUEUE */ +#pragma pack(push, 1) +typedef struct SVGA3dCmdLogicOpsClearTypeBlend { + SVGA3dSurfaceImageId tmp; + SVGA3dSurfaceImageId dst; + SVGA3dSurfaceImageId gammaSurf; + SVGA3dSurfaceImageId alphaSurf; + uint32 gamma; + uint32 color; + uint32 color2; + int32 alphaOffsetX; + int32 alphaOffsetY; + +} SVGA3dCmdLogicOpsClearTypeBlend; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGAMobFormat ptDepth; + uint32 sizeInBytes; + PPN64 base; +} SVGAOTableMobEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dSurfaceFormat format; + SVGA3dSurface1Flags surface1Flags; + uint32 numMipLevels; + uint32 multisampleCount; + SVGA3dTextureFilter autogenFilter; + SVGA3dSize size; + SVGAMobId mobid; + uint32 arraySize; + uint32 mobPitch; + SVGA3dSurface2Flags surface2Flags; + uint8 multisamplePattern; + uint8 qualityLevel; + uint16 bufferByteStride; + float minLOD; + uint32 pad0[2]; +} SVGAOTableSurfaceEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGAMobId mobid; +} SVGAOTableContextEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dShaderType type; + uint32 sizeInBytes; + uint32 offsetInBytes; + SVGAMobId mobid; +} SVGAOTableShaderEntry; +#pragma pack(pop) + +#define SVGA_STFLAG_PRIMARY (1 << 0) +#define SVGA_STFLAG_RESERVED (1 << 1) typedef uint32 SVGAScreenTargetFlags; -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dSurfaceImageId image; - uint32 width; - uint32 height; - int32 xRoot; - int32 yRoot; - SVGAScreenTargetFlags flags; - uint32 dpi; - uint32 pad[7]; -} -#include "vmware_pack_end.h" -SVGAOTableScreenTargetEntry; -#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE \ - 
(sizeof(SVGAOTableScreenTargetEntry)) - -typedef -#include "vmware_pack_begin.h" -struct { - float value[4]; -} -#include "vmware_pack_end.h" -SVGA3dShaderConstFloat; - -typedef -#include "vmware_pack_begin.h" -struct { - int32 value[4]; -} -#include "vmware_pack_end.h" -SVGA3dShaderConstInt; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 value; -} -#include "vmware_pack_end.h" -SVGA3dShaderConstBool; - -typedef -#include "vmware_pack_begin.h" -struct { - uint16 streamOffset; - uint8 stream; - uint8 type; - uint8 methodUsage; - uint8 usageIndex; -} -#include "vmware_pack_end.h" -SVGAGBVertexElement; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 sid; - uint16 stride; - uint32 offset; -} -#include "vmware_pack_end.h" -SVGAGBVertexStream; -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dRect viewport; - SVGA3dRect scissorRect; - SVGA3dZRange zRange; - - SVGA3dSurfaceImageId renderTargets[SVGA3D_RT_MAX]; - SVGAGBVertexElement decl1[4]; - - uint32 renderStates[SVGA3D_RS_MAX]; - SVGAGBVertexElement decl2[18]; - uint32 pad0[2]; - - struct { - SVGA3dFace face; - SVGA3dMaterial material; - } material; - - float clipPlanes[SVGA3D_NUM_CLIPPLANES][4]; - float matrices[SVGA3D_TRANSFORM_MAX][16]; - - SVGA3dBool lightEnabled[SVGA3D_NUM_LIGHTS]; - SVGA3dLightData lightData[SVGA3D_NUM_LIGHTS]; - - /* - * Shaders currently bound - */ - uint32 shaders[SVGA3D_NUM_SHADERTYPE_PREDX]; - SVGAGBVertexElement decl3[10]; - uint32 pad1[3]; - - uint32 occQueryActive; - uint32 occQueryValue; - - /* - * Int/Bool Shader constants - */ - SVGA3dShaderConstInt pShaderIValues[SVGA3D_CONSTINTREG_MAX]; - SVGA3dShaderConstInt vShaderIValues[SVGA3D_CONSTINTREG_MAX]; - uint16 pShaderBValues; - uint16 vShaderBValues; - - - SVGAGBVertexStream streams[SVGA3D_MAX_VERTEX_ARRAYS]; - SVGA3dVertexDivisor divisors[SVGA3D_MAX_VERTEX_ARRAYS]; - uint32 numVertexDecls; - uint32 numVertexStreams; - uint32 numVertexDivisors; - uint32 pad2[30]; - - /* - * Texture Stages - * - * SVGA3D_TS_INVALID through SVGA3D_TS_CONSTANT are in the - * textureStages array. - * SVGA3D_TS_COLOR_KEY is in tsColorKey. - */ - uint32 tsColorKey[SVGA3D_NUM_TEXTURE_UNITS]; - uint32 textureStages[SVGA3D_NUM_TEXTURE_UNITS][SVGA3D_TS_CONSTANT + 1]; - uint32 tsColorKeyEnable[SVGA3D_NUM_TEXTURE_UNITS]; - - /* - * Float Shader constants. - */ - SVGA3dShaderConstFloat pShaderFValues[SVGA3D_CONSTREG_MAX]; - SVGA3dShaderConstFloat vShaderFValues[SVGA3D_CONSTREG_MAX]; -} -#include "vmware_pack_end.h" -SVGAGBContextData; -#define SVGA3D_CONTEXT_DATA_SIZE (sizeof(SVGAGBContextData)) - -/* - * SVGA3dCmdSetOTableBase -- - * - * This command allows the guest to specify the base PPN of the - * specified object table. - */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGAOTableType type; - PPN32 baseAddress; - uint32 sizeInBytes; - uint32 validSizeInBytes; - SVGAMobFormat ptDepth; -} -#include "vmware_pack_end.h" -SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGAOTableType type; - PPN64 baseAddress; - uint32 sizeInBytes; - uint32 validSizeInBytes; - SVGAMobFormat ptDepth; -} -#include "vmware_pack_end.h" -SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ - -/* - * Guests using SVGA_3D_CMD_GROW_OTABLE are promising that - * the new OTable contains the same contents as the old one, except possibly - * for some new invalid entries at the end. - * - * (Otherwise, guests should use one of the SetOTableBase commands.) 
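A hedged sketch of filling the 64-bit OTable base command described here; the helper and its argument choices are illustrative, with only the field names taken from this header (validSizeInBytes is zeroed on the assumption that a brand-new table has no valid entries yet):

static void example_set_otable_base64(SVGA3dCmdSetOTableBase64 *cmd,
				      SVGAOTableType type, PPN64 basePPN,
				      uint32 sizeInBytes, SVGAMobFormat ptDepth)
{
	cmd->type = type;
	cmd->baseAddress = basePPN;     /* base PPN of the table's page tables */
	cmd->sizeInBytes = sizeInBytes; /* total size of the object table */
	cmd->validSizeInBytes = 0;      /* assume a fresh table with no valid entries */
	cmd->ptDepth = ptDepth;         /* page-table depth chosen by the caller */
}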
- */ -typedef -#include "vmware_pack_begin.h" -struct { - SVGAOTableType type; - PPN64 baseAddress; - uint32 sizeInBytes; - uint32 validSizeInBytes; - SVGAMobFormat ptDepth; -} -#include "vmware_pack_end.h" -SVGA3dCmdGrowOTable; /* SVGA_3D_CMD_GROW_OTABLE */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGAOTableType type; -} -#include "vmware_pack_end.h" -SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ - -/* - * Define a memory object (Mob) in the OTable. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDefineGBMob { - SVGAMobId mobid; - SVGAMobFormat ptDepth; - PPN32 base; - uint32 sizeInBytes; -} -#include "vmware_pack_end.h" -SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ - - -/* - * Destroys an object in the OTable. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDestroyGBMob { - SVGAMobId mobid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ - -/* - * Define a memory object (Mob) in the OTable with a PPN64 base. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDefineGBMob64 { - SVGAMobId mobid; - SVGAMobFormat ptDepth; - PPN64 base; - uint32 sizeInBytes; -} -#include "vmware_pack_end.h" -SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ - -/* - * Redefine an object in the OTable with PPN64 base. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdRedefineGBMob64 { - SVGAMobId mobid; - SVGAMobFormat ptDepth; - PPN64 base; - uint32 sizeInBytes; -} -#include "vmware_pack_end.h" -SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ - -/* - * Notification that the page tables have been modified. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdUpdateGBMobMapping { - SVGAMobId mobid; -} -#include "vmware_pack_end.h" -SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ - -/* - * Define a guest-backed surface. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDefineGBSurface { - uint32 sid; - SVGA3dSurface1Flags surfaceFlags; - SVGA3dSurfaceFormat format; - uint32 numMipLevels; - uint32 multisampleCount; - SVGA3dTextureFilter autogenFilter; - SVGA3dSize size; -} -#include "vmware_pack_end.h" -SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ - -/* - * Defines a guest-backed surface, adding the arraySize field. - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDefineGBSurface_v2 { - uint32 sid; - SVGA3dSurface1Flags surfaceFlags; - SVGA3dSurfaceFormat format; - uint32 numMipLevels; - uint32 multisampleCount; - SVGA3dTextureFilter autogenFilter; - SVGA3dSize size; - uint32 arraySize; - uint32 pad; -} -#include "vmware_pack_end.h" -SVGA3dCmdDefineGBSurface_v2; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */ - -/* - * Defines a guest-backed surface, adding the larger flags. - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDefineGBSurface_v3 { - uint32 sid; - SVGA3dSurfaceAllFlags surfaceFlags; - SVGA3dSurfaceFormat format; - uint32 numMipLevels; - uint32 multisampleCount; - SVGA3dMSPattern multisamplePattern; - SVGA3dMSQualityLevel qualityLevel; - SVGA3dTextureFilter autogenFilter; - SVGA3dSize size; - uint32 arraySize; -} -#include "vmware_pack_end.h" -SVGA3dCmdDefineGBSurface_v3; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 */ - -/* - * Defines a guest-backed surface, adding buffer byte stride. 
- */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDefineGBSurface_v4 { - uint32 sid; - SVGA3dSurfaceAllFlags surfaceFlags; - SVGA3dSurfaceFormat format; - uint32 numMipLevels; - uint32 multisampleCount; - SVGA3dMSPattern multisamplePattern; - SVGA3dMSQualityLevel qualityLevel; - SVGA3dTextureFilter autogenFilter; - SVGA3dSize size; - uint32 arraySize; - uint32 bufferByteStride; -} -#include "vmware_pack_end.h" -SVGA3dCmdDefineGBSurface_v4; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V4 */ - -/* - * Destroy a guest-backed surface. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDestroyGBSurface { - uint32 sid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ - -/* - * Bind a guest-backed surface to a mob. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdBindGBSurface { - uint32 sid; - SVGAMobId mobid; -} -#include "vmware_pack_end.h" -SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdBindGBSurfaceWithPitch { - uint32 sid; - SVGAMobId mobid; - uint32 baseLevelPitch; -} -#include "vmware_pack_end.h" -SVGA3dCmdBindGBSurfaceWithPitch; /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */ - -/* - * Conditionally bind a mob to a guest-backed surface if testMobid - * matches the currently bound mob. Optionally issue a - * readback/update on the surface while it is still bound to the old - * mobid if the mobid is changed by this command. - */ +#pragma pack(push, 1) +typedef struct { + SVGA3dSurfaceImageId image; + uint32 width; + uint32 height; + int32 xRoot; + int32 yRoot; + SVGAScreenTargetFlags flags; + uint32 dpi; + uint32 pad[7]; +} SVGAOTableScreenTargetEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + float value[4]; +} SVGA3dShaderConstFloat; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + int32 value[4]; +} SVGA3dShaderConstInt; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 value; +} SVGA3dShaderConstBool; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint16 streamOffset; + uint8 stream; + uint8 type; + uint8 methodUsage; + uint8 usageIndex; +} SVGAGBVertexElement; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 sid; + uint16 stride; + uint32 offset; +} SVGAGBVertexStream; +#pragma pack(pop) +#pragma pack(push, 1) +typedef struct { + SVGA3dRect viewport; + SVGA3dRect scissorRect; + SVGA3dZRange zRange; + + SVGA3dSurfaceImageId renderTargets[SVGA3D_RT_MAX]; + SVGAGBVertexElement decl1[4]; + + uint32 renderStates[SVGA3D_RS_MAX]; + SVGAGBVertexElement decl2[18]; + uint32 pad0[2]; + + struct { + SVGA3dFace face; + SVGA3dMaterial material; + } material; + + float clipPlanes[SVGA3D_MAX_CLIP_PLANES][4]; + float matrices[SVGA3D_TRANSFORM_MAX][16]; + + SVGA3dBool lightEnabled[SVGA3D_NUM_LIGHTS]; + SVGA3dLightData lightData[SVGA3D_NUM_LIGHTS]; + + uint32 shaders[SVGA3D_NUM_SHADERTYPE_PREDX]; + SVGAGBVertexElement decl3[10]; + uint32 pad1[3]; + + uint32 occQueryActive; + uint32 occQueryValue; + + SVGA3dShaderConstInt pShaderIValues[SVGA3D_CONSTINTREG_MAX]; + SVGA3dShaderConstInt vShaderIValues[SVGA3D_CONSTINTREG_MAX]; + uint16 pShaderBValues; + uint16 vShaderBValues; + + SVGAGBVertexStream streams[SVGA3D_MAX_VERTEX_ARRAYS]; + SVGA3dVertexDivisor divisors[SVGA3D_MAX_VERTEX_ARRAYS]; + uint32 numVertexDecls; + uint32 numVertexStreams; + uint32 numVertexDivisors; + uint32 pad2[30]; + + uint32 tsColorKey[SVGA3D_NUM_TEXTURE_UNITS]; + uint32 
textureStages[SVGA3D_NUM_TEXTURE_UNITS][SVGA3D_TS_CONSTANT + 1]; + uint32 tsColorKeyEnable[SVGA3D_NUM_TEXTURE_UNITS]; + + SVGA3dShaderConstFloat pShaderFValues[SVGA3D_CONSTREG_MAX]; + SVGA3dShaderConstFloat vShaderFValues[SVGA3D_CONSTREG_MAX]; +} SVGAGBContextData; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGAOTableType type; + PPN32 baseAddress; + uint32 sizeInBytes; + uint32 validSizeInBytes; + SVGAMobFormat ptDepth; +} SVGA3dCmdSetOTableBase; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGAOTableType type; + PPN64 baseAddress; + uint32 sizeInBytes; + uint32 validSizeInBytes; + SVGAMobFormat ptDepth; +} SVGA3dCmdSetOTableBase64; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGAOTableType type; + PPN64 baseAddress; + uint32 sizeInBytes; + uint32 validSizeInBytes; + SVGAMobFormat ptDepth; +} SVGA3dCmdGrowOTable; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGAOTableType type; +} SVGA3dCmdReadbackOTable; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDefineGBMob { + SVGAMobId mobid; + SVGAMobFormat ptDepth; + PPN32 base; + uint32 sizeInBytes; +} SVGA3dCmdDefineGBMob; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDestroyGBMob { + SVGAMobId mobid; +} SVGA3dCmdDestroyGBMob; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDefineGBMob64 { + SVGAMobId mobid; + SVGAMobFormat ptDepth; + PPN64 base; + uint32 sizeInBytes; +} SVGA3dCmdDefineGBMob64; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdRedefineGBMob64 { + SVGAMobId mobid; + SVGAMobFormat ptDepth; + PPN64 base; + uint32 sizeInBytes; +} SVGA3dCmdRedefineGBMob64; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdUpdateGBMobMapping { + SVGAMobId mobid; +} SVGA3dCmdUpdateGBMobMapping; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDefineGBSurface { + uint32 sid; + SVGA3dSurface1Flags surfaceFlags; + SVGA3dSurfaceFormat format; + uint32 numMipLevels; + uint32 multisampleCount; + SVGA3dTextureFilter autogenFilter; + SVGA3dSize size; +} SVGA3dCmdDefineGBSurface; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDefineGBSurface_v2 { + uint32 sid; + SVGA3dSurface1Flags surfaceFlags; + SVGA3dSurfaceFormat format; + uint32 numMipLevels; + uint32 multisampleCount; + SVGA3dTextureFilter autogenFilter; + SVGA3dSize size; + uint32 arraySize; + uint32 pad; +} SVGA3dCmdDefineGBSurface_v2; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDefineGBSurface_v3 { + uint32 sid; + SVGA3dSurfaceAllFlags surfaceFlags; + SVGA3dSurfaceFormat format; + uint32 numMipLevels; + uint32 multisampleCount; + SVGA3dMSPattern multisamplePattern; + SVGA3dMSQualityLevel qualityLevel; + SVGA3dTextureFilter autogenFilter; + SVGA3dSize size; + uint32 arraySize; +} SVGA3dCmdDefineGBSurface_v3; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDefineGBSurface_v4 { + uint32 sid; + SVGA3dSurfaceAllFlags surfaceFlags; + SVGA3dSurfaceFormat format; + uint32 numMipLevels; + uint32 multisampleCount; + SVGA3dMSPattern multisamplePattern; + SVGA3dMSQualityLevel qualityLevel; + SVGA3dTextureFilter autogenFilter; + SVGA3dSize size; + uint32 arraySize; + uint32 bufferByteStride; +} SVGA3dCmdDefineGBSurface_v4; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDestroyGBSurface { + uint32 sid; +} SVGA3dCmdDestroyGBSurface; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdBindGBSurface { + uint32 
sid; + SVGAMobId mobid; +} SVGA3dCmdBindGBSurface; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdBindGBSurfaceWithPitch { + uint32 sid; + SVGAMobId mobid; + uint32 baseLevelPitch; +} SVGA3dCmdBindGBSurfaceWithPitch; +#pragma pack(pop) #define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0) -#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_UPDATE (1 << 1) - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdCondBindGBSurface { - uint32 sid; - SVGAMobId testMobid; - SVGAMobId mobid; - uint32 flags; -} -#include "vmware_pack_end.h" -SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ - -/* - * Update an image in a guest-backed surface. - * (Inform the device that the guest-contents have been updated.) - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdUpdateGBImage { - SVGA3dSurfaceImageId image; - SVGA3dBox box; -} -#include "vmware_pack_end.h" -SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ - -/* - * Update an entire guest-backed surface. - * (Inform the device that the guest-contents have been updated.) - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdUpdateGBSurface { - uint32 sid; -} -#include "vmware_pack_end.h" -SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ - -/* - * Readback an image in a guest-backed surface. - * (Request the device to flush the dirty contents into the guest.) - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdReadbackGBImage { - SVGA3dSurfaceImageId image; -} -#include "vmware_pack_end.h" -SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE */ - -/* - * Readback an entire guest-backed surface. - * (Request the device to flush the dirty contents into the guest.) - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdReadbackGBSurface { - uint32 sid; -} -#include "vmware_pack_end.h" -SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ - -/* - * Readback a sub rect of an image in a guest-backed surface. After - * issuing this command the driver is required to issue an update call - * of the same region before issuing any other commands that reference - * this surface or rendering is not guaranteed. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdReadbackGBImagePartial { - SVGA3dSurfaceImageId image; - SVGA3dBox box; - uint32 invertBox; -} -#include "vmware_pack_end.h" -SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ - - -/* - * Invalidate an image in a guest-backed surface. - * (Notify the device that the contents can be lost.) - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdInvalidateGBImage { - SVGA3dSurfaceImageId image; -} -#include "vmware_pack_end.h" -SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ - -/* - * Invalidate an entire guest-backed surface. - * (Notify the device that the contents if all images can be lost.) - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdInvalidateGBSurface { - uint32 sid; -} -#include "vmware_pack_end.h" -SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ - -/* - * Invalidate a sub rect of an image in a guest-backed surface. After - * issuing this command the driver is required to issue an update call - * of the same region before issuing any other commands that reference - * this surface or rendering is not guaranteed. 
- */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdInvalidateGBImagePartial { - SVGA3dSurfaceImageId image; - SVGA3dBox box; - uint32 invertBox; -} -#include "vmware_pack_end.h" -SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ - - -/* - * Define a guest-backed context. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDefineGBContext { - uint32 cid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ - -/* - * Destroy a guest-backed context. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDestroyGBContext { - uint32 cid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ - -/* - * Bind a guest-backed context. - * - * validContents should be set to 0 for new contexts, - * and 1 if this is an old context which is getting paged - * back on to the device. - * - * For new contexts, it is recommended that the driver - * issue commands to initialize all interesting state - * prior to rendering. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdBindGBContext { - uint32 cid; - SVGAMobId mobid; - uint32 validContents; -} -#include "vmware_pack_end.h" -SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ - -/* - * Readback a guest-backed context. - * (Request that the device flush the contents back into guest memory.) - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdReadbackGBContext { - uint32 cid; -} -#include "vmware_pack_end.h" -SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ - -/* - * Invalidate a guest-backed context. - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdInvalidateGBContext { - uint32 cid; -} -#include "vmware_pack_end.h" -SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ - -/* - * Define a guest-backed shader. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDefineGBShader { - uint32 shid; - SVGA3dShaderType type; - uint32 sizeInBytes; -} -#include "vmware_pack_end.h" -SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ - -/* - * Bind a guest-backed shader. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdBindGBShader { - uint32 shid; - SVGAMobId mobid; - uint32 offsetInBytes; -} -#include "vmware_pack_end.h" -SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ - -/* - * Destroy a guest-backed shader. - */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDestroyGBShader { - uint32 shid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - uint32 regStart; - SVGA3dShaderType shaderType; - SVGA3dShaderConstType constType; - - /* - * Followed by a variable number of shader constants. - * - * Note that FLOAT and INT constants are 4-dwords in length, while - * BOOL constants are 1-dword in length. 
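A small sizing helper for the inline constant payload described above; the name and the boolean parameter are illustrative, and only the 4-dword/1-dword rule comes from this comment:

static uint32 example_gb_shader_const_payload_bytes(uint32 numConsts, uint32 constIsBool)
{
	/* FLOAT and INT constants are 4 dwords each; BOOL constants are 1 dword. */
	uint32 dwordsPerConst = constIsBool ? 1 : 4;

	return numConsts * dwordsPerConst * (uint32)sizeof(uint32);
}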
- */ -} -#include "vmware_pack_end.h" -SVGA3dCmdSetGBShaderConstInline; /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ - - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGA3dQueryType type; -} -#include "vmware_pack_end.h" -SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGA3dQueryType type; - SVGAMobId mobid; - uint32 offset; -} -#include "vmware_pack_end.h" -SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ - - -/* - * SVGA_3D_CMD_WAIT_FOR_GB_QUERY -- - * - * The semantics of this command are identical to the - * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written - * to a Mob instead of a GMR. - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGA3dQueryType type; - SVGAMobId mobid; - uint32 offset; -} -#include "vmware_pack_end.h" -SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ - - -typedef -#include "vmware_pack_begin.h" -struct { - SVGAMobId mobid; - uint32 mustBeZero; - uint32 initialized; -} -#include "vmware_pack_end.h" -SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGAMobId mobid; - uint32 gartOffset; -} -#include "vmware_pack_end.h" -SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ - - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 gartOffset; - uint32 numPages; -} -#include "vmware_pack_end.h" -SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ - - -/* - * Screen Targets - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 stid; - uint32 width; - uint32 height; - int32 xRoot; - int32 yRoot; - SVGAScreenTargetFlags flags; - - /* - * The physical DPI that the guest expects this screen displayed at. - * - * Guests which are not DPI-aware should set this to zero. 
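A hedged sketch of a non-DPI-aware guest defining a primary screen target, per the comment above; the helper is illustrative, and SVGA_STFLAG_PRIMARY is the flag defined earlier in this header:

static void example_define_primary_screen_target(SVGA3dCmdDefineGBScreenTarget *cmd,
						 uint32 stid, uint32 width, uint32 height)
{
	cmd->stid = stid;
	cmd->width = width;
	cmd->height = height;
	cmd->xRoot = 0;                   /* top-left of the virtual desktop */
	cmd->yRoot = 0;
	cmd->flags = SVGA_STFLAG_PRIMARY; /* mark this target as the primary screen */
	cmd->dpi = 0;                     /* zero means "not DPI-aware", per the comment above */
}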
- */ - uint32 dpi; -} -#include "vmware_pack_end.h" -SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 stid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 stid; - SVGA3dSurfaceImageId image; -} -#include "vmware_pack_end.h" -SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 stid; - SVGA3dRect rect; -} -#include "vmware_pack_end.h" -SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdGBScreenDMA { - uint32 screenId; - uint32 dead; - SVGAMobId destMobID; - uint32 destPitch; - SVGAMobId changeMapMobID; -} -#include "vmware_pack_end.h" -SVGA3dCmdGBScreenDMA; /* SVGA_3D_CMD_GB_SCREEN_DMA */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 value; - uint32 mobId; - uint32 mobOffset; -} -#include "vmware_pack_end.h" -SVGA3dCmdGBMobFence; /* SVGA_3D_CMD_GB_MOB_FENCE */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 stid; - SVGA3dSurfaceImageId dest; - - uint32 statusMobId; - uint32 statusMobOffset; - - /* Reserved fields */ - uint32 mustBeInvalidId; - uint32 mustBeZero; -} -#include "vmware_pack_end.h" -SVGA3dCmdScreenCopy; /* SVGA_3D_CMD_SCREEN_COPY */ +#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_UPDATE (1 << 1) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdCondBindGBSurface { + uint32 sid; + SVGAMobId testMobid; + SVGAMobId mobid; + uint32 flags; +} SVGA3dCmdCondBindGBSurface; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdUpdateGBImage { + SVGA3dSurfaceImageId image; + SVGA3dBox box; +} SVGA3dCmdUpdateGBImage; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdUpdateGBSurface { + uint32 sid; +} SVGA3dCmdUpdateGBSurface; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdReadbackGBImage { + SVGA3dSurfaceImageId image; +} SVGA3dCmdReadbackGBImage; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdReadbackGBSurface { + uint32 sid; +} SVGA3dCmdReadbackGBSurface; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdReadbackGBImagePartial { + SVGA3dSurfaceImageId image; + SVGA3dBox box; + uint32 invertBox; +} SVGA3dCmdReadbackGBImagePartial; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdInvalidateGBImage { + SVGA3dSurfaceImageId image; +} SVGA3dCmdInvalidateGBImage; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdInvalidateGBSurface { + uint32 sid; +} SVGA3dCmdInvalidateGBSurface; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdInvalidateGBImagePartial { + SVGA3dSurfaceImageId image; + SVGA3dBox box; + uint32 invertBox; +} SVGA3dCmdInvalidateGBImagePartial; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDefineGBContext { + uint32 cid; +} SVGA3dCmdDefineGBContext; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDestroyGBContext { + uint32 cid; +} SVGA3dCmdDestroyGBContext; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdBindGBContext { + uint32 cid; + SVGAMobId mobid; + uint32 validContents; +} SVGA3dCmdBindGBContext; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdReadbackGBContext { + uint32 cid; +} SVGA3dCmdReadbackGBContext; +#pragma pack(pop) + +#pragma pack(push, 1) 
+typedef struct SVGA3dCmdInvalidateGBContext { + uint32 cid; +} SVGA3dCmdInvalidateGBContext; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDefineGBShader { + uint32 shid; + SVGA3dShaderType type; + uint32 sizeInBytes; +} SVGA3dCmdDefineGBShader; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdBindGBShader { + uint32 shid; + SVGAMobId mobid; + uint32 offsetInBytes; +} SVGA3dCmdBindGBShader; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDestroyGBShader { + uint32 shid; +} SVGA3dCmdDestroyGBShader; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + uint32 regStart; + SVGA3dShaderType shaderType; + SVGA3dShaderConstType constType; + +} SVGA3dCmdSetGBShaderConstInline; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGA3dQueryType type; +} SVGA3dCmdBeginGBQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGA3dQueryType type; + SVGAMobId mobid; + uint32 offset; +} SVGA3dCmdEndGBQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGA3dQueryType type; + SVGAMobId mobid; + uint32 offset; +} SVGA3dCmdWaitForGBQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGAMobId mobid; + uint32 mustBeZero; + uint32 initialized; +} SVGA3dCmdEnableGart; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGAMobId mobid; + uint32 gartOffset; +} SVGA3dCmdMapMobIntoGart; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 gartOffset; + uint32 numPages; +} SVGA3dCmdUnmapGartRange; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 stid; + uint32 width; + uint32 height; + int32 xRoot; + int32 yRoot; + SVGAScreenTargetFlags flags; + + uint32 dpi; +} SVGA3dCmdDefineGBScreenTarget; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 stid; +} SVGA3dCmdDestroyGBScreenTarget; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 stid; + SVGA3dSurfaceImageId image; +} SVGA3dCmdBindGBScreenTarget; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 stid; + SVGA3dRect rect; +} SVGA3dCmdUpdateGBScreenTarget; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 stid; + SVGA3dRect rect; + SVGA3dFrameUpdateType type; +} SVGA3dCmdUpdateGBScreenTarget_v2; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 stid; + SVGA3dRect rect; + SVGA3dFrameUpdateType type; + SVGAUnsignedPoint srcPoint; +} SVGA3dCmdUpdateGBScreenTargetMove; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdGBScreenDMA { + uint32 screenId; + uint32 dead; + SVGAMobId destMobID; + uint32 destPitch; + SVGAMobId changeMapMobID; +} SVGA3dCmdGBScreenDMA; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 value; + uint32 mobId; + uint32 mobOffset; +} SVGA3dCmdGBMobFence; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 stid; + SVGA3dSurfaceImageId dest; + + uint32 statusMobId; + uint32 statusMobOffset; + + uint32 mustBeInvalidId; + uint32 mustBeZero; +} SVGA3dCmdScreenCopy; +#pragma pack(pop) #define SVGA_SCREEN_COPY_STATUS_FAILURE 0x00 #define SVGA_SCREEN_COPY_STATUS_SUCCESS 0x01 #define SVGA_SCREEN_COPY_STATUS_INVALID 0xFFFFFFFF -typedef -#include "vmware_pack_begin.h" -struct { - uint32 sid; -} -#include "vmware_pack_end.h" -SVGA3dCmdWriteZeroSurface; /* SVGA_3D_CMD_WRITE_ZERO_SURFACE */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 sid; 
-} -#include "vmware_pack_end.h" -SVGA3dCmdHintZeroSurface; /* SVGA_3D_CMD_HINT_ZERO_SURFACE */ - -#endif /* _SVGA3D_CMD_H_ */ +#pragma pack(push, 1) +typedef struct { + uint32 sid; +} SVGA3dCmdWriteZeroSurface; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 sid; +} SVGA3dCmdUpdateZeroSurface; +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h index 617b468c626c..379ec15c7758 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h @@ -1,6 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /********************************************************** - * Copyright 1998-2019 VMware, Inc. + * Copyright 1998-2021 VMware, Inc. + * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -27,484 +27,345 @@ /* * svga3d_devcaps.h -- * - * SVGA 3d caps definitions + * SVGA 3d caps definitions */ -#ifndef _SVGA3D_DEVCAPS_H_ -#define _SVGA3D_DEVCAPS_H_ -#define INCLUDE_ALLOW_MODULE -#define INCLUDE_ALLOW_USERLEVEL -#define INCLUDE_ALLOW_VMCORE -#include "includeCheck.h" +#ifndef _SVGA3D_DEVCAPS_H_ +#define _SVGA3D_DEVCAPS_H_ #include "svga3d_types.h" -/* - * 3D Hardware Version - * - * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo - * register. Is set by the host and read by the guest. This lets - * us make new guest drivers which are backwards-compatible with old - * SVGA hardware revisions. It does not let us support old guest - * drivers. Good enough for now. - * - */ - -#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) -#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16) -#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF) +#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor)&0xFF)) +#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16) +#define SVGA3D_MINOR_HWVERSION(version) ((version)&0xFF) typedef enum { - SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1), - SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2), - SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3), - SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1), - SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4), - SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0), - SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1), - SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1, + SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1), + SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2), + SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3), + SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1), + SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4), + SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0), + SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1), + SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1, } SVGA3dHardwareVersion; -/* - * DevCap indexes. - */ - typedef uint32 SVGA3dDevCapIndex; -#define SVGA3D_DEVCAP_INVALID ((uint32)-1) -#define SVGA3D_DEVCAP_3D 0 -#define SVGA3D_DEVCAP_MAX_LIGHTS 1 - -/* - * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of - * fixed-function texture units available. Each of these units - * work in both FFP and Shader modes, and they support texture - * transforms and texture coordinates. 
The host may have additional - * texture image units that are only usable with shaders. - */ -#define SVGA3D_DEVCAP_MAX_TEXTURES 2 -#define SVGA3D_DEVCAP_MAX_CLIP_PLANES 3 -#define SVGA3D_DEVCAP_VERTEX_SHADER_VERSION 4 -#define SVGA3D_DEVCAP_VERTEX_SHADER 5 -#define SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION 6 -#define SVGA3D_DEVCAP_FRAGMENT_SHADER 7 -#define SVGA3D_DEVCAP_MAX_RENDER_TARGETS 8 -#define SVGA3D_DEVCAP_S23E8_TEXTURES 9 -#define SVGA3D_DEVCAP_S10E5_TEXTURES 10 -#define SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND 11 -#define SVGA3D_DEVCAP_D16_BUFFER_FORMAT 12 -#define SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT 13 -#define SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT 14 -#define SVGA3D_DEVCAP_QUERY_TYPES 15 -#define SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING 16 -#define SVGA3D_DEVCAP_MAX_POINT_SIZE 17 -#define SVGA3D_DEVCAP_MAX_SHADER_TEXTURES 18 -#define SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH 19 -#define SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT 20 -#define SVGA3D_DEVCAP_MAX_VOLUME_EXTENT 21 -#define SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT 22 -#define SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO 23 -#define SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY 24 -#define SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT 25 -#define SVGA3D_DEVCAP_MAX_VERTEX_INDEX 26 -#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS 27 -#define SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS 28 -#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS 29 -#define SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS 30 -#define SVGA3D_DEVCAP_TEXTURE_OPS 31 -#define SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 32 -#define SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 33 -#define SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 34 -#define SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 35 -#define SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 36 -#define SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 37 -#define SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 38 -#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 39 -#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 40 -#define SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 41 -#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 42 -#define SVGA3D_DEVCAP_SURFACEFMT_Z_D16 43 -#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 44 -#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 45 -#define SVGA3D_DEVCAP_SURFACEFMT_DXT1 46 -#define SVGA3D_DEVCAP_SURFACEFMT_DXT2 47 -#define SVGA3D_DEVCAP_SURFACEFMT_DXT3 48 -#define SVGA3D_DEVCAP_SURFACEFMT_DXT4 49 -#define SVGA3D_DEVCAP_SURFACEFMT_DXT5 50 -#define SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 51 -#define SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 52 -#define SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 53 -#define SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 54 -#define SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 55 -#define SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 56 -#define SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 57 -#define SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 58 -#define SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 59 -#define SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 60 -#define SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 61 - -/* - * There is a hole in our devcap definitions for - * historical reasons. - * - * Define a constant just for completeness. - */ -#define SVGA3D_DEVCAP_MISSING62 62 - -#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES 63 - -/* - * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color - * render targets. This does not include the depth or stencil targets. - */ -#define SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS 64 - -#define SVGA3D_DEVCAP_SURFACEFMT_V16U16 65 -#define SVGA3D_DEVCAP_SURFACEFMT_G16R16 66 -#define SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 67 -#define SVGA3D_DEVCAP_SURFACEFMT_UYVY 68 -#define SVGA3D_DEVCAP_SURFACEFMT_YUY2 69 - -/* - * Deprecated. 
- */ -#define SVGA3D_DEVCAP_DEAD4 70 -#define SVGA3D_DEVCAP_DEAD5 71 -#define SVGA3D_DEVCAP_DEAD7 72 -#define SVGA3D_DEVCAP_DEAD6 73 - -#define SVGA3D_DEVCAP_AUTOGENMIPMAPS 74 -#define SVGA3D_DEVCAP_SURFACEFMT_NV12 75 -#define SVGA3D_DEVCAP_DEAD10 76 - -/* - * This is the maximum number of SVGA context IDs that the guest - * can define using SVGA_3D_CMD_CONTEXT_DEFINE. - */ -#define SVGA3D_DEVCAP_MAX_CONTEXT_IDS 77 - -/* - * This is the maximum number of SVGA surface IDs that the guest - * can define using SVGA_3D_CMD_SURFACE_DEFINE*. - */ -#define SVGA3D_DEVCAP_MAX_SURFACE_IDS 78 - -#define SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 79 -#define SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 80 -#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT 81 - -#define SVGA3D_DEVCAP_SURFACEFMT_ATI1 82 -#define SVGA3D_DEVCAP_SURFACEFMT_ATI2 83 - -/* - * Deprecated. - */ -#define SVGA3D_DEVCAP_DEAD1 84 -#define SVGA3D_DEVCAP_DEAD8 85 -#define SVGA3D_DEVCAP_DEAD9 86 - -#define SVGA3D_DEVCAP_LINE_AA 87 /* boolean */ -#define SVGA3D_DEVCAP_LINE_STIPPLE 88 /* boolean */ -#define SVGA3D_DEVCAP_MAX_LINE_WIDTH 89 /* float */ -#define SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH 90 /* float */ - -#define SVGA3D_DEVCAP_SURFACEFMT_YV12 91 - -/* - * Deprecated. - */ -#define SVGA3D_DEVCAP_DEAD3 92 - -/* - * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported? - */ -#define SVGA3D_DEVCAP_TS_COLOR_KEY 93 /* boolean */ - -/* - * Deprecated. - */ -#define SVGA3D_DEVCAP_DEAD2 94 - -/* - * Does the device support DXContexts? - */ -#define SVGA3D_DEVCAP_DXCONTEXT 95 - -/* - * Deprecated. - */ -#define SVGA3D_DEVCAP_DEAD11 96 - -/* - * What is the maximum number of vertex buffers or vertex input registers - * that can be expected to work correctly with a DXContext? - * - * The guest is allowed to set up to SVGA3D_DX_MAX_VERTEXBUFFERS, but - * anything in excess of this cap is not guaranteed to render correctly. - * - * Similarly, the guest can set up to SVGA3D_DX_MAX_VERTEXINPUTREGISTERS - * input registers without the SVGA3D_DEVCAP_SM4_1 cap, or - * SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS with the SVGA3D_DEVCAP_SM4_1, - * but only the registers up to this cap value are guaranteed to render - * correctly. - * - * If guest-drivers are able to expose a lower-limit, it's recommended - * that they clamp to this value. Otherwise, the host will make a - * best-effort on case-by-case basis if guests exceed this. - */ -#define SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS 97 - -/* - * What is the maximum number of constant buffers that can be expected to - * work correctly with a DX context? - * - * The guest is allowed to set up to SVGA3D_DX_MAX_CONSTBUFFERS, but - * anything in excess of this cap is not guaranteed to render correctly. - * - * If guest-drivers are able to expose a lower-limit, it's recommended - * that they clamp to this value. Otherwise, the host will make a - * best-effort on case-by-case basis if guests exceed this. - */ -#define SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS 98 - -/* - * Does the device support provoking vertex control? - * - * If this cap is present, the provokingVertexLast field in the - * rasterizer state is enabled. (Guests can then set it to FALSE, - * meaning that the first vertex is the provoking vertex, or TRUE, - * meaning that the last verteix is the provoking vertex.) - * - * If this cap is FALSE, then guests should set the provokingVertexLast - * to FALSE, otherwise rendering behavior is undefined. 
- */ -#define SVGA3D_DEVCAP_DX_PROVOKING_VERTEX 99 - -#define SVGA3D_DEVCAP_DXFMT_X8R8G8B8 100 -#define SVGA3D_DEVCAP_DXFMT_A8R8G8B8 101 -#define SVGA3D_DEVCAP_DXFMT_R5G6B5 102 -#define SVGA3D_DEVCAP_DXFMT_X1R5G5B5 103 -#define SVGA3D_DEVCAP_DXFMT_A1R5G5B5 104 -#define SVGA3D_DEVCAP_DXFMT_A4R4G4B4 105 -#define SVGA3D_DEVCAP_DXFMT_Z_D32 106 -#define SVGA3D_DEVCAP_DXFMT_Z_D16 107 -#define SVGA3D_DEVCAP_DXFMT_Z_D24S8 108 -#define SVGA3D_DEVCAP_DXFMT_Z_D15S1 109 -#define SVGA3D_DEVCAP_DXFMT_LUMINANCE8 110 -#define SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4 111 -#define SVGA3D_DEVCAP_DXFMT_LUMINANCE16 112 -#define SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8 113 -#define SVGA3D_DEVCAP_DXFMT_DXT1 114 -#define SVGA3D_DEVCAP_DXFMT_DXT2 115 -#define SVGA3D_DEVCAP_DXFMT_DXT3 116 -#define SVGA3D_DEVCAP_DXFMT_DXT4 117 -#define SVGA3D_DEVCAP_DXFMT_DXT5 118 -#define SVGA3D_DEVCAP_DXFMT_BUMPU8V8 119 -#define SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 120 -#define SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 121 -#define SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD1 122 -#define SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 123 -#define SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 124 -#define SVGA3D_DEVCAP_DXFMT_A2R10G10B10 125 -#define SVGA3D_DEVCAP_DXFMT_V8U8 126 -#define SVGA3D_DEVCAP_DXFMT_Q8W8V8U8 127 -#define SVGA3D_DEVCAP_DXFMT_CxV8U8 128 -#define SVGA3D_DEVCAP_DXFMT_X8L8V8U8 129 -#define SVGA3D_DEVCAP_DXFMT_A2W10V10U10 130 -#define SVGA3D_DEVCAP_DXFMT_ALPHA8 131 -#define SVGA3D_DEVCAP_DXFMT_R_S10E5 132 -#define SVGA3D_DEVCAP_DXFMT_R_S23E8 133 -#define SVGA3D_DEVCAP_DXFMT_RG_S10E5 134 -#define SVGA3D_DEVCAP_DXFMT_RG_S23E8 135 -#define SVGA3D_DEVCAP_DXFMT_BUFFER 136 -#define SVGA3D_DEVCAP_DXFMT_Z_D24X8 137 -#define SVGA3D_DEVCAP_DXFMT_V16U16 138 -#define SVGA3D_DEVCAP_DXFMT_G16R16 139 -#define SVGA3D_DEVCAP_DXFMT_A16B16G16R16 140 -#define SVGA3D_DEVCAP_DXFMT_UYVY 141 -#define SVGA3D_DEVCAP_DXFMT_YUY2 142 -#define SVGA3D_DEVCAP_DXFMT_NV12 143 -#define SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD2 144 -#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS 145 -#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT 146 -#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT 147 -#define SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS 148 -#define SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT 149 -#define SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT 150 -#define SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT 151 -#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS 152 -#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT 153 -#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM 154 -#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT 155 -#define SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS 156 -#define SVGA3D_DEVCAP_DXFMT_R32G32_UINT 157 -#define SVGA3D_DEVCAP_DXFMT_R32G32_SINT 158 -#define SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS 159 -#define SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT 160 -#define SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24 161 -#define SVGA3D_DEVCAP_DXFMT_X32_G8X24_UINT 162 -#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS 163 -#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT 164 -#define SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT 165 -#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS 166 -#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM 167 -#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB 168 -#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT 169 -#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT 170 -#define SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS 171 -#define SVGA3D_DEVCAP_DXFMT_R16G16_UINT 172 -#define SVGA3D_DEVCAP_DXFMT_R16G16_SINT 173 -#define SVGA3D_DEVCAP_DXFMT_R32_TYPELESS 174 -#define SVGA3D_DEVCAP_DXFMT_D32_FLOAT 175 -#define SVGA3D_DEVCAP_DXFMT_R32_UINT 176 -#define SVGA3D_DEVCAP_DXFMT_R32_SINT 177 
-#define SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS 178 -#define SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT 179 -#define SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8 180 -#define SVGA3D_DEVCAP_DXFMT_X24_G8_UINT 181 -#define SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS 182 -#define SVGA3D_DEVCAP_DXFMT_R8G8_UNORM 183 -#define SVGA3D_DEVCAP_DXFMT_R8G8_UINT 184 -#define SVGA3D_DEVCAP_DXFMT_R8G8_SINT 185 -#define SVGA3D_DEVCAP_DXFMT_R16_TYPELESS 186 -#define SVGA3D_DEVCAP_DXFMT_R16_UNORM 187 -#define SVGA3D_DEVCAP_DXFMT_R16_UINT 188 -#define SVGA3D_DEVCAP_DXFMT_R16_SNORM 189 -#define SVGA3D_DEVCAP_DXFMT_R16_SINT 190 -#define SVGA3D_DEVCAP_DXFMT_R8_TYPELESS 191 -#define SVGA3D_DEVCAP_DXFMT_R8_UNORM 192 -#define SVGA3D_DEVCAP_DXFMT_R8_UINT 193 -#define SVGA3D_DEVCAP_DXFMT_R8_SNORM 194 -#define SVGA3D_DEVCAP_DXFMT_R8_SINT 195 -#define SVGA3D_DEVCAP_DXFMT_P8 196 -#define SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP 197 -#define SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM 198 -#define SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM 199 -#define SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS 200 -#define SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB 201 -#define SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS 202 -#define SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB 203 -#define SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS 204 -#define SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB 205 -#define SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS 206 -#define SVGA3D_DEVCAP_DXFMT_ATI1 207 -#define SVGA3D_DEVCAP_DXFMT_BC4_SNORM 208 -#define SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS 209 -#define SVGA3D_DEVCAP_DXFMT_ATI2 210 -#define SVGA3D_DEVCAP_DXFMT_BC5_SNORM 211 -#define SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM 212 -#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS 213 -#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB 214 -#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS 215 -#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB 216 -#define SVGA3D_DEVCAP_DXFMT_Z_DF16 217 -#define SVGA3D_DEVCAP_DXFMT_Z_DF24 218 -#define SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT 219 -#define SVGA3D_DEVCAP_DXFMT_YV12 220 -#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT 221 -#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT 222 -#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM 223 -#define SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT 224 -#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM 225 -#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM 226 -#define SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT 227 -#define SVGA3D_DEVCAP_DXFMT_R16G16_UNORM 228 -#define SVGA3D_DEVCAP_DXFMT_R16G16_SNORM 229 -#define SVGA3D_DEVCAP_DXFMT_R32_FLOAT 230 -#define SVGA3D_DEVCAP_DXFMT_R8G8_SNORM 231 -#define SVGA3D_DEVCAP_DXFMT_R16_FLOAT 232 -#define SVGA3D_DEVCAP_DXFMT_D16_UNORM 233 -#define SVGA3D_DEVCAP_DXFMT_A8_UNORM 234 -#define SVGA3D_DEVCAP_DXFMT_BC1_UNORM 235 -#define SVGA3D_DEVCAP_DXFMT_BC2_UNORM 236 -#define SVGA3D_DEVCAP_DXFMT_BC3_UNORM 237 -#define SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM 238 -#define SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM 239 -#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM 240 -#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM 241 -#define SVGA3D_DEVCAP_DXFMT_BC4_UNORM 242 -#define SVGA3D_DEVCAP_DXFMT_BC5_UNORM 243 - -/* - * Advertises shaderModel 4.1 support, independent blend-states, - * cube-map arrays, and a higher vertex input registers limit. - * - * (See documentation on SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS.) - */ -#define SVGA3D_DEVCAP_SM41 244 -#define SVGA3D_DEVCAP_MULTISAMPLE_2X 245 -#define SVGA3D_DEVCAP_MULTISAMPLE_4X 246 - -/* - * Indicates that the device has rendering support for - * the full multisample quality. If this cap is not present, - * the host may or may not support full quality rendering. 
- * - * See also SVGA_REG_MS_HINT_RESOLVED. - */ -#define SVGA3D_DEVCAP_MS_FULL_QUALITY 247 - -/* - * Advertises support for the SVGA3D LogicOps commands. - */ -#define SVGA3D_DEVCAP_LOGICOPS 248 - -/* - * Advertises support for using logicOps in the DXBlendStates. - */ -#define SVGA3D_DEVCAP_LOGIC_BLENDOPS 249 - -/* -* Note DXFMT range is now non-contiguous. -*/ -#define SVGA3D_DEVCAP_RESERVED_1 250 -#define SVGA3D_DEVCAP_DXFMT_BC6H_TYPELESS 251 -#define SVGA3D_DEVCAP_DXFMT_BC6H_UF16 252 -#define SVGA3D_DEVCAP_DXFMT_BC6H_SF16 253 -#define SVGA3D_DEVCAP_DXFMT_BC7_TYPELESS 254 -#define SVGA3D_DEVCAP_DXFMT_BC7_UNORM 255 -#define SVGA3D_DEVCAP_DXFMT_BC7_UNORM_SRGB 256 -#define SVGA3D_DEVCAP_RESERVED_2 257 - -#define SVGA3D_DEVCAP_SM5 258 -#define SVGA3D_DEVCAP_MULTISAMPLE_8X 259 - -/* This must be the last index. */ -#define SVGA3D_DEVCAP_MAX 260 - -/* - * Bit definitions for DXFMT devcaps - * - * - * SUPPORTED: Can the format be defined? - * SHADER_SAMPLE: Can the format be sampled from a shader? - * COLOR_RENDERTARGET: Can the format be a color render target? - * DEPTH_RENDERTARGET: Can the format be a depth render target? - * BLENDABLE: Is the format blendable? - * MIPS: Does the format support mip levels? - * ARRAY: Does the format support texture arrays? - * VOLUME: Does the format support having volume? - * MULTISAMPLE: Does the format support multisample? - */ -#define SVGA3D_DXFMT_SUPPORTED (1 << 0) -#define SVGA3D_DXFMT_SHADER_SAMPLE (1 << 1) -#define SVGA3D_DXFMT_COLOR_RENDERTARGET (1 << 2) -#define SVGA3D_DXFMT_DEPTH_RENDERTARGET (1 << 3) -#define SVGA3D_DXFMT_BLENDABLE (1 << 4) -#define SVGA3D_DXFMT_MIPS (1 << 5) -#define SVGA3D_DXFMT_ARRAY (1 << 6) -#define SVGA3D_DXFMT_VOLUME (1 << 7) -#define SVGA3D_DXFMT_DX_VERTEX_BUFFER (1 << 8) -#define SVGA3D_DXFMT_MULTISAMPLE (1 << 9) -#define SVGA3D_DXFMT_MAX (1 << 10) +#define SVGA3D_DEVCAP_INVALID ((uint32)-1) +#define SVGA3D_DEVCAP_3D 0 +#define SVGA3D_DEVCAP_MAX_LIGHTS 1 + +#define SVGA3D_DEVCAP_MAX_TEXTURES 2 +#define SVGA3D_DEVCAP_MAX_CLIP_PLANES 3 +#define SVGA3D_DEVCAP_VERTEX_SHADER_VERSION 4 +#define SVGA3D_DEVCAP_VERTEX_SHADER 5 +#define SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION 6 +#define SVGA3D_DEVCAP_FRAGMENT_SHADER 7 +#define SVGA3D_DEVCAP_MAX_RENDER_TARGETS 8 +#define SVGA3D_DEVCAP_S23E8_TEXTURES 9 +#define SVGA3D_DEVCAP_S10E5_TEXTURES 10 +#define SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND 11 +#define SVGA3D_DEVCAP_D16_BUFFER_FORMAT 12 +#define SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT 13 +#define SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT 14 +#define SVGA3D_DEVCAP_QUERY_TYPES 15 +#define SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING 16 +#define SVGA3D_DEVCAP_MAX_POINT_SIZE 17 +#define SVGA3D_DEVCAP_MAX_SHADER_TEXTURES 18 +#define SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH 19 +#define SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT 20 +#define SVGA3D_DEVCAP_MAX_VOLUME_EXTENT 21 +#define SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT 22 +#define SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO 23 +#define SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY 24 +#define SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT 25 +#define SVGA3D_DEVCAP_MAX_VERTEX_INDEX 26 +#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS 27 +#define SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS 28 +#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS 29 +#define SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS 30 +#define SVGA3D_DEVCAP_TEXTURE_OPS 31 +#define SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 32 +#define SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 33 +#define SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 34 +#define SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 35 +#define 
SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 36 +#define SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 37 +#define SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 38 +#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 39 +#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 40 +#define SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 41 +#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 42 +#define SVGA3D_DEVCAP_SURFACEFMT_Z_D16 43 +#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 44 +#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 45 +#define SVGA3D_DEVCAP_SURFACEFMT_DXT1 46 +#define SVGA3D_DEVCAP_SURFACEFMT_DXT2 47 +#define SVGA3D_DEVCAP_SURFACEFMT_DXT3 48 +#define SVGA3D_DEVCAP_SURFACEFMT_DXT4 49 +#define SVGA3D_DEVCAP_SURFACEFMT_DXT5 50 +#define SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 51 +#define SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 52 +#define SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 53 +#define SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 54 +#define SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 55 +#define SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 56 +#define SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 57 +#define SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 58 +#define SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 59 +#define SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 60 +#define SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 61 + +#define SVGA3D_DEVCAP_MISSING62 62 + +#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES 63 + +#define SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS 64 + +#define SVGA3D_DEVCAP_SURFACEFMT_V16U16 65 +#define SVGA3D_DEVCAP_SURFACEFMT_G16R16 66 +#define SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 67 +#define SVGA3D_DEVCAP_SURFACEFMT_UYVY 68 +#define SVGA3D_DEVCAP_SURFACEFMT_YUY2 69 + +#define SVGA3D_DEVCAP_DEAD4 70 +#define SVGA3D_DEVCAP_DEAD5 71 +#define SVGA3D_DEVCAP_DEAD7 72 +#define SVGA3D_DEVCAP_DEAD6 73 + +#define SVGA3D_DEVCAP_AUTOGENMIPMAPS 74 +#define SVGA3D_DEVCAP_SURFACEFMT_NV12 75 +#define SVGA3D_DEVCAP_DEAD10 76 + +#define SVGA3D_DEVCAP_MAX_CONTEXT_IDS 77 + +#define SVGA3D_DEVCAP_MAX_SURFACE_IDS 78 + +#define SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 79 +#define SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 80 +#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT 81 + +#define SVGA3D_DEVCAP_SURFACEFMT_ATI1 82 +#define SVGA3D_DEVCAP_SURFACEFMT_ATI2 83 + +#define SVGA3D_DEVCAP_DEAD1 84 +#define SVGA3D_DEVCAP_DEAD8 85 +#define SVGA3D_DEVCAP_DEAD9 86 + +#define SVGA3D_DEVCAP_LINE_AA 87 +#define SVGA3D_DEVCAP_LINE_STIPPLE 88 +#define SVGA3D_DEVCAP_MAX_LINE_WIDTH 89 +#define SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH 90 + +#define SVGA3D_DEVCAP_SURFACEFMT_YV12 91 + +#define SVGA3D_DEVCAP_DEAD3 92 + +#define SVGA3D_DEVCAP_TS_COLOR_KEY 93 + +#define SVGA3D_DEVCAP_DEAD2 94 + +#define SVGA3D_DEVCAP_DXCONTEXT 95 + +#define SVGA3D_DEVCAP_DEAD11 96 + +#define SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS 97 + +#define SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS 98 + +#define SVGA3D_DEVCAP_DX_PROVOKING_VERTEX 99 + +#define SVGA3D_DEVCAP_DXFMT_X8R8G8B8 100 +#define SVGA3D_DEVCAP_DXFMT_A8R8G8B8 101 +#define SVGA3D_DEVCAP_DXFMT_R5G6B5 102 +#define SVGA3D_DEVCAP_DXFMT_X1R5G5B5 103 +#define SVGA3D_DEVCAP_DXFMT_A1R5G5B5 104 +#define SVGA3D_DEVCAP_DXFMT_A4R4G4B4 105 +#define SVGA3D_DEVCAP_DXFMT_Z_D32 106 +#define SVGA3D_DEVCAP_DXFMT_Z_D16 107 +#define SVGA3D_DEVCAP_DXFMT_Z_D24S8 108 +#define SVGA3D_DEVCAP_DXFMT_Z_D15S1 109 +#define SVGA3D_DEVCAP_DXFMT_LUMINANCE8 110 +#define SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4 111 +#define SVGA3D_DEVCAP_DXFMT_LUMINANCE16 112 +#define SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8 113 +#define SVGA3D_DEVCAP_DXFMT_DXT1 114 +#define SVGA3D_DEVCAP_DXFMT_DXT2 115 +#define SVGA3D_DEVCAP_DXFMT_DXT3 116 +#define SVGA3D_DEVCAP_DXFMT_DXT4 117 +#define SVGA3D_DEVCAP_DXFMT_DXT5 118 +#define 
SVGA3D_DEVCAP_DXFMT_BUMPU8V8 119 +#define SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 120 +#define SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 121 +#define SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD1 122 +#define SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 123 +#define SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 124 +#define SVGA3D_DEVCAP_DXFMT_A2R10G10B10 125 +#define SVGA3D_DEVCAP_DXFMT_V8U8 126 +#define SVGA3D_DEVCAP_DXFMT_Q8W8V8U8 127 +#define SVGA3D_DEVCAP_DXFMT_CxV8U8 128 +#define SVGA3D_DEVCAP_DXFMT_X8L8V8U8 129 +#define SVGA3D_DEVCAP_DXFMT_A2W10V10U10 130 +#define SVGA3D_DEVCAP_DXFMT_ALPHA8 131 +#define SVGA3D_DEVCAP_DXFMT_R_S10E5 132 +#define SVGA3D_DEVCAP_DXFMT_R_S23E8 133 +#define SVGA3D_DEVCAP_DXFMT_RG_S10E5 134 +#define SVGA3D_DEVCAP_DXFMT_RG_S23E8 135 +#define SVGA3D_DEVCAP_DXFMT_BUFFER 136 +#define SVGA3D_DEVCAP_DXFMT_Z_D24X8 137 +#define SVGA3D_DEVCAP_DXFMT_V16U16 138 +#define SVGA3D_DEVCAP_DXFMT_G16R16 139 +#define SVGA3D_DEVCAP_DXFMT_A16B16G16R16 140 +#define SVGA3D_DEVCAP_DXFMT_UYVY 141 +#define SVGA3D_DEVCAP_DXFMT_YUY2 142 +#define SVGA3D_DEVCAP_DXFMT_NV12 143 +#define SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD2 144 +#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS 145 +#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT 146 +#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT 147 +#define SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS 148 +#define SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT 149 +#define SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT 150 +#define SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT 151 +#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS 152 +#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT 153 +#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM 154 +#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT 155 +#define SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS 156 +#define SVGA3D_DEVCAP_DXFMT_R32G32_UINT 157 +#define SVGA3D_DEVCAP_DXFMT_R32G32_SINT 158 +#define SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS 159 +#define SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT 160 +#define SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24 161 +#define SVGA3D_DEVCAP_DXFMT_X32_G8X24_UINT 162 +#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS 163 +#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT 164 +#define SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT 165 +#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS 166 +#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM 167 +#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB 168 +#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT 169 +#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT 170 +#define SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS 171 +#define SVGA3D_DEVCAP_DXFMT_R16G16_UINT 172 +#define SVGA3D_DEVCAP_DXFMT_R16G16_SINT 173 +#define SVGA3D_DEVCAP_DXFMT_R32_TYPELESS 174 +#define SVGA3D_DEVCAP_DXFMT_D32_FLOAT 175 +#define SVGA3D_DEVCAP_DXFMT_R32_UINT 176 +#define SVGA3D_DEVCAP_DXFMT_R32_SINT 177 +#define SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS 178 +#define SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT 179 +#define SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8 180 +#define SVGA3D_DEVCAP_DXFMT_X24_G8_UINT 181 +#define SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS 182 +#define SVGA3D_DEVCAP_DXFMT_R8G8_UNORM 183 +#define SVGA3D_DEVCAP_DXFMT_R8G8_UINT 184 +#define SVGA3D_DEVCAP_DXFMT_R8G8_SINT 185 +#define SVGA3D_DEVCAP_DXFMT_R16_TYPELESS 186 +#define SVGA3D_DEVCAP_DXFMT_R16_UNORM 187 +#define SVGA3D_DEVCAP_DXFMT_R16_UINT 188 +#define SVGA3D_DEVCAP_DXFMT_R16_SNORM 189 +#define SVGA3D_DEVCAP_DXFMT_R16_SINT 190 +#define SVGA3D_DEVCAP_DXFMT_R8_TYPELESS 191 +#define SVGA3D_DEVCAP_DXFMT_R8_UNORM 192 +#define SVGA3D_DEVCAP_DXFMT_R8_UINT 193 +#define SVGA3D_DEVCAP_DXFMT_R8_SNORM 194 +#define SVGA3D_DEVCAP_DXFMT_R8_SINT 195 +#define SVGA3D_DEVCAP_DXFMT_P8 196 +#define 
SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP 197 +#define SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM 198 +#define SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM 199 +#define SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS 200 +#define SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB 201 +#define SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS 202 +#define SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB 203 +#define SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS 204 +#define SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB 205 +#define SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS 206 +#define SVGA3D_DEVCAP_DXFMT_ATI1 207 +#define SVGA3D_DEVCAP_DXFMT_BC4_SNORM 208 +#define SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS 209 +#define SVGA3D_DEVCAP_DXFMT_ATI2 210 +#define SVGA3D_DEVCAP_DXFMT_BC5_SNORM 211 +#define SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM 212 +#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS 213 +#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB 214 +#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS 215 +#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB 216 +#define SVGA3D_DEVCAP_DXFMT_Z_DF16 217 +#define SVGA3D_DEVCAP_DXFMT_Z_DF24 218 +#define SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT 219 +#define SVGA3D_DEVCAP_DXFMT_YV12 220 +#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT 221 +#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT 222 +#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM 223 +#define SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT 224 +#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM 225 +#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM 226 +#define SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT 227 +#define SVGA3D_DEVCAP_DXFMT_R16G16_UNORM 228 +#define SVGA3D_DEVCAP_DXFMT_R16G16_SNORM 229 +#define SVGA3D_DEVCAP_DXFMT_R32_FLOAT 230 +#define SVGA3D_DEVCAP_DXFMT_R8G8_SNORM 231 +#define SVGA3D_DEVCAP_DXFMT_R16_FLOAT 232 +#define SVGA3D_DEVCAP_DXFMT_D16_UNORM 233 +#define SVGA3D_DEVCAP_DXFMT_A8_UNORM 234 +#define SVGA3D_DEVCAP_DXFMT_BC1_UNORM 235 +#define SVGA3D_DEVCAP_DXFMT_BC2_UNORM 236 +#define SVGA3D_DEVCAP_DXFMT_BC3_UNORM 237 +#define SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM 238 +#define SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM 239 +#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM 240 +#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM 241 +#define SVGA3D_DEVCAP_DXFMT_BC4_UNORM 242 +#define SVGA3D_DEVCAP_DXFMT_BC5_UNORM 243 + +#define SVGA3D_DEVCAP_SM41 244 +#define SVGA3D_DEVCAP_MULTISAMPLE_2X 245 +#define SVGA3D_DEVCAP_MULTISAMPLE_4X 246 + +#define SVGA3D_DEVCAP_MS_FULL_QUALITY 247 + +#define SVGA3D_DEVCAP_LOGICOPS 248 + +#define SVGA3D_DEVCAP_LOGIC_BLENDOPS 249 + +#define SVGA3D_DEVCAP_DEAD12 250 + +#define SVGA3D_DEVCAP_DXFMT_BC6H_TYPELESS 251 +#define SVGA3D_DEVCAP_DXFMT_BC6H_UF16 252 +#define SVGA3D_DEVCAP_DXFMT_BC6H_SF16 253 +#define SVGA3D_DEVCAP_DXFMT_BC7_TYPELESS 254 +#define SVGA3D_DEVCAP_DXFMT_BC7_UNORM 255 +#define SVGA3D_DEVCAP_DXFMT_BC7_UNORM_SRGB 256 + +#define SVGA3D_DEVCAP_DEAD13 257 + +#define SVGA3D_DEVCAP_SM5 258 +#define SVGA3D_DEVCAP_MULTISAMPLE_8X 259 + +#define SVGA3D_DEVCAP_MAX 262 + +#define SVGA3D_DXFMT_SUPPORTED (1 << 0) +#define SVGA3D_DXFMT_SHADER_SAMPLE (1 << 1) +#define SVGA3D_DXFMT_COLOR_RENDERTARGET (1 << 2) +#define SVGA3D_DXFMT_DEPTH_RENDERTARGET (1 << 3) +#define SVGA3D_DXFMT_BLENDABLE (1 << 4) +#define SVGA3D_DXFMT_MIPS (1 << 5) +#define SVGA3D_DXFMT_ARRAY (1 << 6) +#define SVGA3D_DXFMT_VOLUME (1 << 7) +#define SVGA3D_DXFMT_DX_VERTEX_BUFFER (1 << 8) +#define SVGA3D_DXFMT_MULTISAMPLE (1 << 9) +#define SVGA3D_DXFMT_MAX (1 << 10) typedef union { - SVGA3dBool b; - uint32 u; - int32 i; - float f; + SVGA3dBool b; + uint32 u; + int32 i; + float f; } SVGA3dDevCapResult; -#endif /* _SVGA3D_DEVCAPS_H_ */ +#endif diff --git 
a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h index f703ac2b1768..5af442dad542 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h @@ -1,6 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /********************************************************** - * Copyright 2012-2019 VMware, Inc. + * Copyright 2012-2021 VMware, Inc. + * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -27,88 +27,70 @@ /* * svga3d_dx.h -- * - * SVGA 3d hardware definitions for DX10 support. + * SVGA 3d hardware definitions for DX10 support. */ + + #ifndef _SVGA3D_DX_H_ #define _SVGA3D_DX_H_ -#define INCLUDE_ALLOW_MODULE -#define INCLUDE_ALLOW_USERLEVEL -#define INCLUDE_ALLOW_VMCORE -#include "includeCheck.h" - +#include "svga_reg.h" #include "svga3d_limits.h" +#include "svga3d_types.h" -#define SVGA3D_INPUT_MIN 0 -#define SVGA3D_INPUT_PER_VERTEX_DATA 0 +#define SVGA3D_INPUT_MIN 0 +#define SVGA3D_INPUT_PER_VERTEX_DATA 0 #define SVGA3D_INPUT_PER_INSTANCE_DATA 1 -#define SVGA3D_INPUT_MAX 2 +#define SVGA3D_INPUT_MAX 2 typedef uint32 SVGA3dInputClassification; -#define SVGA3D_RESOURCE_TYPE_MIN 1 -#define SVGA3D_RESOURCE_BUFFER 1 -#define SVGA3D_RESOURCE_TEXTURE1D 2 -#define SVGA3D_RESOURCE_TEXTURE2D 3 -#define SVGA3D_RESOURCE_TEXTURE3D 4 -#define SVGA3D_RESOURCE_TEXTURECUBE 5 -#define SVGA3D_RESOURCE_TYPE_DX10_MAX 6 -#define SVGA3D_RESOURCE_BUFFEREX 6 -#define SVGA3D_RESOURCE_TYPE_MAX 7 -typedef uint32 SVGA3dResourceType; - -#define SVGA3D_COLOR_WRITE_ENABLE_RED (1 << 0) -#define SVGA3D_COLOR_WRITE_ENABLE_GREEN (1 << 1) -#define SVGA3D_COLOR_WRITE_ENABLE_BLUE (1 << 2) -#define SVGA3D_COLOR_WRITE_ENABLE_ALPHA (1 << 3) -#define SVGA3D_COLOR_WRITE_ENABLE_ALL (SVGA3D_COLOR_WRITE_ENABLE_RED | \ - SVGA3D_COLOR_WRITE_ENABLE_GREEN | \ - SVGA3D_COLOR_WRITE_ENABLE_BLUE | \ - SVGA3D_COLOR_WRITE_ENABLE_ALPHA) +#define SVGA3D_COLOR_WRITE_ENABLE_RED (1 << 0) +#define SVGA3D_COLOR_WRITE_ENABLE_GREEN (1 << 1) +#define SVGA3D_COLOR_WRITE_ENABLE_BLUE (1 << 2) +#define SVGA3D_COLOR_WRITE_ENABLE_ALPHA (1 << 3) +#define SVGA3D_COLOR_WRITE_ENABLE_ALL \ + (SVGA3D_COLOR_WRITE_ENABLE_RED | SVGA3D_COLOR_WRITE_ENABLE_GREEN | \ + SVGA3D_COLOR_WRITE_ENABLE_BLUE | SVGA3D_COLOR_WRITE_ENABLE_ALPHA) typedef uint8 SVGA3dColorWriteEnable; -#define SVGA3D_DEPTH_WRITE_MASK_ZERO 0 -#define SVGA3D_DEPTH_WRITE_MASK_ALL 1 +#define SVGA3D_DEPTH_WRITE_MASK_ZERO 0 +#define SVGA3D_DEPTH_WRITE_MASK_ALL 1 typedef uint8 SVGA3dDepthWriteMask; -#define SVGA3D_FILTER_MIP_LINEAR (1 << 0) -#define SVGA3D_FILTER_MAG_LINEAR (1 << 2) -#define SVGA3D_FILTER_MIN_LINEAR (1 << 4) +#define SVGA3D_FILTER_MIP_LINEAR (1 << 0) +#define SVGA3D_FILTER_MAG_LINEAR (1 << 2) +#define SVGA3D_FILTER_MIN_LINEAR (1 << 4) #define SVGA3D_FILTER_ANISOTROPIC (1 << 6) -#define SVGA3D_FILTER_COMPARE (1 << 7) +#define SVGA3D_FILTER_COMPARE (1 << 7) typedef uint32 SVGA3dFilter; #define SVGA3D_CULL_INVALID 0 -#define SVGA3D_CULL_MIN 1 -#define SVGA3D_CULL_NONE 1 -#define SVGA3D_CULL_FRONT 2 -#define SVGA3D_CULL_BACK 3 -#define SVGA3D_CULL_MAX 4 +#define SVGA3D_CULL_MIN 1 +#define SVGA3D_CULL_NONE 1 +#define SVGA3D_CULL_FRONT 2 +#define SVGA3D_CULL_BACK 3 +#define SVGA3D_CULL_MAX 4 typedef uint8 SVGA3dCullMode; -#define SVGA3D_COMPARISON_INVALID 0 -#define SVGA3D_COMPARISON_MIN 1 -#define SVGA3D_COMPARISON_NEVER 1 -#define SVGA3D_COMPARISON_LESS 2 -#define 
SVGA3D_COMPARISON_EQUAL 3 -#define SVGA3D_COMPARISON_LESS_EQUAL 4 -#define SVGA3D_COMPARISON_GREATER 5 -#define SVGA3D_COMPARISON_NOT_EQUAL 6 -#define SVGA3D_COMPARISON_GREATER_EQUAL 7 -#define SVGA3D_COMPARISON_ALWAYS 8 -#define SVGA3D_COMPARISON_MAX 9 +#define SVGA3D_COMPARISON_INVALID 0 +#define SVGA3D_COMPARISON_MIN 1 +#define SVGA3D_COMPARISON_NEVER 1 +#define SVGA3D_COMPARISON_LESS 2 +#define SVGA3D_COMPARISON_EQUAL 3 +#define SVGA3D_COMPARISON_LESS_EQUAL 4 +#define SVGA3D_COMPARISON_GREATER 5 +#define SVGA3D_COMPARISON_NOT_EQUAL 6 +#define SVGA3D_COMPARISON_GREATER_EQUAL 7 +#define SVGA3D_COMPARISON_ALWAYS 8 +#define SVGA3D_COMPARISON_MAX 9 typedef uint8 SVGA3dComparisonFunc; -/* - * SVGA3D_MULTISAMPLE_RAST_DISABLE disables MSAA for all primitives. - * SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE, which is supported in SM41, - * disables MSAA for lines only. - */ -#define SVGA3D_MULTISAMPLE_RAST_DISABLE 0 -#define SVGA3D_MULTISAMPLE_RAST_ENABLE 1 -#define SVGA3D_MULTISAMPLE_RAST_DX_MAX 1 -#define SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE 2 -#define SVGA3D_MULTISAMPLE_RAST_MAX 2 +#define SVGA3D_MULTISAMPLE_RAST_DISABLE 0 +#define SVGA3D_MULTISAMPLE_RAST_ENABLE 1 +#define SVGA3D_MULTISAMPLE_RAST_DX_MAX 1 +#define SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE 2 +#define SVGA3D_MULTISAMPLE_RAST_MAX 2 typedef uint8 SVGA3dMultisampleRastEnable; #define SVGA3D_DX_MAX_VERTEXBUFFERS 32 @@ -137,1531 +119,1273 @@ typedef uint32 SVGA3dQueryId; typedef uint32 SVGA3dStreamOutputId; typedef union { - struct { - float r; - float g; - float b; - float a; - }; - - float value[4]; -} SVGA3dRGBAFloat; - -typedef union { - struct { - uint32 r; - uint32 g; - uint32 b; - uint32 a; - }; - - uint32 value[4]; + struct { + uint32 r; + uint32 g; + uint32 b; + uint32 a; + }; + + uint32 value[4]; } SVGA3dRGBAUint32; -typedef -#include "vmware_pack_begin.h" -struct { - uint32 cid; - SVGAMobId mobid; -} -#include "vmware_pack_end.h" -SVGAOTableDXContextEntry; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineContext { - uint32 cid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineContext; /* SVGA_3D_CMD_DX_DEFINE_CONTEXT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDestroyContext { - uint32 cid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDestroyContext; /* SVGA_3D_CMD_DX_DESTROY_CONTEXT */ - -/* - * Bind a DX context. - * - * validContents should be set to 0 for new contexts, - * and 1 if this is an old context which is getting paged - * back on to the device. - * - * For new contexts, it is recommended that the driver - * issue commands to initialize all interesting state - * prior to rendering. - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXBindContext { - uint32 cid; - SVGAMobId mobid; - uint32 validContents; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXBindContext; /* SVGA_3D_CMD_DX_BIND_CONTEXT */ - -/* - * Readback a DX context. - * (Request that the device flush the contents back into guest memory.) - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXReadbackContext { - uint32 cid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXReadbackContext; /* SVGA_3D_CMD_DX_READBACK_CONTEXT */ - -/* - * Invalidate a guest-backed context. 
- */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXInvalidateContext { - uint32 cid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXInvalidateContext; /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetSingleConstantBuffer { - uint32 slot; - SVGA3dShaderType type; - SVGA3dSurfaceId sid; - uint32 offsetInBytes; - uint32 sizeInBytes; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetSingleConstantBuffer; -/* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetShaderResources { - uint32 startView; - SVGA3dShaderType type; - - /* - * Followed by a variable number of SVGA3dShaderResourceViewId's. - */ -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetShaderResources; /* SVGA_3D_CMD_DX_SET_SHADER_RESOURCES */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetShader { - SVGA3dShaderId shaderId; - SVGA3dShaderType type; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetShader; /* SVGA_3D_CMD_DX_SET_SHADER */ +#pragma pack(push, 1) +typedef struct { + uint32 cid; + SVGAMobId mobid; +} SVGAOTableDXContextEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineContext { + uint32 cid; +} SVGA3dCmdDXDefineContext; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDestroyContext { + uint32 cid; +} SVGA3dCmdDXDestroyContext; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXBindContext { + uint32 cid; + SVGAMobId mobid; + uint32 validContents; +} SVGA3dCmdDXBindContext; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXReadbackContext { + uint32 cid; +} SVGA3dCmdDXReadbackContext; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXInvalidateContext { + uint32 cid; +} SVGA3dCmdDXInvalidateContext; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetSingleConstantBuffer { + uint32 slot; + SVGA3dShaderType type; + SVGA3dSurfaceId sid; + uint32 offsetInBytes; + uint32 sizeInBytes; +} SVGA3dCmdDXSetSingleConstantBuffer; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetShaderResources { + uint32 startView; + SVGA3dShaderType type; + +} SVGA3dCmdDXSetShaderResources; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetShader { + SVGA3dShaderId shaderId; + SVGA3dShaderType type; +} SVGA3dCmdDXSetShader; +#pragma pack(pop) typedef union { - struct { - uint32 cbOffset : 12; - uint32 cbId : 4; - uint32 baseSamp : 4; - uint32 baseTex : 7; - uint32 reserved : 5; - }; - uint32 value; + struct { + uint32 cbOffset : 12; + uint32 cbId : 4; + uint32 baseSamp : 4; + uint32 baseTex : 7; + uint32 reserved : 5; + }; + uint32 value; } SVGA3dIfaceData; -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetShaderIface { - SVGA3dShaderType type; - uint32 numClassInstances; - uint32 index; - uint32 iface; - SVGA3dIfaceData data; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetShaderIface; /* SVGA_3D_CMD_DX_SET_SHADER_IFACE */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXBindShaderIface { - uint32 cid; - SVGAMobId mobid; - uint32 offsetInBytes; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXBindShaderIface; /* SVGA_3D_CMD_DX_BIND_SHADER_IFACE */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetSamplers { - uint32 startSampler; - SVGA3dShaderType type; - - /* - * Followed by a variable number of SVGA3dSamplerId's. 
- */ -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetSamplers; /* SVGA_3D_CMD_DX_SET_SAMPLERS */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDraw { - uint32 vertexCount; - uint32 startVertexLocation; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDraw; /* SVGA_3D_CMD_DX_DRAW */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDrawIndexed { - uint32 indexCount; - uint32 startIndexLocation; - int32 baseVertexLocation; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDrawIndexed; /* SVGA_3D_CMD_DX_DRAW_INDEXED */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDrawInstanced { - uint32 vertexCountPerInstance; - uint32 instanceCount; - uint32 startVertexLocation; - uint32 startInstanceLocation; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDrawInstanced; /* SVGA_3D_CMD_DX_DRAW_INSTANCED */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDrawIndexedInstanced { - uint32 indexCountPerInstance; - uint32 instanceCount; - uint32 startIndexLocation; - int32 baseVertexLocation; - uint32 startInstanceLocation; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDrawIndexedInstanced; /* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDrawIndexedInstancedIndirect { - SVGA3dSurfaceId argsBufferSid; - uint32 byteOffsetForArgs; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDrawIndexedInstancedIndirect; -/* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDrawInstancedIndirect { - SVGA3dSurfaceId argsBufferSid; - uint32 byteOffsetForArgs; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDrawInstancedIndirect; -/* SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDrawAuto { - uint32 pad0; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDrawAuto; /* SVGA_3D_CMD_DX_DRAW_AUTO */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDispatch { - uint32 threadGroupCountX; - uint32 threadGroupCountY; - uint32 threadGroupCountZ; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDispatch; -/* SVGA_3D_CMD_DX_DISPATCH */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDispatchIndirect { - SVGA3dSurfaceId argsBufferSid; - uint32 byteOffsetForArgs; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDispatchIndirect; -/* SVGA_3D_CMD_DX_DISPATCH_INDIRECT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetInputLayout { - SVGA3dElementLayoutId elementLayoutId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetInputLayout; /* SVGA_3D_CMD_DX_SET_INPUT_LAYOUT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dVertexBuffer { - SVGA3dSurfaceId sid; - uint32 stride; - uint32 offset; -} -#include "vmware_pack_end.h" -SVGA3dVertexBuffer; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetVertexBuffers { - uint32 startBuffer; - /* Followed by a variable number of SVGA3dVertexBuffer's. 
*/ -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetVertexBuffers; /* SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetIndexBuffer { - SVGA3dSurfaceId sid; - SVGA3dSurfaceFormat format; - uint32 offset; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetIndexBuffer; /* SVGA_3D_CMD_DX_SET_INDEX_BUFFER */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetTopology { - SVGA3dPrimitiveType topology; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetTopology; /* SVGA_3D_CMD_DX_SET_TOPOLOGY */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetRenderTargets { - SVGA3dDepthStencilViewId depthStencilViewId; - /* Followed by a variable number of SVGA3dRenderTargetViewId's. */ -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetRenderTargets; /* SVGA_3D_CMD_DX_SET_RENDERTARGETS */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetBlendState { - SVGA3dBlendStateId blendId; - float blendFactor[4]; - uint32 sampleMask; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetBlendState; /* SVGA_3D_CMD_DX_SET_BLEND_STATE */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetDepthStencilState { - SVGA3dDepthStencilStateId depthStencilId; - uint32 stencilRef; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetDepthStencilState; /* SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetRasterizerState { - SVGA3dRasterizerStateId rasterizerId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetRasterizerState; /* SVGA_3D_CMD_DX_SET_RASTERIZER_STATE */ +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetShaderIface { + SVGA3dShaderType type; + uint32 numClassInstances; + uint32 index; + uint32 iface; + SVGA3dIfaceData data; +} SVGA3dCmdDXSetShaderIface; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXBindShaderIface { + uint32 cid; + SVGAMobId mobid; + uint32 offsetInBytes; +} SVGA3dCmdDXBindShaderIface; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetSamplers { + uint32 startSampler; + SVGA3dShaderType type; + +} SVGA3dCmdDXSetSamplers; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDraw { + uint32 vertexCount; + uint32 startVertexLocation; +} SVGA3dCmdDXDraw; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDrawIndexed { + uint32 indexCount; + uint32 startIndexLocation; + int32 baseVertexLocation; +} SVGA3dCmdDXDrawIndexed; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDrawInstanced { + uint32 vertexCountPerInstance; + uint32 instanceCount; + uint32 startVertexLocation; + uint32 startInstanceLocation; +} SVGA3dCmdDXDrawInstanced; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDrawIndexedInstanced { + uint32 indexCountPerInstance; + uint32 instanceCount; + uint32 startIndexLocation; + int32 baseVertexLocation; + uint32 startInstanceLocation; +} SVGA3dCmdDXDrawIndexedInstanced; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDrawIndexedInstancedIndirect { + SVGA3dSurfaceId argsBufferSid; + uint32 byteOffsetForArgs; +} SVGA3dCmdDXDrawIndexedInstancedIndirect; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDrawInstancedIndirect { + SVGA3dSurfaceId argsBufferSid; + uint32 byteOffsetForArgs; +} SVGA3dCmdDXDrawInstancedIndirect; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDrawAuto { + uint32 pad0; +} SVGA3dCmdDXDrawAuto; +#pragma pack(pop) + +#pragma pack(push, 1) 
+typedef struct SVGA3dCmdDXDispatch { + uint32 threadGroupCountX; + uint32 threadGroupCountY; + uint32 threadGroupCountZ; +} SVGA3dCmdDXDispatch; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDispatchIndirect { + SVGA3dSurfaceId argsBufferSid; + uint32 byteOffsetForArgs; +} SVGA3dCmdDXDispatchIndirect; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetInputLayout { + SVGA3dElementLayoutId elementLayoutId; +} SVGA3dCmdDXSetInputLayout; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dVertexBuffer { + SVGA3dSurfaceId sid; + uint32 stride; + uint32 offset; +} SVGA3dVertexBuffer; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetVertexBuffers { + uint32 startBuffer; + +} SVGA3dCmdDXSetVertexBuffers; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dVertexBuffer_v2 { + SVGA3dSurfaceId sid; + uint32 stride; + uint32 offset; + uint32 sizeInBytes; +} SVGA3dVertexBuffer_v2; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetVertexBuffers_v2 { + uint32 startBuffer; + +} SVGA3dCmdDXSetVertexBuffers_v2; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dVertexBufferOffsetAndSize { + uint32 stride; + uint32 offset; + uint32 sizeInBytes; +} SVGA3dVertexBufferOffsetAndSize; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetVertexBuffersOffsetAndSize { + uint32 startBuffer; + +} SVGA3dCmdDXSetVertexBuffersOffsetAndSize; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetIndexBuffer { + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + uint32 offset; +} SVGA3dCmdDXSetIndexBuffer; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetIndexBuffer_v2 { + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + uint32 offset; + uint32 sizeInBytes; +} SVGA3dCmdDXSetIndexBuffer_v2; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetIndexBufferOffsetAndSize { + SVGA3dSurfaceFormat format; + uint32 offset; + uint32 sizeInBytes; +} SVGA3dCmdDXSetIndexBufferOffsetAndSize; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetTopology { + SVGA3dPrimitiveType topology; +} SVGA3dCmdDXSetTopology; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetRenderTargets { + SVGA3dDepthStencilViewId depthStencilViewId; + +} SVGA3dCmdDXSetRenderTargets; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetBlendState { + SVGA3dBlendStateId blendId; + float blendFactor[4]; + uint32 sampleMask; +} SVGA3dCmdDXSetBlendState; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetDepthStencilState { + SVGA3dDepthStencilStateId depthStencilId; + uint32 stencilRef; +} SVGA3dCmdDXSetDepthStencilState; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetRasterizerState { + SVGA3dRasterizerStateId rasterizerId; +} SVGA3dCmdDXSetRasterizerState; +#pragma pack(pop) #define SVGA3D_DXQUERY_FLAG_PREDICATEHINT (1 << 0) typedef uint32 SVGA3dDXQueryFlags; -/* - * The SVGADXQueryDeviceState and SVGADXQueryDeviceBits are used by the device - * to track query state transitions, but are not intended to be used by the - * driver. 
- */ -#define SVGADX_QDSTATE_INVALID ((uint8)-1) /* Query has no state */ -#define SVGADX_QDSTATE_MIN 0 -#define SVGADX_QDSTATE_IDLE 0 /* Query hasn't started yet */ -#define SVGADX_QDSTATE_ACTIVE 1 /* Query is actively gathering data */ -#define SVGADX_QDSTATE_PENDING 2 /* Query is waiting for results */ -#define SVGADX_QDSTATE_FINISHED 3 /* Query has completed */ -#define SVGADX_QDSTATE_MAX 4 +#define SVGADX_QDSTATE_INVALID ((uint8)-1) +#define SVGADX_QDSTATE_MIN 0 +#define SVGADX_QDSTATE_IDLE 0 +#define SVGADX_QDSTATE_ACTIVE 1 +#define SVGADX_QDSTATE_PENDING 2 +#define SVGADX_QDSTATE_FINISHED 3 +#define SVGADX_QDSTATE_MAX 4 typedef uint8 SVGADXQueryDeviceState; -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dQueryTypeUint8 type; - uint16 pad0; - SVGADXQueryDeviceState state; - SVGA3dDXQueryFlags flags; - SVGAMobId mobid; - uint32 offset; -} -#include "vmware_pack_end.h" -SVGACOTableDXQueryEntry; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineQuery { - SVGA3dQueryId queryId; - SVGA3dQueryType type; - SVGA3dDXQueryFlags flags; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineQuery; /* SVGA_3D_CMD_DX_DEFINE_QUERY */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDestroyQuery { - SVGA3dQueryId queryId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDestroyQuery; /* SVGA_3D_CMD_DX_DESTROY_QUERY */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXBindQuery { - SVGA3dQueryId queryId; - SVGAMobId mobid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXBindQuery; /* SVGA_3D_CMD_DX_BIND_QUERY */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetQueryOffset { - SVGA3dQueryId queryId; - uint32 mobOffset; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetQueryOffset; /* SVGA_3D_CMD_DX_SET_QUERY_OFFSET */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXBeginQuery { - SVGA3dQueryId queryId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXBeginQuery; /* SVGA_3D_CMD_DX_QUERY_BEGIN */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXEndQuery { - SVGA3dQueryId queryId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXEndQuery; /* SVGA_3D_CMD_DX_QUERY_END */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXReadbackQuery { - SVGA3dQueryId queryId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXReadbackQuery; /* SVGA_3D_CMD_DX_READBACK_QUERY */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXMoveQuery { - SVGA3dQueryId queryId; - SVGAMobId mobid; - uint32 mobOffset; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXMoveQuery; /* SVGA_3D_CMD_DX_MOVE_QUERY */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXBindAllQuery { - uint32 cid; - SVGAMobId mobid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXBindAllQuery; /* SVGA_3D_CMD_DX_BIND_ALL_QUERY */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXReadbackAllQuery { - uint32 cid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXReadbackAllQuery; /* SVGA_3D_CMD_DX_READBACK_ALL_QUERY */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetPredication { - SVGA3dQueryId queryId; - uint32 predicateValue; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetPredication; /* SVGA_3D_CMD_DX_SET_PREDICATION */ - -typedef -#include "vmware_pack_begin.h" -struct MKS3dDXSOState { - uint32 offset; /* Starting offset */ - uint32 intOffset; /* Internal offset */ - uint32 vertexCount; /* vertices written */ - uint32 dead; -} -#include "vmware_pack_end.h" -SVGA3dDXSOState; - -/* Set the offset field to this value to append SO values to the 
buffer */ -#define SVGA3D_DX_SO_OFFSET_APPEND ((uint32) ~0u) - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dSoTarget { - SVGA3dSurfaceId sid; - uint32 offset; - uint32 sizeInBytes; -} -#include "vmware_pack_end.h" -SVGA3dSoTarget; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetSOTargets { - uint32 pad0; - /* Followed by a variable number of SVGA3dSOTarget's. */ -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetSOTargets; /* SVGA_3D_CMD_DX_SET_SOTARGETS */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dViewport -{ - float x; - float y; - float width; - float height; - float minDepth; - float maxDepth; -} -#include "vmware_pack_end.h" -SVGA3dViewport; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetViewports { - uint32 pad0; - /* Followed by a variable number of SVGA3dViewport's. */ -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetViewports; /* SVGA_3D_CMD_DX_SET_VIEWPORTS */ - -#define SVGA3D_DX_MAX_VIEWPORTS 16 - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetScissorRects { - uint32 pad0; - /* Followed by a variable number of SVGASignedRect's. */ -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetScissorRects; /* SVGA_3D_CMD_DX_SET_SCISSORRECTS */ - -#define SVGA3D_DX_MAX_SCISSORRECTS 16 - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXClearRenderTargetView { - SVGA3dRenderTargetViewId renderTargetViewId; - SVGA3dRGBAFloat rgba; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXClearRenderTargetView; /* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXClearDepthStencilView { - uint16 flags; - uint16 stencil; - SVGA3dDepthStencilViewId depthStencilViewId; - float depth; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXClearDepthStencilView; /* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXPredCopyRegion { - SVGA3dSurfaceId dstSid; - uint32 dstSubResource; - SVGA3dSurfaceId srcSid; - uint32 srcSubResource; - SVGA3dCopyBox box; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXPredCopyRegion; -/* SVGA_3D_CMD_DX_PRED_COPY_REGION */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXPredCopy { - SVGA3dSurfaceId dstSid; - SVGA3dSurfaceId srcSid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXPredConvertRegion { - SVGA3dSurfaceId dstSid; - uint32 dstSubResource; - SVGA3dBox destBox; - SVGA3dSurfaceId srcSid; - uint32 srcSubResource; - SVGA3dBox srcBox; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXPredConvertRegion; /* SVGA_3D_CMD_DX_PRED_CONVERT_REGION */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXPredConvert { - SVGA3dSurfaceId dstSid; - SVGA3dSurfaceId srcSid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXPredConvert; /* SVGA_3D_CMD_DX_PRED_CONVERT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXBufferCopy { - SVGA3dSurfaceId dest; - SVGA3dSurfaceId src; - uint32 destX; - uint32 srcX; - uint32 width; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXBufferCopy; -/* SVGA_3D_CMD_DX_BUFFER_COPY */ - -/* - * Perform a surface copy between a multisample, and a non-multisampled - * surface. 
- */ -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dSurfaceId dstSid; - uint32 dstSubResource; - SVGA3dSurfaceId srcSid; - uint32 srcSubResource; - SVGA3dSurfaceFormat copyFormat; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXResolveCopy; /* SVGA_3D_CMD_DX_RESOLVE_COPY */ - -/* - * Perform a predicated surface copy between a multisample, and a - * non-multisampled surface. - */ -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dSurfaceId dstSid; - uint32 dstSubResource; - SVGA3dSurfaceId srcSid; - uint32 srcSubResource; - SVGA3dSurfaceFormat copyFormat; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXPredResolveCopy; /* SVGA_3D_CMD_DX_PRED_RESOLVE_COPY */ +#pragma pack(push, 1) +typedef struct { + SVGA3dQueryTypeUint8 type; + uint16 pad0; + SVGADXQueryDeviceState state; + SVGA3dDXQueryFlags flags; + SVGAMobId mobid; + uint32 offset; +} SVGACOTableDXQueryEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineQuery { + SVGA3dQueryId queryId; + SVGA3dQueryType type; + SVGA3dDXQueryFlags flags; +} SVGA3dCmdDXDefineQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDestroyQuery { + SVGA3dQueryId queryId; +} SVGA3dCmdDXDestroyQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXBindQuery { + SVGA3dQueryId queryId; + SVGAMobId mobid; +} SVGA3dCmdDXBindQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetQueryOffset { + SVGA3dQueryId queryId; + uint32 mobOffset; +} SVGA3dCmdDXSetQueryOffset; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXBeginQuery { + SVGA3dQueryId queryId; +} SVGA3dCmdDXBeginQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXEndQuery { + SVGA3dQueryId queryId; +} SVGA3dCmdDXEndQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXReadbackQuery { + SVGA3dQueryId queryId; +} SVGA3dCmdDXReadbackQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXMoveQuery { + SVGA3dQueryId queryId; + SVGAMobId mobid; + uint32 mobOffset; +} SVGA3dCmdDXMoveQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXBindAllQuery { + uint32 cid; + SVGAMobId mobid; +} SVGA3dCmdDXBindAllQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXReadbackAllQuery { + uint32 cid; +} SVGA3dCmdDXReadbackAllQuery; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetPredication { + SVGA3dQueryId queryId; + uint32 predicateValue; +} SVGA3dCmdDXSetPredication; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct MKS3dDXSOState { + uint32 offset; + uint32 intOffset; + uint32 vertexCount; + uint32 dead; +} SVGA3dDXSOState; +#pragma pack(pop) + +#define SVGA3D_DX_SO_OFFSET_APPEND ((uint32)~0u) + +#pragma pack(push, 1) +typedef struct SVGA3dSoTarget { + SVGA3dSurfaceId sid; + uint32 offset; + uint32 sizeInBytes; +} SVGA3dSoTarget; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetSOTargets { + uint32 pad0; + +} SVGA3dCmdDXSetSOTargets; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dViewport { + float x; + float y; + float width; + float height; + float minDepth; + float maxDepth; +} SVGA3dViewport; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetViewports { + uint32 pad0; + +} SVGA3dCmdDXSetViewports; +#pragma pack(pop) + +#define SVGA3D_DX_MAX_VIEWPORTS 16 + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetScissorRects { + uint32 pad0; + +} 
SVGA3dCmdDXSetScissorRects; +#pragma pack(pop) + +#define SVGA3D_DX_MAX_SCISSORRECTS 16 + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXClearRenderTargetView { + SVGA3dRenderTargetViewId renderTargetViewId; + SVGA3dRGBAFloat rgba; +} SVGA3dCmdDXClearRenderTargetView; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXClearDepthStencilView { + uint16 flags; + uint16 stencil; + SVGA3dDepthStencilViewId depthStencilViewId; + float depth; +} SVGA3dCmdDXClearDepthStencilView; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXPredCopyRegion { + SVGA3dSurfaceId dstSid; + uint32 dstSubResource; + SVGA3dSurfaceId srcSid; + uint32 srcSubResource; + SVGA3dCopyBox box; +} SVGA3dCmdDXPredCopyRegion; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXPredStagingCopyRegion { + SVGA3dSurfaceId dstSid; + uint32 dstSubResource; + SVGA3dSurfaceId srcSid; + uint32 srcSubResource; + SVGA3dCopyBox box; + uint8 readback; + uint8 unsynchronized; + uint8 mustBeZero[2]; +} SVGA3dCmdDXPredStagingCopyRegion; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXPredCopy { + SVGA3dSurfaceId dstSid; + SVGA3dSurfaceId srcSid; +} SVGA3dCmdDXPredCopy; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXPredConvertRegion { + SVGA3dSurfaceId dstSid; + uint32 dstSubResource; + SVGA3dBox destBox; + SVGA3dSurfaceId srcSid; + uint32 srcSubResource; + SVGA3dBox srcBox; +} SVGA3dCmdDXPredConvertRegion; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXPredStagingConvertRegion { + SVGA3dSurfaceId dstSid; + uint32 dstSubResource; + SVGA3dBox destBox; + SVGA3dSurfaceId srcSid; + uint32 srcSubResource; + SVGA3dBox srcBox; + uint8 readback; + uint8 unsynchronized; + uint8 mustBeZero[2]; +} SVGA3dCmdDXPredStagingConvertRegion; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXPredConvert { + SVGA3dSurfaceId dstSid; + SVGA3dSurfaceId srcSid; +} SVGA3dCmdDXPredConvert; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXPredStagingConvert { + SVGA3dSurfaceId dstSid; + SVGA3dSurfaceId srcSid; + uint8 readback; + uint8 unsynchronized; + uint8 mustBeZero[2]; +} SVGA3dCmdDXPredStagingConvert; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXBufferCopy { + SVGA3dSurfaceId dest; + SVGA3dSurfaceId src; + uint32 destX; + uint32 srcX; + uint32 width; +} SVGA3dCmdDXBufferCopy; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXStagingBufferCopy { + SVGA3dSurfaceId dest; + SVGA3dSurfaceId src; + uint32 destX; + uint32 srcX; + uint32 width; + uint8 readback; + uint8 unsynchronized; + uint8 mustBeZero[2]; +} SVGA3dCmdDXStagingBufferCopy; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dSurfaceId dstSid; + uint32 dstSubResource; + SVGA3dSurfaceId srcSid; + uint32 srcSubResource; + SVGA3dSurfaceFormat copyFormat; +} SVGA3dCmdDXResolveCopy; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dSurfaceId dstSid; + uint32 dstSubResource; + SVGA3dSurfaceId srcSid; + uint32 srcSubResource; + SVGA3dSurfaceFormat copyFormat; +} SVGA3dCmdDXPredResolveCopy; +#pragma pack(pop) typedef uint32 SVGA3dDXPresentBltMode; -#define SVGADX_PRESENTBLT_LINEAR (1 << 0) -#define SVGADX_PRESENTBLT_FORCE_SRC_SRGB (1 << 1) +#define SVGADX_PRESENTBLT_LINEAR (1 << 0) +#define SVGADX_PRESENTBLT_FORCE_SRC_SRGB (1 << 1) #define SVGADX_PRESENTBLT_FORCE_SRC_XRBIAS (1 << 2) -#define SVGADX_PRESENTBLT_MODE_MAX (1 << 3) - -typedef -#include 
"vmware_pack_begin.h" -struct SVGA3dCmdDXPresentBlt { - SVGA3dSurfaceId srcSid; - uint32 srcSubResource; - SVGA3dSurfaceId dstSid; - uint32 destSubResource; - SVGA3dBox boxSrc; - SVGA3dBox boxDest; - SVGA3dDXPresentBltMode mode; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXPresentBlt; /* SVGA_3D_CMD_DX_PRESENTBLT*/ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXGenMips { - SVGA3dShaderResourceViewId shaderResourceViewId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */ - -/* - * Update a sub-resource in a guest-backed resource. - * (Inform the device that the guest-contents have been updated.) - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXUpdateSubResource { - SVGA3dSurfaceId sid; - uint32 subResource; - SVGA3dBox box; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXUpdateSubResource; /* SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE */ - -/* - * Readback a subresource in a guest-backed resource. - * (Request the device to flush the dirty contents into the guest.) - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXReadbackSubResource { - SVGA3dSurfaceId sid; - uint32 subResource; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXReadbackSubResource; /* SVGA_3D_CMD_DX_READBACK_SUBRESOURCE */ - -/* - * Invalidate an image in a guest-backed surface. - * (Notify the device that the contents can be lost.) - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXInvalidateSubResource { - SVGA3dSurfaceId sid; - uint32 subResource; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXInvalidateSubResource; /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */ - - -/* - * Raw byte wise transfer from a buffer surface into another surface - * of the requested box. Supported if 3d is enabled and SVGA_CAP_DX - * is set. This command does not take a context. 
- */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXTransferFromBuffer { - SVGA3dSurfaceId srcSid; - uint32 srcOffset; - uint32 srcPitch; - uint32 srcSlicePitch; - SVGA3dSurfaceId destSid; - uint32 destSubResource; - SVGA3dBox destBox; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXTransferFromBuffer; /* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER */ - - -#define SVGA3D_TRANSFER_TO_BUFFER_READBACK (1 << 0) +#define SVGADX_PRESENTBLT_MODE_MAX (1 << 3) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXPresentBlt { + SVGA3dSurfaceId srcSid; + uint32 srcSubResource; + SVGA3dSurfaceId dstSid; + uint32 destSubResource; + SVGA3dBox boxSrc; + SVGA3dBox boxDest; + SVGA3dDXPresentBltMode mode; +} SVGA3dCmdDXPresentBlt; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXGenMips { + SVGA3dShaderResourceViewId shaderResourceViewId; +} SVGA3dCmdDXGenMips; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXUpdateSubResource { + SVGA3dSurfaceId sid; + uint32 subResource; + SVGA3dBox box; +} SVGA3dCmdDXUpdateSubResource; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXReadbackSubResource { + SVGA3dSurfaceId sid; + uint32 subResource; +} SVGA3dCmdDXReadbackSubResource; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXInvalidateSubResource { + SVGA3dSurfaceId sid; + uint32 subResource; +} SVGA3dCmdDXInvalidateSubResource; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXTransferFromBuffer { + SVGA3dSurfaceId srcSid; + uint32 srcOffset; + uint32 srcPitch; + uint32 srcSlicePitch; + SVGA3dSurfaceId destSid; + uint32 destSubResource; + SVGA3dBox destBox; +} SVGA3dCmdDXTransferFromBuffer; +#pragma pack(pop) + +#define SVGA3D_TRANSFER_TO_BUFFER_READBACK (1 << 0) #define SVGA3D_TRANSFER_TO_BUFFER_FLAGS_MASK (1 << 0) typedef uint32 SVGA3dTransferToBufferFlags; -/* - * Raw byte wise transfer to a buffer surface from another surface - * of the requested box. Supported if SVGA_CAP_DX2 is set. This - * command does not take a context. - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXTransferToBuffer { - SVGA3dSurfaceId srcSid; - uint32 srcSubResource; - SVGA3dBox srcBox; - SVGA3dSurfaceId destSid; - uint32 destOffset; - uint32 destPitch; - uint32 destSlicePitch; - SVGA3dTransferToBufferFlags flags; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXTransferToBuffer; /* SVGA_3D_CMD_DX_TRANSFER_TO_BUFFER */ - +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXTransferToBuffer { + SVGA3dSurfaceId srcSid; + uint32 srcSubResource; + SVGA3dBox srcBox; + SVGA3dSurfaceId destSid; + uint32 destOffset; + uint32 destPitch; + uint32 destSlicePitch; + SVGA3dTransferToBufferFlags flags; +} SVGA3dCmdDXTransferToBuffer; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXPredTransferFromBuffer { + SVGA3dSurfaceId srcSid; + uint32 srcOffset; + uint32 srcPitch; + uint32 srcSlicePitch; + SVGA3dSurfaceId destSid; + uint32 destSubResource; + SVGA3dBox destBox; +} SVGA3dCmdDXPredTransferFromBuffer; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSurfaceCopyAndReadback { + SVGA3dSurfaceId srcSid; + SVGA3dSurfaceId destSid; + SVGA3dCopyBox box; +} SVGA3dCmdDXSurfaceCopyAndReadback; +#pragma pack(pop) -/* - * Raw byte wise transfer from a buffer surface into another surface - * of the requested box. Supported if SVGA3D_DEVCAP_DXCONTEXT is set. - * The context is implied from the command buffer header. 
- */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXPredTransferFromBuffer { - SVGA3dSurfaceId srcSid; - uint32 srcOffset; - uint32 srcPitch; - uint32 srcSlicePitch; - SVGA3dSurfaceId destSid; - uint32 destSubResource; - SVGA3dBox destBox; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXPredTransferFromBuffer; -/* SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER */ - - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSurfaceCopyAndReadback { - SVGA3dSurfaceId srcSid; - SVGA3dSurfaceId destSid; - SVGA3dCopyBox box; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSurfaceCopyAndReadback; -/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */ - -/* - * SVGA_DX_HINT_NONE: Does nothing. - * - * SVGA_DX_HINT_PREFETCH_OBJECT: - * SVGA_DX_HINT_PREEVICT_OBJECT: - * Consumes a SVGAObjectRef, and hints that the host should consider - * fetching/evicting the specified object. - * - * An id of SVGA3D_INVALID_ID can be used if the guest isn't sure - * what object was affected. (For instance, if the guest knows that - * it is about to evict a DXShader, but doesn't know precisely which one, - * the device can still use this to help limit it's search, or track - * how many page-outs have happened.) - * - * SVGA_DX_HINT_PREFETCH_COBJECT: - * SVGA_DX_HINT_PREEVICT_COBJECT: - * Same as the above, except they consume an SVGACObjectRef. - */ typedef uint32 SVGADXHintId; -#define SVGA_DX_HINT_NONE 0 -#define SVGA_DX_HINT_PREFETCH_OBJECT 1 -#define SVGA_DX_HINT_PREEVICT_OBJECT 2 -#define SVGA_DX_HINT_PREFETCH_COBJECT 3 -#define SVGA_DX_HINT_PREEVICT_COBJECT 4 -#define SVGA_DX_HINT_MAX 5 - -typedef -#include "vmware_pack_begin.h" -struct SVGAObjectRef { - SVGAOTableType type; - uint32 id; -} -#include "vmware_pack_end.h" -SVGAObjectRef; - -typedef -#include "vmware_pack_begin.h" -struct SVGACObjectRef { - SVGACOTableType type; - uint32 cid; - uint32 id; -} -#include "vmware_pack_end.h" -SVGACObjectRef; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXHint { - SVGADXHintId hintId; - - /* - * Followed by variable sized data depending on the hintId. 
- */ -} -#include "vmware_pack_end.h" -SVGA3dCmdDXHint; -/* SVGA_3D_CMD_DX_HINT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXBufferUpdate { - SVGA3dSurfaceId sid; - uint32 x; - uint32 width; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXBufferUpdate; -/* SVGA_3D_CMD_DX_BUFFER_UPDATE */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetConstantBufferOffset { - uint32 slot; - uint32 offsetInBytes; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetConstantBufferOffset; +#define SVGA_DX_HINT_NONE 0 +#define SVGA_DX_HINT_PREFETCH_OBJECT 1 +#define SVGA_DX_HINT_PREEVICT_OBJECT 2 +#define SVGA_DX_HINT_PREFETCH_COBJECT 3 +#define SVGA_DX_HINT_PREEVICT_COBJECT 4 +#define SVGA_DX_HINT_MAX 5 + +#pragma pack(push, 1) +typedef struct SVGAObjectRef { + SVGAOTableType type; + uint32 id; +} SVGAObjectRef; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGACObjectRef { + SVGACOTableType type; + uint32 cid; + uint32 id; +} SVGACObjectRef; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXHint { + SVGADXHintId hintId; + +} SVGA3dCmdDXHint; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXBufferUpdate { + SVGA3dSurfaceId sid; + uint32 x; + uint32 width; +} SVGA3dCmdDXBufferUpdate; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetConstantBufferOffset { + uint32 slot; + uint32 offsetInBytes; +} SVGA3dCmdDXSetConstantBufferOffset; +#pragma pack(pop) typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetVSConstantBufferOffset; -/* SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET */ typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetPSConstantBufferOffset; -/* SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET */ typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetGSConstantBufferOffset; -/* SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET */ typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetHSConstantBufferOffset; -/* SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET */ typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetDSConstantBufferOffset; -/* SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET */ typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetCSConstantBufferOffset; -/* SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET */ - -#define SVGA3D_BUFFEREX_SRV_RAW (1 << 0) -#define SVGA3D_BUFFEREX_SRV_FLAGS_MAX (1 << 1) +#define SVGA3D_BUFFEREX_SRV_RAW (1 << 0) +#define SVGA3D_BUFFEREX_SRV_FLAGS_MAX (1 << 1) #define SVGA3D_BUFFEREX_SRV_FLAGS_MASK (SVGA3D_BUFFEREX_SRV_FLAGS_MAX - 1) typedef uint32 SVGA3dBufferExFlags; -typedef -#include "vmware_pack_begin.h" -struct { - union { - struct { - uint32 firstElement; - uint32 numElements; - uint32 pad0; - uint32 pad1; - } buffer; - struct { - uint32 mostDetailedMip; - uint32 firstArraySlice; - uint32 mipLevels; - uint32 arraySize; - } tex; /* 1d, 2d, 3d, cube */ - struct { - uint32 firstElement; - uint32 numElements; - SVGA3dBufferExFlags flags; - uint32 pad0; - } bufferex; - }; -} -#include "vmware_pack_end.h" -SVGA3dShaderResourceViewDesc; - -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dSurfaceId sid; - SVGA3dSurfaceFormat format; - SVGA3dResourceType resourceDimension; - SVGA3dShaderResourceViewDesc desc; - uint32 pad; -} -#include "vmware_pack_end.h" -SVGACOTableDXSRViewEntry; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineShaderResourceView { - SVGA3dShaderResourceViewId shaderResourceViewId; - - SVGA3dSurfaceId sid; - SVGA3dSurfaceFormat format; - SVGA3dResourceType resourceDimension; - - SVGA3dShaderResourceViewDesc 
desc; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineShaderResourceView; -/* SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDestroyShaderResourceView { - SVGA3dShaderResourceViewId shaderResourceViewId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDestroyShaderResourceView; -/* SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dRenderTargetViewDesc { - union { - struct { - uint32 firstElement; - uint32 numElements; - uint32 padding0; - } buffer; - struct { - uint32 mipSlice; - uint32 firstArraySlice; - uint32 arraySize; - } tex; /* 1d, 2d, cube */ - struct { - uint32 mipSlice; - uint32 firstW; - uint32 wSize; - } tex3D; - }; -} -#include "vmware_pack_end.h" -SVGA3dRenderTargetViewDesc; - -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dSurfaceId sid; - SVGA3dSurfaceFormat format; - SVGA3dResourceType resourceDimension; - SVGA3dRenderTargetViewDesc desc; - uint32 pad[2]; -} -#include "vmware_pack_end.h" -SVGACOTableDXRTViewEntry; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineRenderTargetView { - SVGA3dRenderTargetViewId renderTargetViewId; - - SVGA3dSurfaceId sid; - SVGA3dSurfaceFormat format; - SVGA3dResourceType resourceDimension; - - SVGA3dRenderTargetViewDesc desc; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineRenderTargetView; -/* SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDestroyRenderTargetView { - SVGA3dRenderTargetViewId renderTargetViewId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDestroyRenderTargetView; -/* SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW */ - -/* - */ -#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_DEPTH 0x01 +#pragma pack(push, 1) +typedef struct { + union { + struct { + uint32 firstElement; + uint32 numElements; + uint32 pad0; + uint32 pad1; + } buffer; + struct { + uint32 mostDetailedMip; + uint32 firstArraySlice; + uint32 mipLevels; + uint32 arraySize; + } tex; + struct { + uint32 firstElement; + uint32 numElements; + SVGA3dBufferExFlags flags; + uint32 pad0; + } bufferex; + }; +} SVGA3dShaderResourceViewDesc; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + SVGA3dShaderResourceViewDesc desc; + uint32 pad; +} SVGACOTableDXSRViewEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineShaderResourceView { + SVGA3dShaderResourceViewId shaderResourceViewId; + + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + + SVGA3dShaderResourceViewDesc desc; +} SVGA3dCmdDXDefineShaderResourceView; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDestroyShaderResourceView { + SVGA3dShaderResourceViewId shaderResourceViewId; +} SVGA3dCmdDXDestroyShaderResourceView; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dRenderTargetViewDesc { + union { + struct { + uint32 firstElement; + uint32 numElements; + uint32 padding0; + } buffer; + struct { + uint32 mipSlice; + uint32 firstArraySlice; + uint32 arraySize; + } tex; + struct { + uint32 mipSlice; + uint32 firstW; + uint32 wSize; + } tex3D; + }; +} SVGA3dRenderTargetViewDesc; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + SVGA3dRenderTargetViewDesc desc; + uint32 pad[2]; +} SVGACOTableDXRTViewEntry; 
+#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineRenderTargetView { + SVGA3dRenderTargetViewId renderTargetViewId; + + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + + SVGA3dRenderTargetViewDesc desc; +} SVGA3dCmdDXDefineRenderTargetView; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDestroyRenderTargetView { + SVGA3dRenderTargetViewId renderTargetViewId; +} SVGA3dCmdDXDestroyRenderTargetView; +#pragma pack(pop) + +#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_DEPTH 0x01 #define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_STENCIL 0x02 -#define SVGA3D_DXDSVIEW_CREATE_FLAG_MASK 0x03 +#define SVGA3D_DXDSVIEW_CREATE_FLAG_MASK 0x03 typedef uint8 SVGA3DCreateDSViewFlags; -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dSurfaceId sid; - SVGA3dSurfaceFormat format; - SVGA3dResourceType resourceDimension; - uint32 mipSlice; - uint32 firstArraySlice; - uint32 arraySize; - SVGA3DCreateDSViewFlags flags; - uint8 pad0; - uint16 pad1; - uint32 pad2; -} -#include "vmware_pack_end.h" -SVGACOTableDXDSViewEntry; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineDepthStencilView { - SVGA3dDepthStencilViewId depthStencilViewId; - - SVGA3dSurfaceId sid; - SVGA3dSurfaceFormat format; - SVGA3dResourceType resourceDimension; - uint32 mipSlice; - uint32 firstArraySlice; - uint32 arraySize; - SVGA3DCreateDSViewFlags flags; - uint8 pad0; - uint16 pad1; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineDepthStencilView; -/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW */ - -/* - * Version 2 needed in order to start validating and using the flags - * field. Unfortunately the device wasn't validating or using the - * flags field and the driver wasn't initializing it in shipped code, - * so a new version of the command is needed to allow that code to - * continue to work. 
- */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineDepthStencilView_v2 { - SVGA3dDepthStencilViewId depthStencilViewId; - - SVGA3dSurfaceId sid; - SVGA3dSurfaceFormat format; - SVGA3dResourceType resourceDimension; - uint32 mipSlice; - uint32 firstArraySlice; - uint32 arraySize; - SVGA3DCreateDSViewFlags flags; - uint8 pad0; - uint16 pad1; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineDepthStencilView_v2; -/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2 */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDestroyDepthStencilView { - SVGA3dDepthStencilViewId depthStencilViewId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDestroyDepthStencilView; -/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW */ - - -#define SVGA3D_UABUFFER_RAW (1 << 0) -#define SVGA3D_UABUFFER_APPEND (1 << 1) +#pragma pack(push, 1) +typedef struct { + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + uint32 mipSlice; + uint32 firstArraySlice; + uint32 arraySize; + SVGA3DCreateDSViewFlags flags; + uint8 pad0; + uint16 pad1; + uint32 pad2; +} SVGACOTableDXDSViewEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineDepthStencilView { + SVGA3dDepthStencilViewId depthStencilViewId; + + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + uint32 mipSlice; + uint32 firstArraySlice; + uint32 arraySize; + SVGA3DCreateDSViewFlags flags; + uint8 pad0; + uint16 pad1; +} SVGA3dCmdDXDefineDepthStencilView; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineDepthStencilView_v2 { + SVGA3dDepthStencilViewId depthStencilViewId; + + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + uint32 mipSlice; + uint32 firstArraySlice; + uint32 arraySize; + SVGA3DCreateDSViewFlags flags; + uint8 pad0; + uint16 pad1; +} SVGA3dCmdDXDefineDepthStencilView_v2; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDestroyDepthStencilView { + SVGA3dDepthStencilViewId depthStencilViewId; +} SVGA3dCmdDXDestroyDepthStencilView; +#pragma pack(pop) + +#define SVGA3D_UABUFFER_RAW (1 << 0) +#define SVGA3D_UABUFFER_APPEND (1 << 1) #define SVGA3D_UABUFFER_COUNTER (1 << 2) typedef uint32 SVGA3dUABufferFlags; -typedef -#include "vmware_pack_begin.h" -struct { - union { - struct { - uint32 firstElement; - uint32 numElements; - SVGA3dUABufferFlags flags; - uint32 padding0; - uint32 padding1; - } buffer; - struct { - uint32 mipSlice; - uint32 firstArraySlice; - uint32 arraySize; - uint32 padding0; - uint32 padding1; - } tex; /* 1d, 2d */ - struct { - uint32 mipSlice; - uint32 firstW; - uint32 wSize; - uint32 padding0; - uint32 padding1; - } tex3D; - }; -} -#include "vmware_pack_end.h" -SVGA3dUAViewDesc; - -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dSurfaceId sid; - SVGA3dSurfaceFormat format; - SVGA3dResourceType resourceDimension; - SVGA3dUAViewDesc desc; - uint32 structureCount; - uint32 pad[7]; -} -#include "vmware_pack_end.h" -SVGACOTableDXUAViewEntry; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineUAView { - SVGA3dUAViewId uaViewId; - - SVGA3dSurfaceId sid; - SVGA3dSurfaceFormat format; - SVGA3dResourceType resourceDimension; - - SVGA3dUAViewDesc desc; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineUAView; -/* SVGA_3D_CMD_DX_DEFINE_UA_VIEW */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDestroyUAView { - SVGA3dUAViewId uaViewId; -} -#include "vmware_pack_end.h" 
-SVGA3dCmdDXDestroyUAView; -/* SVGA_3D_CMD_DX_DESTROY_UA_VIEW */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXClearUAViewUint { - SVGA3dUAViewId uaViewId; - SVGA3dRGBAUint32 value; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXClearUAViewUint; -/* SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXClearUAViewFloat { - SVGA3dUAViewId uaViewId; - SVGA3dRGBAFloat value; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXClearUAViewFloat; -/* SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXCopyStructureCount { - SVGA3dUAViewId srcUAViewId; - SVGA3dSurfaceId destSid; - uint32 destByteOffset; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXCopyStructureCount; -/* SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetStructureCount { - SVGA3dUAViewId uaViewId; - uint32 structureCount; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetStructureCount; -/* SVGA_3D_CMD_DX_SET_STRUCTURE_COUNT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetUAViews { - uint32 uavSpliceIndex; - /* Followed by a variable number of SVGA3dUAViewId's. */ -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetUAViews; /* SVGA_3D_CMD_DX_SET_UA_VIEWS */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetCSUAViews { - uint32 startIndex; - /* Followed by a variable number of SVGA3dUAViewId's. */ -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetCSUAViews; /* SVGA_3D_CMD_DX_SET_CS_UA_VIEWS */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dInputElementDesc { - uint32 inputSlot; - uint32 alignedByteOffset; - SVGA3dSurfaceFormat format; - SVGA3dInputClassification inputSlotClass; - uint32 instanceDataStepRate; - uint32 inputRegister; -} -#include "vmware_pack_end.h" -SVGA3dInputElementDesc; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 elid; - uint32 numDescs; - SVGA3dInputElementDesc descs[32]; - uint32 pad[62]; -} -#include "vmware_pack_end.h" -SVGACOTableDXElementLayoutEntry; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineElementLayout { - SVGA3dElementLayoutId elementLayoutId; - /* Followed by a variable number of SVGA3dInputElementDesc's. 
*/ -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineElementLayout; -/* SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDestroyElementLayout { - SVGA3dElementLayoutId elementLayoutId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDestroyElementLayout; -/* SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT */ - +#pragma pack(push, 1) +typedef struct { + union { + struct { + uint32 firstElement; + uint32 numElements; + SVGA3dUABufferFlags flags; + uint32 padding0; + uint32 padding1; + } buffer; + struct { + uint32 mipSlice; + uint32 firstArraySlice; + uint32 arraySize; + uint32 padding0; + uint32 padding1; + } tex; + struct { + uint32 mipSlice; + uint32 firstW; + uint32 wSize; + uint32 padding0; + uint32 padding1; + } tex3D; + }; +} SVGA3dUAViewDesc; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + SVGA3dUAViewDesc desc; + uint32 structureCount; + uint32 pad[7]; +} SVGACOTableDXUAViewEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineUAView { + SVGA3dUAViewId uaViewId; + + SVGA3dSurfaceId sid; + SVGA3dSurfaceFormat format; + SVGA3dResourceType resourceDimension; + + SVGA3dUAViewDesc desc; +} SVGA3dCmdDXDefineUAView; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDestroyUAView { + SVGA3dUAViewId uaViewId; +} SVGA3dCmdDXDestroyUAView; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXClearUAViewUint { + SVGA3dUAViewId uaViewId; + SVGA3dRGBAUint32 value; +} SVGA3dCmdDXClearUAViewUint; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXClearUAViewFloat { + SVGA3dUAViewId uaViewId; + SVGA3dRGBAFloat value; +} SVGA3dCmdDXClearUAViewFloat; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXCopyStructureCount { + SVGA3dUAViewId srcUAViewId; + SVGA3dSurfaceId destSid; + uint32 destByteOffset; +} SVGA3dCmdDXCopyStructureCount; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetStructureCount { + SVGA3dUAViewId uaViewId; + uint32 structureCount; +} SVGA3dCmdDXSetStructureCount; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetUAViews { + uint32 uavSpliceIndex; + +} SVGA3dCmdDXSetUAViews; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetCSUAViews { + uint32 startIndex; + +} SVGA3dCmdDXSetCSUAViews; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dInputElementDesc { + uint32 inputSlot; + uint32 alignedByteOffset; + SVGA3dSurfaceFormat format; + SVGA3dInputClassification inputSlotClass; + uint32 instanceDataStepRate; + uint32 inputRegister; +} SVGA3dInputElementDesc; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 elid; + uint32 numDescs; + SVGA3dInputElementDesc descs[32]; + uint32 pad[62]; +} SVGACOTableDXElementLayoutEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineElementLayout { + SVGA3dElementLayoutId elementLayoutId; + +} SVGA3dCmdDXDefineElementLayout; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDestroyElementLayout { + SVGA3dElementLayoutId elementLayoutId; +} SVGA3dCmdDXDestroyElementLayout; +#pragma pack(pop) #define SVGA3D_DX_MAX_RENDER_TARGETS 8 -typedef -#include "vmware_pack_begin.h" -struct SVGA3dDXBlendStatePerRT { - uint8 blendEnable; - uint8 srcBlend; - uint8 destBlend; - uint8 blendOp; - uint8 srcBlendAlpha; - uint8 destBlendAlpha; - uint8 blendOpAlpha; 
- SVGA3dColorWriteEnable renderTargetWriteMask; - uint8 logicOpEnable; - uint8 logicOp; - uint16 pad0; -} -#include "vmware_pack_end.h" -SVGA3dDXBlendStatePerRT; - -typedef -#include "vmware_pack_begin.h" -struct { - uint8 alphaToCoverageEnable; - uint8 independentBlendEnable; - uint16 pad0; - SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS]; - uint32 pad1[7]; -} -#include "vmware_pack_end.h" -SVGACOTableDXBlendStateEntry; - -/* - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineBlendState { - SVGA3dBlendStateId blendId; - uint8 alphaToCoverageEnable; - uint8 independentBlendEnable; - uint16 pad0; - SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS]; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineBlendState; /* SVGA_3D_CMD_DX_DEFINE_BLEND_STATE */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDestroyBlendState { - SVGA3dBlendStateId blendId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDestroyBlendState; /* SVGA_3D_CMD_DX_DESTROY_BLEND_STATE */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint8 depthEnable; - SVGA3dDepthWriteMask depthWriteMask; - SVGA3dComparisonFunc depthFunc; - uint8 stencilEnable; - uint8 frontEnable; - uint8 backEnable; - uint8 stencilReadMask; - uint8 stencilWriteMask; - - uint8 frontStencilFailOp; - uint8 frontStencilDepthFailOp; - uint8 frontStencilPassOp; - SVGA3dComparisonFunc frontStencilFunc; - - uint8 backStencilFailOp; - uint8 backStencilDepthFailOp; - uint8 backStencilPassOp; - SVGA3dComparisonFunc backStencilFunc; -} -#include "vmware_pack_end.h" -SVGACOTableDXDepthStencilEntry; - -/* - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineDepthStencilState { - SVGA3dDepthStencilStateId depthStencilId; - - uint8 depthEnable; - SVGA3dDepthWriteMask depthWriteMask; - SVGA3dComparisonFunc depthFunc; - uint8 stencilEnable; - uint8 frontEnable; - uint8 backEnable; - uint8 stencilReadMask; - uint8 stencilWriteMask; - - uint8 frontStencilFailOp; - uint8 frontStencilDepthFailOp; - uint8 frontStencilPassOp; - SVGA3dComparisonFunc frontStencilFunc; - - uint8 backStencilFailOp; - uint8 backStencilDepthFailOp; - uint8 backStencilPassOp; - SVGA3dComparisonFunc backStencilFunc; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineDepthStencilState; -/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDestroyDepthStencilState { - SVGA3dDepthStencilStateId depthStencilId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDestroyDepthStencilState; -/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint8 fillMode; - SVGA3dCullMode cullMode; - uint8 frontCounterClockwise; - uint8 provokingVertexLast; - int32 depthBias; - float depthBiasClamp; - float slopeScaledDepthBias; - uint8 depthClipEnable; - uint8 scissorEnable; - SVGA3dMultisampleRastEnable multisampleEnable; - uint8 antialiasedLineEnable; - float lineWidth; - uint8 lineStippleEnable; - uint8 lineStippleFactor; - uint16 lineStipplePattern; - uint8 forcedSampleCount; - uint8 mustBeZero[3]; -} -#include "vmware_pack_end.h" -SVGACOTableDXRasterizerStateEntry; - -/* - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineRasterizerState { - SVGA3dRasterizerStateId rasterizerId; - - uint8 fillMode; - SVGA3dCullMode cullMode; - uint8 frontCounterClockwise; - uint8 provokingVertexLast; - int32 depthBias; - float depthBiasClamp; - float slopeScaledDepthBias; - uint8 depthClipEnable; - uint8 scissorEnable; - 
SVGA3dMultisampleRastEnable multisampleEnable; - uint8 antialiasedLineEnable; - float lineWidth; - uint8 lineStippleEnable; - uint8 lineStippleFactor; - uint16 lineStipplePattern; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineRasterizerState; -/* SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDestroyRasterizerState { - SVGA3dRasterizerStateId rasterizerId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDestroyRasterizerState; -/* SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGA3dFilter filter; - uint8 addressU; - uint8 addressV; - uint8 addressW; - uint8 pad0; - float mipLODBias; - uint8 maxAnisotropy; - SVGA3dComparisonFunc comparisonFunc; - uint16 pad1; - SVGA3dRGBAFloat borderColor; - float minLOD; - float maxLOD; - uint32 pad2[6]; -} -#include "vmware_pack_end.h" -SVGACOTableDXSamplerEntry; - -/* - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineSamplerState { - SVGA3dSamplerId samplerId; - SVGA3dFilter filter; - uint8 addressU; - uint8 addressV; - uint8 addressW; - uint8 pad0; - float mipLODBias; - uint8 maxAnisotropy; - SVGA3dComparisonFunc comparisonFunc; - uint16 pad1; - SVGA3dRGBAFloat borderColor; - float minLOD; - float maxLOD; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineSamplerState; /* SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDestroySamplerState { - SVGA3dSamplerId samplerId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */ - - -#define SVGADX_SIGNATURE_SEMANTIC_NAME_UNDEFINED 0 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_POSITION 1 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_CLIP_DISTANCE 2 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_CULL_DISTANCE 3 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_RENDER_TARGET_ARRAY_INDEX 4 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_VIEWPORT_ARRAY_INDEX 5 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_VERTEX_ID 6 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_PRIMITIVE_ID 7 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_INSTANCE_ID 8 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_IS_FRONT_FACE 9 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_SAMPLE_INDEX 10 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_EQ_0_EDGE_TESSFACTOR 11 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_EQ_0_EDGE_TESSFACTOR 12 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_EQ_1_EDGE_TESSFACTOR 13 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_EQ_1_EDGE_TESSFACTOR 14 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_INSIDE_TESSFACTOR 15 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_INSIDE_TESSFACTOR 16 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_U_EQ_0_EDGE_TESSFACTOR 17 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_V_EQ_0_EDGE_TESSFACTOR 18 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_W_EQ_0_EDGE_TESSFACTOR 19 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_INSIDE_TESSFACTOR 20 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_LINE_DETAIL_TESSFACTOR 21 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_LINE_DENSITY_TESSFACTOR 22 -#define SVGADX_SIGNATURE_SEMANTIC_NAME_MAX 23 +#pragma pack(push, 1) +typedef struct SVGA3dDXBlendStatePerRT { + uint8 blendEnable; + uint8 srcBlend; + uint8 destBlend; + uint8 blendOp; + uint8 srcBlendAlpha; + uint8 destBlendAlpha; + uint8 blendOpAlpha; + SVGA3dColorWriteEnable renderTargetWriteMask; + uint8 logicOpEnable; + uint8 logicOp; + uint16 pad0; +} SVGA3dDXBlendStatePerRT; +#pragma pack(pop) + 
+#pragma pack(push, 1) +typedef struct { + uint8 alphaToCoverageEnable; + uint8 independentBlendEnable; + uint16 pad0; + SVGA3dDXBlendStatePerRT perRT[SVGA3D_DX_MAX_RENDER_TARGETS]; + uint32 pad1[7]; +} SVGACOTableDXBlendStateEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineBlendState { + SVGA3dBlendStateId blendId; + uint8 alphaToCoverageEnable; + uint8 independentBlendEnable; + uint16 pad0; + SVGA3dDXBlendStatePerRT perRT[SVGA3D_DX_MAX_RENDER_TARGETS]; +} SVGA3dCmdDXDefineBlendState; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDestroyBlendState { + SVGA3dBlendStateId blendId; +} SVGA3dCmdDXDestroyBlendState; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint8 depthEnable; + SVGA3dDepthWriteMask depthWriteMask; + SVGA3dComparisonFunc depthFunc; + uint8 stencilEnable; + uint8 frontEnable; + uint8 backEnable; + uint8 stencilReadMask; + uint8 stencilWriteMask; + + uint8 frontStencilFailOp; + uint8 frontStencilDepthFailOp; + uint8 frontStencilPassOp; + SVGA3dComparisonFunc frontStencilFunc; + + uint8 backStencilFailOp; + uint8 backStencilDepthFailOp; + uint8 backStencilPassOp; + SVGA3dComparisonFunc backStencilFunc; +} SVGACOTableDXDepthStencilEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineDepthStencilState { + SVGA3dDepthStencilStateId depthStencilId; + + uint8 depthEnable; + SVGA3dDepthWriteMask depthWriteMask; + SVGA3dComparisonFunc depthFunc; + uint8 stencilEnable; + uint8 frontEnable; + uint8 backEnable; + uint8 stencilReadMask; + uint8 stencilWriteMask; + + uint8 frontStencilFailOp; + uint8 frontStencilDepthFailOp; + uint8 frontStencilPassOp; + SVGA3dComparisonFunc frontStencilFunc; + + uint8 backStencilFailOp; + uint8 backStencilDepthFailOp; + uint8 backStencilPassOp; + SVGA3dComparisonFunc backStencilFunc; +} SVGA3dCmdDXDefineDepthStencilState; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDestroyDepthStencilState { + SVGA3dDepthStencilStateId depthStencilId; +} SVGA3dCmdDXDestroyDepthStencilState; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint8 fillMode; + SVGA3dCullMode cullMode; + uint8 frontCounterClockwise; + uint8 provokingVertexLast; + int32 depthBias; + float depthBiasClamp; + float slopeScaledDepthBias; + uint8 depthClipEnable; + uint8 scissorEnable; + SVGA3dMultisampleRastEnable multisampleEnable; + uint8 antialiasedLineEnable; + float lineWidth; + uint8 lineStippleEnable; + uint8 lineStippleFactor; + uint16 lineStipplePattern; + uint8 forcedSampleCount; + uint8 mustBeZero[3]; +} SVGACOTableDXRasterizerStateEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineRasterizerState { + SVGA3dRasterizerStateId rasterizerId; + + uint8 fillMode; + SVGA3dCullMode cullMode; + uint8 frontCounterClockwise; + uint8 provokingVertexLast; + int32 depthBias; + float depthBiasClamp; + float slopeScaledDepthBias; + uint8 depthClipEnable; + uint8 scissorEnable; + SVGA3dMultisampleRastEnable multisampleEnable; + uint8 antialiasedLineEnable; + float lineWidth; + uint8 lineStippleEnable; + uint8 lineStippleFactor; + uint16 lineStipplePattern; +} SVGA3dCmdDXDefineRasterizerState; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineRasterizerState_v2 { + SVGA3dRasterizerStateId rasterizerId; + + uint8 fillMode; + SVGA3dCullMode cullMode; + uint8 frontCounterClockwise; + uint8 provokingVertexLast; + int32 depthBias; + float depthBiasClamp; + float slopeScaledDepthBias; + uint8 
depthClipEnable; + uint8 scissorEnable; + SVGA3dMultisampleRastEnable multisampleEnable; + uint8 antialiasedLineEnable; + float lineWidth; + uint8 lineStippleEnable; + uint8 lineStippleFactor; + uint16 lineStipplePattern; + uint32 forcedSampleCount; +} SVGA3dCmdDXDefineRasterizerState_v2; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDestroyRasterizerState { + SVGA3dRasterizerStateId rasterizerId; +} SVGA3dCmdDXDestroyRasterizerState; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGA3dFilter filter; + uint8 addressU; + uint8 addressV; + uint8 addressW; + uint8 pad0; + float mipLODBias; + uint8 maxAnisotropy; + SVGA3dComparisonFunc comparisonFunc; + uint16 pad1; + SVGA3dRGBAFloat borderColor; + float minLOD; + float maxLOD; + uint32 pad2[6]; +} SVGACOTableDXSamplerEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineSamplerState { + SVGA3dSamplerId samplerId; + SVGA3dFilter filter; + uint8 addressU; + uint8 addressV; + uint8 addressW; + uint8 pad0; + float mipLODBias; + uint8 maxAnisotropy; + SVGA3dComparisonFunc comparisonFunc; + uint16 pad1; + SVGA3dRGBAFloat borderColor; + float minLOD; + float maxLOD; +} SVGA3dCmdDXDefineSamplerState; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDestroySamplerState { + SVGA3dSamplerId samplerId; +} SVGA3dCmdDXDestroySamplerState; +#pragma pack(pop) + +#define SVGADX_SIGNATURE_SEMANTIC_NAME_UNDEFINED 0 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_POSITION 1 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_CLIP_DISTANCE 2 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_CULL_DISTANCE 3 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_RENDER_TARGET_ARRAY_INDEX 4 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_VIEWPORT_ARRAY_INDEX 5 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_VERTEX_ID 6 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_PRIMITIVE_ID 7 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_INSTANCE_ID 8 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_IS_FRONT_FACE 9 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_SAMPLE_INDEX 10 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_EQ_0_EDGE_TESSFACTOR 11 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_EQ_0_EDGE_TESSFACTOR 12 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_EQ_1_EDGE_TESSFACTOR 13 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_EQ_1_EDGE_TESSFACTOR 14 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_INSIDE_TESSFACTOR 15 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_INSIDE_TESSFACTOR 16 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_U_EQ_0_EDGE_TESSFACTOR 17 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_V_EQ_0_EDGE_TESSFACTOR 18 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_W_EQ_0_EDGE_TESSFACTOR 19 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_INSIDE_TESSFACTOR 20 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_LINE_DETAIL_TESSFACTOR 21 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_LINE_DENSITY_TESSFACTOR 22 +#define SVGADX_SIGNATURE_SEMANTIC_NAME_MAX 23 typedef uint32 SVGA3dDXSignatureSemanticName; #define SVGADX_SIGNATURE_REGISTER_COMPONENT_UNKNOWN 0 @@ -1670,398 +1394,331 @@ typedef uint32 SVGA3dDXSignatureRegisterComponentType; #define SVGADX_SIGNATURE_MIN_PRECISION_DEFAULT 0 typedef uint32 SVGA3dDXSignatureMinPrecision; -typedef -#include "vmware_pack_begin.h" -struct SVGA3dDXSignatureEntry { - uint32 registerIndex; - SVGA3dDXSignatureSemanticName semanticName; - uint32 mask; /* Lower 4 bits represent X, Y, Z, W channels */ - SVGA3dDXSignatureRegisterComponentType componentType; - SVGA3dDXSignatureMinPrecision 
minPrecision; -} -#include "vmware_pack_end.h" -SVGA3dDXShaderSignatureEntry; +#pragma pack(push, 1) +typedef struct SVGA3dDXSignatureEntry { + uint32 registerIndex; + SVGA3dDXSignatureSemanticName semanticName; + uint32 mask; + SVGA3dDXSignatureRegisterComponentType componentType; + SVGA3dDXSignatureMinPrecision minPrecision; +} SVGA3dDXShaderSignatureEntry; +#pragma pack(pop) #define SVGADX_SIGNATURE_HEADER_VERSION_0 0x08a92d12 -/* - * The SVGA3dDXSignatureHeader structure is added after the shader - * body in the mob that is bound to the shader. It is followed by the - * specified number of SVGA3dDXSignatureEntry structures for each of - * the three types of signatures in the order (input, output, patch - * constants). - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dDXSignatureHeader { - uint32 headerVersion; - uint32 numInputSignatures; - uint32 numOutputSignatures; - uint32 numPatchConstantSignatures; -} -#include "vmware_pack_end.h" -SVGA3dDXShaderSignatureHeader; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineShader { - SVGA3dShaderId shaderId; - SVGA3dShaderType type; - uint32 sizeInBytes; /* Number of bytes of shader text. */ -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineShader; /* SVGA_3D_CMD_DX_DEFINE_SHADER */ - -typedef -#include "vmware_pack_begin.h" -struct SVGACOTableDXShaderEntry { - SVGA3dShaderType type; - uint32 sizeInBytes; - uint32 offsetInBytes; - SVGAMobId mobid; - uint32 pad[4]; -} -#include "vmware_pack_end.h" -SVGACOTableDXShaderEntry; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDestroyShader { - SVGA3dShaderId shaderId; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDestroyShader; /* SVGA_3D_CMD_DX_DESTROY_SHADER */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXBindShader { - uint32 cid; - uint32 shid; - SVGAMobId mobid; - uint32 offsetInBytes; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXBindShader; /* SVGA_3D_CMD_DX_BIND_SHADER */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXBindAllShader { - uint32 cid; - SVGAMobId mobid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXBindAllShader; /* SVGA_3D_CMD_DX_BIND_ALL_SHADER */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXCondBindAllShader { - uint32 cid; - SVGAMobId testMobid; - SVGAMobId mobid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXCondBindAllShader; /* SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER */ +#pragma pack(push, 1) +typedef struct SVGA3dDXSignatureHeader { + uint32 headerVersion; + uint32 numInputSignatures; + uint32 numOutputSignatures; + uint32 numPatchConstantSignatures; +} SVGA3dDXShaderSignatureHeader; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineShader { + SVGA3dShaderId shaderId; + SVGA3dShaderType type; + uint32 sizeInBytes; +} SVGA3dCmdDXDefineShader; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGACOTableDXShaderEntry { + SVGA3dShaderType type; + uint32 sizeInBytes; + uint32 offsetInBytes; + SVGAMobId mobid; + uint32 pad[4]; +} SVGACOTableDXShaderEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDestroyShader { + SVGA3dShaderId shaderId; +} SVGA3dCmdDXDestroyShader; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXBindShader { + uint32 cid; + uint32 shid; + SVGAMobId mobid; + uint32 offsetInBytes; +} SVGA3dCmdDXBindShader; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXBindAllShader { + uint32 cid; + SVGAMobId mobid; +} SVGA3dCmdDXBindAllShader; +#pragma pack(pop) + +#pragma 
pack(push, 1) +typedef struct SVGA3dCmdDXCondBindAllShader { + uint32 cid; + SVGAMobId testMobid; + SVGAMobId mobid; +} SVGA3dCmdDXCondBindAllShader; +#pragma pack(pop) -/* - * The maximum number of streamout decl's in each streamout entry. - */ #define SVGA3D_MAX_DX10_STREAMOUT_DECLS 64 #define SVGA3D_MAX_STREAMOUT_DECLS 512 -typedef -#include "vmware_pack_begin.h" -struct SVGA3dStreamOutputDeclarationEntry { - uint32 outputSlot; - uint32 registerIndex; - uint8 registerMask; - uint8 pad0; - uint16 pad1; - uint32 stream; -} -#include "vmware_pack_end.h" -SVGA3dStreamOutputDeclarationEntry; - -typedef -#include "vmware_pack_begin.h" -struct SVGAOTableStreamOutputEntry { - uint32 numOutputStreamEntries; - SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_DX10_STREAMOUT_DECLS]; - uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS]; - uint32 rasterizedStream; - uint32 numOutputStreamStrides; - uint32 mobid; - uint32 offsetInBytes; - uint8 usesMob; - uint8 pad0; - uint16 pad1; - uint32 pad2[246]; -} -#include "vmware_pack_end.h" -SVGACOTableDXStreamOutputEntry; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineStreamOutput { - SVGA3dStreamOutputId soid; - uint32 numOutputStreamEntries; - SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_DX10_STREAMOUT_DECLS]; - uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS]; - uint32 rasterizedStream; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineStreamOutput; /* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT */ - -/* - * Version 2 needed in order to start validating and using the - * rasterizedStream field. Unfortunately the device wasn't validating - * or using this field and the driver wasn't initializing it in shipped - * code, so a new version of the command is needed to allow that code - * to continue to work. Also added new numOutputStreamStrides field. 
- */ +#pragma pack(push, 1) +typedef struct SVGA3dStreamOutputDeclarationEntry { + uint32 outputSlot; + uint32 registerIndex; + uint8 registerMask; + uint8 pad0; + uint16 pad1; + uint32 stream; +} SVGA3dStreamOutputDeclarationEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGAOTableStreamOutputEntry { + uint32 numOutputStreamEntries; + SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_DX10_STREAMOUT_DECLS]; + uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS]; + uint32 rasterizedStream; + uint32 numOutputStreamStrides; + uint32 mobid; + uint32 offsetInBytes; + uint8 usesMob; + uint8 pad0; + uint16 pad1; + uint32 pad2[246]; +} SVGACOTableDXStreamOutputEntry; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineStreamOutput { + SVGA3dStreamOutputId soid; + uint32 numOutputStreamEntries; + SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_DX10_STREAMOUT_DECLS]; + uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS]; + uint32 rasterizedStream; +} SVGA3dCmdDXDefineStreamOutput; +#pragma pack(pop) #define SVGA3D_DX_SO_NO_RASTERIZED_STREAM 0xFFFFFFFF -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDefineStreamOutputWithMob { - SVGA3dStreamOutputId soid; - uint32 numOutputStreamEntries; - uint32 numOutputStreamStrides; - uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS]; - uint32 rasterizedStream; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDefineStreamOutputWithMob; -/* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXBindStreamOutput { - SVGA3dStreamOutputId soid; - uint32 mobid; - uint32 offsetInBytes; - uint32 sizeInBytes; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXBindStreamOutput; /* SVGA_3D_CMD_DX_BIND_STREAMOUTPUT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXDestroyStreamOutput { - SVGA3dStreamOutputId soid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXDestroyStreamOutput; /* SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetStreamOutput { - SVGA3dStreamOutputId soid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetStreamOutput; /* SVGA_3D_CMD_DX_SET_STREAMOUTPUT */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetMinLOD { - SVGA3dSurfaceId sid; - float minLOD; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetMinLOD; /* SVGA_3D_CMD_DX_SET_MIN_LOD */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint64 value; - uint32 mobId; - uint32 mobOffset; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXMobFence64; /* SVGA_3D_CMD_DX_MOB_FENCE_64 */ +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDefineStreamOutputWithMob { + SVGA3dStreamOutputId soid; + uint32 numOutputStreamEntries; + uint32 numOutputStreamStrides; + uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS]; + uint32 rasterizedStream; +} SVGA3dCmdDXDefineStreamOutputWithMob; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXBindStreamOutput { + SVGA3dStreamOutputId soid; + uint32 mobid; + uint32 offsetInBytes; + uint32 sizeInBytes; +} SVGA3dCmdDXBindStreamOutput; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXDestroyStreamOutput { + SVGA3dStreamOutputId soid; +} SVGA3dCmdDXDestroyStreamOutput; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetStreamOutput { + SVGA3dStreamOutputId soid; +} SVGA3dCmdDXSetStreamOutput; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetMinLOD { + SVGA3dSurfaceId sid; + float 
minLOD; +} SVGA3dCmdDXSetMinLOD; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint64 value; + uint32 mobId; + uint32 mobOffset; +} SVGA3dCmdDXMobFence64; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXSetCOTable { + uint32 cid; + uint32 mobid; + SVGACOTableType type; + uint32 validSizeInBytes; +} SVGA3dCmdDXSetCOTable; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXGrowCOTable { + uint32 cid; + uint32 mobid; + SVGACOTableType type; + uint32 validSizeInBytes; +} SVGA3dCmdDXGrowCOTable; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXReadbackCOTable { + uint32 cid; + SVGACOTableType type; +} SVGA3dCmdDXReadbackCOTable; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXCopyCOTableIntoMob { + uint32 cid; + SVGACOTableType type; + uint32 mobid; +} SVGA3dCmdDXCopyCOTableIntoMob; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXPredStagingCopy { + SVGA3dSurfaceId dstSid; + SVGA3dSurfaceId srcSid; + uint8 readback; + uint8 unsynchronized; + uint8 mustBeZero[2]; + +} SVGA3dCmdDXPredStagingCopy; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCmdDXStagingCopy { + SVGA3dSurfaceId dstSid; + SVGA3dSurfaceId srcSid; + uint8 readback; + uint8 unsynchronized; + uint8 mustBeZero[2]; + +} SVGA3dCmdDXStagingCopy; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCOTableData { + uint32 mobid; +} SVGA3dCOTableData; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dBufferBinding { + uint32 bufferId; + uint32 stride; + uint32 offset; +} SVGA3dBufferBinding; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dConstantBufferBinding { + uint32 sid; + uint32 offsetInBytes; + uint32 sizeInBytes; +} SVGA3dConstantBufferBinding; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGADXInputAssemblyMobFormat { + uint32 layoutId; + SVGA3dBufferBinding vertexBuffers[SVGA3D_DX_MAX_VERTEXBUFFERS]; + uint32 indexBufferSid; + uint32 pad; + uint32 indexBufferOffset; + uint32 indexBufferFormat; + uint32 topology; +} SVGADXInputAssemblyMobFormat; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGADXContextMobFormat { + SVGADXInputAssemblyMobFormat inputAssembly; + + struct { + uint32 blendStateId; + uint32 blendFactor[4]; + uint32 sampleMask; + uint32 depthStencilStateId; + uint32 stencilRef; + uint32 rasterizerStateId; + uint32 depthStencilViewId; + uint32 renderTargetViewIds[SVGA3D_DX_MAX_RENDER_TARGETS]; + } renderState; + + uint32 pad0[8]; + + struct { + uint32 targets[SVGA3D_DX_MAX_SOTARGETS]; + uint32 soid; + } streamOut; + + uint32 pad1[10]; + + uint32 uavSpliceIndex; + + uint8 numViewports; + uint8 numScissorRects; + uint16 pad2[1]; + + uint32 pad3[3]; + + SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS]; + uint32 pad4[32]; + + SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS]; + uint32 pad5[64]; + + struct { + uint32 queryID; + uint32 value; + } predication; + + SVGAMobId shaderIfaceMobid; + uint32 shaderIfaceOffset; + struct { + uint32 shaderId; + SVGA3dConstantBufferBinding + constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS]; + uint32 shaderResources[SVGA3D_DX_MAX_SRVIEWS]; + uint32 samplers[SVGA3D_DX_MAX_SAMPLERS]; + } shaderState[SVGA3D_NUM_SHADERTYPE]; + uint32 pad6[26]; + + SVGA3dQueryId queryID[SVGA3D_MAX_QUERY]; + + SVGA3dCOTableData cotables[SVGA_COTABLE_MAX]; + + uint32 pad7[64]; + + uint32 uaViewIds[SVGA3D_DX11_1_MAX_UAVIEWS]; + uint32 csuaViewIds[SVGA3D_DX11_1_MAX_UAVIEWS]; + + uint32 
pad8[188]; +} SVGADXContextMobFormat; +#pragma pack(pop) -/* - * SVGA3dCmdSetCOTable -- - * - * This command allows the guest to bind a mob to a context-object table. - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXSetCOTable { - uint32 cid; - uint32 mobid; - SVGACOTableType type; - uint32 validSizeInBytes; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */ +#define SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED 256 -/* - * Guests using SVGA_3D_CMD_DX_GROW_COTABLE are promising that - * the new COTable contains the same contents as the old one, except possibly - * for some new invalid entries at the end. - * - * If there is an old cotable mob bound, it also has to still be valid. - * - * (Otherwise, guests should use the DXSetCOTableBase command.) - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXGrowCOTable { - uint32 cid; - uint32 mobid; - SVGACOTableType type; - uint32 validSizeInBytes; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXGrowCOTable; /* SVGA_3D_CMD_DX_GROW_COTABLE */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXReadbackCOTable { - uint32 cid; - SVGACOTableType type; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXReadbackCOTable; /* SVGA_3D_CMD_DX_READBACK_COTABLE */ - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCOTableData { - uint32 mobid; -} -#include "vmware_pack_end.h" -SVGA3dCOTableData; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dBufferBinding { - uint32 bufferId; - uint32 stride; - uint32 offset; -} -#include "vmware_pack_end.h" -SVGA3dBufferBinding; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dConstantBufferBinding { - uint32 sid; - uint32 offsetInBytes; - uint32 sizeInBytes; -} -#include "vmware_pack_end.h" -SVGA3dConstantBufferBinding; - -typedef -#include "vmware_pack_begin.h" -struct SVGADXInputAssemblyMobFormat { - uint32 layoutId; - SVGA3dBufferBinding vertexBuffers[SVGA3D_DX_MAX_VERTEXBUFFERS]; - uint32 indexBufferSid; - uint32 pad; - uint32 indexBufferOffset; - uint32 indexBufferFormat; - uint32 topology; -} -#include "vmware_pack_end.h" -SVGADXInputAssemblyMobFormat; - -typedef -#include "vmware_pack_begin.h" -struct SVGADXContextMobFormat { - SVGADXInputAssemblyMobFormat inputAssembly; - - struct { - uint32 blendStateId; - uint32 blendFactor[4]; - uint32 sampleMask; - uint32 depthStencilStateId; - uint32 stencilRef; - uint32 rasterizerStateId; - uint32 depthStencilViewId; - uint32 renderTargetViewIds[SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS]; - } renderState; - - uint32 pad0[8]; - - struct { - uint32 targets[SVGA3D_DX_MAX_SOTARGETS]; - uint32 soid; - } streamOut; - - uint32 pad1[10]; - - uint32 uavSpliceIndex; - - uint8 numViewports; - uint8 numScissorRects; - uint16 pad2[1]; - - uint32 pad3[3]; - - SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS]; - uint32 pad4[32]; - - SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS]; - uint32 pad5[64]; - - struct { - uint32 queryID; - uint32 value; - } predication; - - SVGAMobId shaderIfaceMobid; - uint32 shaderIfaceOffset; - struct { - uint32 shaderId; - SVGA3dConstantBufferBinding constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS]; - uint32 shaderResources[SVGA3D_DX_MAX_SRVIEWS]; - uint32 samplers[SVGA3D_DX_MAX_SAMPLERS]; - } shaderState[SVGA3D_NUM_SHADERTYPE]; - uint32 pad6[26]; - - SVGA3dQueryId queryID[SVGA3D_MAX_QUERY]; - - SVGA3dCOTableData cotables[SVGA_COTABLE_MAX]; - - uint32 pad7[64]; - - uint32 uaViewIds[SVGA3D_DX11_1_MAX_UAVIEWS]; - uint32 csuaViewIds[SVGA3D_DX11_1_MAX_UAVIEWS]; - - uint32 pad8[188]; 
-} -#include "vmware_pack_end.h" -SVGADXContextMobFormat; +#pragma pack(push, 1) +typedef struct SVGADXShaderIfaceMobFormat { + struct { + uint32 numClassInstances; + uint32 iface[SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED]; + SVGA3dIfaceData data[SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED]; + } shaderIfaceState[SVGA3D_NUM_SHADERTYPE]; -/* - * There is conflicting documentation on max class instances (253 vs 256). The - * lower value is the one used throughout the device, but since mob format is - * more involved to increase if needed, conservatively use the higher one here. - */ -#define SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED 256 + uint32 pad0[1018]; +} SVGADXShaderIfaceMobFormat; +#pragma pack(pop) -typedef -#include "vmware_pack_begin.h" -struct SVGADXShaderIfaceMobFormat { - struct { - uint32 numClassInstances; - uint32 iface[SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED]; - SVGA3dIfaceData data[SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED]; - } shaderIfaceState[SVGA3D_NUM_SHADERTYPE]; - - uint32 pad0[1018]; -} -#include "vmware_pack_end.h" -SVGADXShaderIfaceMobFormat; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCmdDXTempSetContext { - uint32 dxcid; -} -#include "vmware_pack_end.h" -SVGA3dCmdDXTempSetContext; /* SVGA_3D_CMD_DX_TEMP_SET_CONTEXT */ - -#endif /* _SVGA3D_DX_H_ */ +#endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h index f4375a41b3aa..35494a728c7a 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h @@ -1,6 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /********************************************************** - * Copyright 2007-2019 VMware, Inc. + * Copyright 2012-2021 VMware, Inc. + * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -27,104 +27,59 @@ /* * svga3d_limits.h -- * - * SVGA 3d hardware limits + * SVGA 3d hardware limits */ -#ifndef _SVGA3D_LIMITS_H_ -#define _SVGA3D_LIMITS_H_ -#define INCLUDE_ALLOW_MODULE -#define INCLUDE_ALLOW_USERLEVEL -#define INCLUDE_ALLOW_VMCORE -#include "includeCheck.h" +#ifndef _SVGA3D_LIMITS_H_ +#define _SVGA3D_LIMITS_H_ -#define SVGA3D_NUM_CLIPPLANES 6 -#define SVGA3D_MAX_CONTEXT_IDS 256 -#define SVGA3D_MAX_SURFACE_IDS (32 * 1024) +#define SVGA3D_HB_MAX_CONTEXT_IDS 256 +#define SVGA3D_HB_MAX_SURFACE_IDS (32 * 1024) -/* - * While there are separate bind-points for RenderTargetViews and - * UnorderedAccessViews in a DXContext, there is in fact one shared - * semantic space that the guest-driver can use on any given draw call. - * So there are really only 8 slots that can be spilt up between them, with the - * spliceIndex controlling where the UAV's sit in the collapsed array. - */ -#define SVGA3D_MAX_RENDER_TARGETS 8 -#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS (SVGA3D_MAX_RENDER_TARGETS) -#define SVGA3D_MAX_UAVIEWS 8 -#define SVGA3D_DX11_1_MAX_UAVIEWS 64 +#define SVGA3D_DX_MAX_RENDER_TARGETS 8 +#define SVGA3D_DX11_MAX_UAVIEWS 8 +#define SVGA3D_DX11_1_MAX_UAVIEWS 64 +#define SVGA3D_MAX_UAVIEWS (SVGA3D_DX11_1_MAX_UAVIEWS) +#define SVGA3D_DX11_MAX_SIMULTANEOUS_RTUAV (SVGA3D_DX11_MAX_UAVIEWS) +#define SVGA3D_DX11_1_MAX_SIMULTANEOUS_RTUAV (SVGA3D_DX11_1_MAX_UAVIEWS) +#define SVGA3D_MAX_SIMULTANEOUS_RTUAV (SVGA3D_MAX_UAVIEWS) -/* - * Maximum canonical size of a surface in host-backed mode (pre-GBObjects). 
- */ #define SVGA3D_HB_MAX_SURFACE_SIZE MBYTES_2_BYTES(128) -/* - * Maximum ID a shader can be assigned on a given context. - */ -#define SVGA3D_MAX_SHADERIDS 5000 -/* - * Maximum number of shaders of a given type that can be defined - * (including all contexts). - */ -#define SVGA3D_MAX_SIMULTANEOUS_SHADERS 20000 +#define SVGA3D_MAX_SHADERIDS 5000 -#define SVGA3D_NUM_TEXTURE_UNITS 32 -#define SVGA3D_NUM_LIGHTS 8 +#define SVGA3D_MAX_SIMULTANEOUS_SHADERS 20000 -#define SVGA3D_MAX_VIDEOPROCESSOR_SAMPLERS 32 +#define SVGA3D_NUM_TEXTURE_UNITS 32 +#define SVGA3D_NUM_LIGHTS 8 + +#define SVGA3D_MAX_VIDEOPROCESSOR_SAMPLERS 32 -/* - * Maximum size in dwords of shader text the SVGA device will allow. - * Currently 8 MB. - */ #define SVGA3D_MAX_SHADER_MEMORY_BYTES (8 * 1024 * 1024) -#define SVGA3D_MAX_SHADER_MEMORY (SVGA3D_MAX_SHADER_MEMORY_BYTES / \ - sizeof(uint32)) +#define SVGA3D_MAX_SHADER_MEMORY \ + (SVGA3D_MAX_SHADER_MEMORY_BYTES / sizeof(uint32)) -/* - * The maximum value of threadGroupCount in each dimension - */ #define SVGA3D_MAX_SHADER_THREAD_GROUPS 65535 -#define SVGA3D_MAX_CLIP_PLANES 6 +#define SVGA3D_MAX_CLIP_PLANES 6 -/* - * This is the limit to the number of fixed-function texture - * transforms and texture coordinates we can support. It does *not* - * correspond to the number of texture image units (samplers) we - * support! - */ #define SVGA3D_MAX_TEXTURE_COORDS 8 -/* - * Number of faces in a cubemap. - */ #define SVGA3D_MAX_SURFACE_FACES 6 -/* - * Maximum number of array indexes in a GB surface (with DX enabled). - */ #define SVGA3D_SM4_MAX_SURFACE_ARRAYSIZE 512 #define SVGA3D_SM5_MAX_SURFACE_ARRAYSIZE 2048 #define SVGA3D_MAX_SURFACE_ARRAYSIZE SVGA3D_SM5_MAX_SURFACE_ARRAYSIZE -/* - * The maximum number of vertex arrays we're guaranteed to support in - * SVGA_3D_CMD_DRAWPRIMITIVES. - */ -#define SVGA3D_MAX_VERTEX_ARRAYS 32 +#define SVGA3D_MAX_VERTEX_ARRAYS 32 -/* - * The maximum number of primitive ranges we're guaranteed to support - * in SVGA_3D_CMD_DRAWPRIMITIVES. - */ #define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32 -/* - * The maximum number of samples that can be contained in a surface. - */ #define SVGA3D_MAX_SAMPLES 8 -#endif /* _SVGA3D_LIMITS_H_ */ +#define SVGA3D_MIN_SBX_DATA_SIZE (GBYTES_2_BYTES(1)) +#define SVGA3D_MAX_SBX_DATA_SIZE (GBYTES_2_BYTES(4)) + +#endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h index bdfc404c91e3..988d8509c472 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h @@ -1,6 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /********************************************************** * Copyright 1998-2015 VMware, Inc. 
+ * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -27,17 +27,13 @@ /* * svga3d_reg.h -- * - * SVGA 3d hardware definitions + * SVGA 3d hardware definitions */ -#ifndef _SVGA3D_REG_H_ -#define _SVGA3D_REG_H_ -#define INCLUDE_ALLOW_MODULE -#define INCLUDE_ALLOW_USERLEVEL -#define INCLUDE_ALLOW_VMCORE -#include "includeCheck.h" +#ifndef _SVGA3D_REG_H_ +#define _SVGA3D_REG_H_ #include "svga_reg.h" @@ -47,5 +43,4 @@ #include "svga3d_dx.h" #include "svga3d_devcaps.h" - -#endif /* _SVGA3D_REG_H_ */ +#endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h index 127eaf0a0a58..7d98fc48414e 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h @@ -1,1667 +1,1561 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -/************************************************************************** +/********************************************************** + * Copyright 2008-2021 VMware, Inc. + * SPDX-License-Identifier: GPL-2.0 OR MIT * - * Copyright 2008-2015 VMware, Inc., Palo Alto, CA., USA + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - **************************************************************************/ + **********************************************************/ /* * svga3d_surfacedefs.h -- * - * Surface definitions and inlineable utilities for SVGA3d. + * Surface definitions for SVGA3d. */ + + #ifndef _SVGA3D_SURFACEDEFS_H_ #define _SVGA3D_SURFACEDEFS_H_ -#define INCLUDE_ALLOW_USERLEVEL -#define INCLUDE_ALLOW_MODULE -#include "includeCheck.h" +#include "svga3d_types.h" -#include <linux/kernel.h> -#include <drm/vmwgfx_drm.h> +#ifdef __cplusplus +extern "C" { +#endif -#include "svga3d_reg.h" +struct SVGAUseCaps; -#define surf_size_struct struct drm_vmw_size +#if defined(_WIN32) && !defined(__GNUC__) -/* - * enum svga3d_block_desc - describes generic properties about formats. - */ -enum svga3d_block_desc { - /* Nothing special can be said about this format. */ - SVGA3DBLOCKDESC_NONE = 0, +#define STATIC_CONST __declspec(selectany) extern const +#else +#define STATIC_CONST static const +#endif + +typedef enum SVGA3dBlockDesc { - /* Format contains Blue/U data */ - SVGA3DBLOCKDESC_BLUE = 1 << 0, - SVGA3DBLOCKDESC_W = 1 << 0, - SVGA3DBLOCKDESC_BUMP_L = 1 << 0, + SVGA3DBLOCKDESC_NONE = 0, - /* Format contains Green/V data */ - SVGA3DBLOCKDESC_GREEN = 1 << 1, - SVGA3DBLOCKDESC_V = 1 << 1, + SVGA3DBLOCKDESC_BLUE = 1 << 0, + SVGA3DBLOCKDESC_W = 1 << 0, + SVGA3DBLOCKDESC_BUMP_L = 1 << 0, - /* Format contains Red/W/Luminance data */ - SVGA3DBLOCKDESC_RED = 1 << 2, - SVGA3DBLOCKDESC_U = 1 << 2, - SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, + SVGA3DBLOCKDESC_GREEN = 1 << 1, + SVGA3DBLOCKDESC_V = 1 << 1, - /* Format contains Alpha/Q data */ - SVGA3DBLOCKDESC_ALPHA = 1 << 3, - SVGA3DBLOCKDESC_Q = 1 << 3, + SVGA3DBLOCKDESC_RED = 1 << 2, + SVGA3DBLOCKDESC_U = 1 << 2, + SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, - /* Format is a buffer */ - SVGA3DBLOCKDESC_BUFFER = 1 << 4, + SVGA3DBLOCKDESC_ALPHA = 1 << 3, + SVGA3DBLOCKDESC_Q = 1 << 3, - /* Format is compressed */ - SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, + SVGA3DBLOCKDESC_BUFFER = 1 << 4, - /* Format uses IEEE floating point */ - SVGA3DBLOCKDESC_FP = 1 << 6, + SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, - /* Three separate blocks store data. */ - SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 7, + SVGA3DBLOCKDESC_FP = 1 << 6, + + SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 7, - /* 2 planes of Y, UV, e.g., NV12. */ SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 8, - /* 3 planes of separate Y, U, V, e.g., YV12. */ SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 9, - /* Block with a stencil channel */ - SVGA3DBLOCKDESC_STENCIL = 1 << 11, - - /* Typeless format */ - SVGA3DBLOCKDESC_TYPELESS = 1 << 12, - - /* Channels are signed integers */ - SVGA3DBLOCKDESC_SINT = 1 << 13, - - /* Channels are unsigned integers */ - SVGA3DBLOCKDESC_UINT = 1 << 14, - - /* Channels are normalized (when sampling) */ - SVGA3DBLOCKDESC_NORM = 1 << 15, - - /* Channels are in SRGB */ - SVGA3DBLOCKDESC_SRGB = 1 << 16, - - /* Shared exponent */ - SVGA3DBLOCKDESC_EXP = 1 << 17, - - /* Format contains color data. */ - SVGA3DBLOCKDESC_COLOR = 1 << 18, - /* Format contains depth data. */ - SVGA3DBLOCKDESC_DEPTH = 1 << 19, - /* Format contains bump data. */ - SVGA3DBLOCKDESC_BUMP = 1 << 20, - - /* Format contains YUV video data. */ - SVGA3DBLOCKDESC_YUV_VIDEO = 1 << 21, - - /* For mixed unsigned/signed formats. */ - SVGA3DBLOCKDESC_MIXED = 1 << 22, - - /* For distingushing CxV8U8. */ - SVGA3DBLOCKDESC_CX = 1 << 23, - - /* Different compressed format groups. 
*/ - SVGA3DBLOCKDESC_BC1 = 1 << 24, - SVGA3DBLOCKDESC_BC2 = 1 << 25, - SVGA3DBLOCKDESC_BC3 = 1 << 26, - SVGA3DBLOCKDESC_BC4 = 1 << 27, - SVGA3DBLOCKDESC_BC5 = 1 << 28, - SVGA3DBLOCKDESC_BC6H = 1 << 29, - SVGA3DBLOCKDESC_BC7 = 1 << 30, - - SVGA3DBLOCKDESC_A_UINT = SVGA3DBLOCKDESC_ALPHA | - SVGA3DBLOCKDESC_UINT | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_A_UNORM = SVGA3DBLOCKDESC_A_UINT | - SVGA3DBLOCKDESC_NORM, - SVGA3DBLOCKDESC_R_UINT = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_UINT | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_R_UNORM = SVGA3DBLOCKDESC_R_UINT | - SVGA3DBLOCKDESC_NORM, - SVGA3DBLOCKDESC_R_SINT = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_SINT | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_R_SNORM = SVGA3DBLOCKDESC_R_SINT | - SVGA3DBLOCKDESC_NORM, - SVGA3DBLOCKDESC_G_UINT = SVGA3DBLOCKDESC_GREEN | - SVGA3DBLOCKDESC_UINT | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_RG_UINT = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_GREEN | - SVGA3DBLOCKDESC_UINT | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_RG_UNORM = SVGA3DBLOCKDESC_RG_UINT | - SVGA3DBLOCKDESC_NORM, - SVGA3DBLOCKDESC_RG_SINT = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_GREEN | - SVGA3DBLOCKDESC_SINT | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_RG_SNORM = SVGA3DBLOCKDESC_RG_SINT | - SVGA3DBLOCKDESC_NORM, - SVGA3DBLOCKDESC_RGB_UINT = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_GREEN | - SVGA3DBLOCKDESC_BLUE | - SVGA3DBLOCKDESC_UINT | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_RGB_SINT = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_GREEN | - SVGA3DBLOCKDESC_BLUE | - SVGA3DBLOCKDESC_SINT | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_RGB_UNORM = SVGA3DBLOCKDESC_RGB_UINT | - SVGA3DBLOCKDESC_NORM, - SVGA3DBLOCKDESC_RGB_UNORM_SRGB = SVGA3DBLOCKDESC_RGB_UNORM | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_RGBA_UINT = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_GREEN | - SVGA3DBLOCKDESC_BLUE | - SVGA3DBLOCKDESC_ALPHA | - SVGA3DBLOCKDESC_UINT | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_RGBA_UNORM = SVGA3DBLOCKDESC_RGBA_UINT | - SVGA3DBLOCKDESC_NORM, - SVGA3DBLOCKDESC_RGBA_UNORM_SRGB = SVGA3DBLOCKDESC_RGBA_UNORM | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_RGBA_SINT = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_GREEN | - SVGA3DBLOCKDESC_BLUE | - SVGA3DBLOCKDESC_ALPHA | - SVGA3DBLOCKDESC_SINT | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_RGBA_SNORM = SVGA3DBLOCKDESC_RGBA_SINT | - SVGA3DBLOCKDESC_NORM, - SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_GREEN | - SVGA3DBLOCKDESC_BLUE | - SVGA3DBLOCKDESC_ALPHA | - SVGA3DBLOCKDESC_FP | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U | - SVGA3DBLOCKDESC_V | - SVGA3DBLOCKDESC_BUMP, - SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV | - SVGA3DBLOCKDESC_BUMP_L | - SVGA3DBLOCKDESC_MIXED | - SVGA3DBLOCKDESC_BUMP, - SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV | - SVGA3DBLOCKDESC_W | - SVGA3DBLOCKDESC_BUMP, - SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW | - SVGA3DBLOCKDESC_ALPHA | - SVGA3DBLOCKDESC_MIXED | - SVGA3DBLOCKDESC_BUMP, - SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U | - SVGA3DBLOCKDESC_V | - SVGA3DBLOCKDESC_W | - SVGA3DBLOCKDESC_Q | - SVGA3DBLOCKDESC_BUMP, - SVGA3DBLOCKDESC_L_UNORM = SVGA3DBLOCKDESC_LUMINANCE | - SVGA3DBLOCKDESC_UINT | - SVGA3DBLOCKDESC_NORM | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_LA_UNORM = SVGA3DBLOCKDESC_LUMINANCE | - SVGA3DBLOCKDESC_ALPHA | - SVGA3DBLOCKDESC_UINT | - SVGA3DBLOCKDESC_NORM | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_FP | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP | - SVGA3DBLOCKDESC_GREEN 
| - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP | - SVGA3DBLOCKDESC_BLUE | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_YUV_VIDEO | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA | - SVGA3DBLOCKDESC_YUV_VIDEO | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_RGB_EXP = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_GREEN | - SVGA3DBLOCKDESC_BLUE | - SVGA3DBLOCKDESC_EXP | - SVGA3DBLOCKDESC_COLOR, - - SVGA3DBLOCKDESC_COMP_TYPELESS = SVGA3DBLOCKDESC_COMPRESSED | - SVGA3DBLOCKDESC_TYPELESS, - SVGA3DBLOCKDESC_COMP_UNORM = SVGA3DBLOCKDESC_COMPRESSED | - SVGA3DBLOCKDESC_UINT | - SVGA3DBLOCKDESC_NORM | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_COMP_SNORM = SVGA3DBLOCKDESC_COMPRESSED | - SVGA3DBLOCKDESC_SINT | - SVGA3DBLOCKDESC_NORM | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_COMP_UNORM | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_BC1_COMP_TYPELESS = SVGA3DBLOCKDESC_BC1 | - SVGA3DBLOCKDESC_COMP_TYPELESS, - SVGA3DBLOCKDESC_BC1_COMP_UNORM = SVGA3DBLOCKDESC_BC1 | - SVGA3DBLOCKDESC_COMP_UNORM, - SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC1_COMP_UNORM | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_BC2_COMP_TYPELESS = SVGA3DBLOCKDESC_BC2 | - SVGA3DBLOCKDESC_COMP_TYPELESS, - SVGA3DBLOCKDESC_BC2_COMP_UNORM = SVGA3DBLOCKDESC_BC2 | - SVGA3DBLOCKDESC_COMP_UNORM, - SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC2_COMP_UNORM | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_BC3_COMP_TYPELESS = SVGA3DBLOCKDESC_BC3 | - SVGA3DBLOCKDESC_COMP_TYPELESS, - SVGA3DBLOCKDESC_BC3_COMP_UNORM = SVGA3DBLOCKDESC_BC3 | - SVGA3DBLOCKDESC_COMP_UNORM, - SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC3_COMP_UNORM | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_BC4_COMP_TYPELESS = SVGA3DBLOCKDESC_BC4 | - SVGA3DBLOCKDESC_COMP_TYPELESS, - SVGA3DBLOCKDESC_BC4_COMP_UNORM = SVGA3DBLOCKDESC_BC4 | - SVGA3DBLOCKDESC_COMP_UNORM, - SVGA3DBLOCKDESC_BC4_COMP_SNORM = SVGA3DBLOCKDESC_BC4 | - SVGA3DBLOCKDESC_COMP_SNORM, - SVGA3DBLOCKDESC_BC5_COMP_TYPELESS = SVGA3DBLOCKDESC_BC5 | - SVGA3DBLOCKDESC_COMP_TYPELESS, - SVGA3DBLOCKDESC_BC5_COMP_UNORM = SVGA3DBLOCKDESC_BC5 | - SVGA3DBLOCKDESC_COMP_UNORM, - SVGA3DBLOCKDESC_BC5_COMP_SNORM = SVGA3DBLOCKDESC_BC5 | - SVGA3DBLOCKDESC_COMP_SNORM, - SVGA3DBLOCKDESC_BC6H_COMP_TYPELESS = SVGA3DBLOCKDESC_BC6H | - SVGA3DBLOCKDESC_COMP_TYPELESS, - SVGA3DBLOCKDESC_BC6H_COMP_UF16 = SVGA3DBLOCKDESC_BC6H | - SVGA3DBLOCKDESC_COMPRESSED, - SVGA3DBLOCKDESC_BC6H_COMP_SF16 = SVGA3DBLOCKDESC_BC6H | - SVGA3DBLOCKDESC_COMPRESSED, - SVGA3DBLOCKDESC_BC7_COMP_TYPELESS = SVGA3DBLOCKDESC_BC7 | - SVGA3DBLOCKDESC_COMP_TYPELESS, - SVGA3DBLOCKDESC_BC7_COMP_UNORM = SVGA3DBLOCKDESC_BC7 | - SVGA3DBLOCKDESC_COMP_UNORM, - SVGA3DBLOCKDESC_BC7_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC7_COMP_UNORM | - SVGA3DBLOCKDESC_SRGB, - - SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_YUV_VIDEO | - SVGA3DBLOCKDESC_PLANAR_YUV | - SVGA3DBLOCKDESC_2PLANAR_YUV | - SVGA3DBLOCKDESC_COLOR, - SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_YUV_VIDEO | - SVGA3DBLOCKDESC_PLANAR_YUV | - SVGA3DBLOCKDESC_3PLANAR_YUV | - SVGA3DBLOCKDESC_COLOR, - - SVGA3DBLOCKDESC_DEPTH_UINT = SVGA3DBLOCKDESC_DEPTH | - SVGA3DBLOCKDESC_UINT, - SVGA3DBLOCKDESC_DEPTH_UNORM = SVGA3DBLOCKDESC_DEPTH_UINT | - SVGA3DBLOCKDESC_NORM, - SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH | - SVGA3DBLOCKDESC_STENCIL, - SVGA3DBLOCKDESC_DS_UINT = SVGA3DBLOCKDESC_DEPTH | - SVGA3DBLOCKDESC_STENCIL | - SVGA3DBLOCKDESC_UINT, - SVGA3DBLOCKDESC_DS_UNORM = SVGA3DBLOCKDESC_DS_UINT | - SVGA3DBLOCKDESC_NORM, - 
SVGA3DBLOCKDESC_DEPTH_FP = SVGA3DBLOCKDESC_DEPTH | - SVGA3DBLOCKDESC_FP, - - SVGA3DBLOCKDESC_UV_UINT = SVGA3DBLOCKDESC_UV | - SVGA3DBLOCKDESC_UINT, - SVGA3DBLOCKDESC_UV_SNORM = SVGA3DBLOCKDESC_UV | - SVGA3DBLOCKDESC_SINT | - SVGA3DBLOCKDESC_NORM, - SVGA3DBLOCKDESC_UVCX_SNORM = SVGA3DBLOCKDESC_UV_SNORM | - SVGA3DBLOCKDESC_CX, + SVGA3DBLOCKDESC_STENCIL = 1 << 11, + + SVGA3DBLOCKDESC_TYPELESS = 1 << 12, + + SVGA3DBLOCKDESC_SINT = 1 << 13, + + SVGA3DBLOCKDESC_UINT = 1 << 14, + + SVGA3DBLOCKDESC_NORM = 1 << 15, + + SVGA3DBLOCKDESC_SRGB = 1 << 16, + + SVGA3DBLOCKDESC_EXP = 1 << 17, + + SVGA3DBLOCKDESC_COLOR = 1 << 18, + + SVGA3DBLOCKDESC_DEPTH = 1 << 19, + + SVGA3DBLOCKDESC_BUMP = 1 << 20, + + SVGA3DBLOCKDESC_YUV_VIDEO = 1 << 21, + + SVGA3DBLOCKDESC_MIXED = 1 << 22, + + SVGA3DBLOCKDESC_CX = 1 << 23, + + SVGA3DBLOCKDESC_BC1 = 1 << 24, + SVGA3DBLOCKDESC_BC2 = 1 << 25, + SVGA3DBLOCKDESC_BC3 = 1 << 26, + SVGA3DBLOCKDESC_BC4 = 1 << 27, + SVGA3DBLOCKDESC_BC5 = 1 << 28, + SVGA3DBLOCKDESC_BC6H = 1 << 29, + SVGA3DBLOCKDESC_BC7 = 1 << 30, + SVGA3DBLOCKDESC_COMPRESSED_MASK = + SVGA3DBLOCKDESC_BC1 | SVGA3DBLOCKDESC_BC2 | + SVGA3DBLOCKDESC_BC3 | SVGA3DBLOCKDESC_BC4 | + SVGA3DBLOCKDESC_BC5 | SVGA3DBLOCKDESC_BC6H | + SVGA3DBLOCKDESC_BC7, + + SVGA3DBLOCKDESC_A_UINT = SVGA3DBLOCKDESC_ALPHA | SVGA3DBLOCKDESC_UINT | + SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_A_UNORM = SVGA3DBLOCKDESC_A_UINT | SVGA3DBLOCKDESC_NORM, + SVGA3DBLOCKDESC_R_UINT = SVGA3DBLOCKDESC_RED | SVGA3DBLOCKDESC_UINT | + SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_R_UNORM = SVGA3DBLOCKDESC_R_UINT | SVGA3DBLOCKDESC_NORM, + SVGA3DBLOCKDESC_R_SINT = SVGA3DBLOCKDESC_RED | SVGA3DBLOCKDESC_SINT | + SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_R_SNORM = SVGA3DBLOCKDESC_R_SINT | SVGA3DBLOCKDESC_NORM, + SVGA3DBLOCKDESC_G_UINT = SVGA3DBLOCKDESC_GREEN | SVGA3DBLOCKDESC_UINT | + SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_RG_UINT = SVGA3DBLOCKDESC_RED | SVGA3DBLOCKDESC_GREEN | + SVGA3DBLOCKDESC_UINT | SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_RG_UNORM = + SVGA3DBLOCKDESC_RG_UINT | SVGA3DBLOCKDESC_NORM, + SVGA3DBLOCKDESC_RG_SINT = SVGA3DBLOCKDESC_RED | SVGA3DBLOCKDESC_GREEN | + SVGA3DBLOCKDESC_SINT | SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_RG_SNORM = + SVGA3DBLOCKDESC_RG_SINT | SVGA3DBLOCKDESC_NORM, + SVGA3DBLOCKDESC_RGB_UINT = SVGA3DBLOCKDESC_RED | SVGA3DBLOCKDESC_GREEN | + SVGA3DBLOCKDESC_BLUE | SVGA3DBLOCKDESC_UINT | + SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_RGB_SINT = SVGA3DBLOCKDESC_RED | SVGA3DBLOCKDESC_GREEN | + SVGA3DBLOCKDESC_BLUE | SVGA3DBLOCKDESC_SINT | + SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_RGB_UNORM = + SVGA3DBLOCKDESC_RGB_UINT | SVGA3DBLOCKDESC_NORM, + SVGA3DBLOCKDESC_RGB_UNORM_SRGB = + SVGA3DBLOCKDESC_RGB_UNORM | SVGA3DBLOCKDESC_SRGB, + SVGA3DBLOCKDESC_RGBA_UINT = + SVGA3DBLOCKDESC_RED | SVGA3DBLOCKDESC_GREEN | + SVGA3DBLOCKDESC_BLUE | SVGA3DBLOCKDESC_ALPHA | + SVGA3DBLOCKDESC_UINT | SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_RGBA_UNORM = + SVGA3DBLOCKDESC_RGBA_UINT | SVGA3DBLOCKDESC_NORM, + SVGA3DBLOCKDESC_RGBA_UNORM_SRGB = + SVGA3DBLOCKDESC_RGBA_UNORM | SVGA3DBLOCKDESC_SRGB, + SVGA3DBLOCKDESC_RGBA_SINT = + SVGA3DBLOCKDESC_RED | SVGA3DBLOCKDESC_GREEN | + SVGA3DBLOCKDESC_BLUE | SVGA3DBLOCKDESC_ALPHA | + SVGA3DBLOCKDESC_SINT | SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_RGBA_SNORM = + SVGA3DBLOCKDESC_RGBA_SINT | SVGA3DBLOCKDESC_NORM, + SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RED | SVGA3DBLOCKDESC_GREEN | + SVGA3DBLOCKDESC_BLUE | SVGA3DBLOCKDESC_ALPHA | + SVGA3DBLOCKDESC_FP | SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_UV = + SVGA3DBLOCKDESC_U | 
SVGA3DBLOCKDESC_V | SVGA3DBLOCKDESC_BUMP, + SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV | SVGA3DBLOCKDESC_BUMP_L | + SVGA3DBLOCKDESC_MIXED | SVGA3DBLOCKDESC_BUMP, + SVGA3DBLOCKDESC_UVW = + SVGA3DBLOCKDESC_UV | SVGA3DBLOCKDESC_W | SVGA3DBLOCKDESC_BUMP, + SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW | SVGA3DBLOCKDESC_ALPHA | + SVGA3DBLOCKDESC_MIXED | SVGA3DBLOCKDESC_BUMP, + SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U | SVGA3DBLOCKDESC_V | + SVGA3DBLOCKDESC_W | SVGA3DBLOCKDESC_Q | + SVGA3DBLOCKDESC_BUMP, + SVGA3DBLOCKDESC_L_UNORM = SVGA3DBLOCKDESC_LUMINANCE | + SVGA3DBLOCKDESC_UINT | SVGA3DBLOCKDESC_NORM | + SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_LA_UNORM = SVGA3DBLOCKDESC_LUMINANCE | + SVGA3DBLOCKDESC_ALPHA | + SVGA3DBLOCKDESC_UINT | SVGA3DBLOCKDESC_NORM | + SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED | SVGA3DBLOCKDESC_FP | + SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP | SVGA3DBLOCKDESC_GREEN | + SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP | SVGA3DBLOCKDESC_BLUE | + SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_YUV_VIDEO | SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA | + SVGA3DBLOCKDESC_YUV_VIDEO | + SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_RGB_EXP = SVGA3DBLOCKDESC_RED | SVGA3DBLOCKDESC_GREEN | + SVGA3DBLOCKDESC_BLUE | SVGA3DBLOCKDESC_EXP | + SVGA3DBLOCKDESC_COLOR, + + SVGA3DBLOCKDESC_COMP_TYPELESS = + SVGA3DBLOCKDESC_COMPRESSED | SVGA3DBLOCKDESC_TYPELESS, + SVGA3DBLOCKDESC_COMP_UNORM = + SVGA3DBLOCKDESC_COMPRESSED | SVGA3DBLOCKDESC_UINT | + SVGA3DBLOCKDESC_NORM | SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_COMP_SNORM = + SVGA3DBLOCKDESC_COMPRESSED | SVGA3DBLOCKDESC_SINT | + SVGA3DBLOCKDESC_NORM | SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_COMP_UNORM_SRGB = + SVGA3DBLOCKDESC_COMP_UNORM | SVGA3DBLOCKDESC_SRGB, + SVGA3DBLOCKDESC_BC1_COMP_TYPELESS = + SVGA3DBLOCKDESC_BC1 | SVGA3DBLOCKDESC_COMP_TYPELESS, + SVGA3DBLOCKDESC_BC1_COMP_UNORM = + SVGA3DBLOCKDESC_BC1 | SVGA3DBLOCKDESC_COMP_UNORM, + SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB = + SVGA3DBLOCKDESC_BC1_COMP_UNORM | SVGA3DBLOCKDESC_SRGB, + SVGA3DBLOCKDESC_BC2_COMP_TYPELESS = + SVGA3DBLOCKDESC_BC2 | SVGA3DBLOCKDESC_COMP_TYPELESS, + SVGA3DBLOCKDESC_BC2_COMP_UNORM = + SVGA3DBLOCKDESC_BC2 | SVGA3DBLOCKDESC_COMP_UNORM, + SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB = + SVGA3DBLOCKDESC_BC2_COMP_UNORM | SVGA3DBLOCKDESC_SRGB, + SVGA3DBLOCKDESC_BC3_COMP_TYPELESS = + SVGA3DBLOCKDESC_BC3 | SVGA3DBLOCKDESC_COMP_TYPELESS, + SVGA3DBLOCKDESC_BC3_COMP_UNORM = + SVGA3DBLOCKDESC_BC3 | SVGA3DBLOCKDESC_COMP_UNORM, + SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB = + SVGA3DBLOCKDESC_BC3_COMP_UNORM | SVGA3DBLOCKDESC_SRGB, + SVGA3DBLOCKDESC_BC4_COMP_TYPELESS = + SVGA3DBLOCKDESC_BC4 | SVGA3DBLOCKDESC_COMP_TYPELESS, + SVGA3DBLOCKDESC_BC4_COMP_UNORM = + SVGA3DBLOCKDESC_BC4 | SVGA3DBLOCKDESC_COMP_UNORM, + SVGA3DBLOCKDESC_BC4_COMP_SNORM = + SVGA3DBLOCKDESC_BC4 | SVGA3DBLOCKDESC_COMP_SNORM, + SVGA3DBLOCKDESC_BC5_COMP_TYPELESS = + SVGA3DBLOCKDESC_BC5 | SVGA3DBLOCKDESC_COMP_TYPELESS, + SVGA3DBLOCKDESC_BC5_COMP_UNORM = + SVGA3DBLOCKDESC_BC5 | SVGA3DBLOCKDESC_COMP_UNORM, + SVGA3DBLOCKDESC_BC5_COMP_SNORM = + SVGA3DBLOCKDESC_BC5 | SVGA3DBLOCKDESC_COMP_SNORM, + SVGA3DBLOCKDESC_BC6H_COMP_TYPELESS = + SVGA3DBLOCKDESC_BC6H | SVGA3DBLOCKDESC_COMP_TYPELESS, + SVGA3DBLOCKDESC_BC6H_COMP_UF16 = + SVGA3DBLOCKDESC_BC6H | SVGA3DBLOCKDESC_COMPRESSED, + SVGA3DBLOCKDESC_BC6H_COMP_SF16 = + SVGA3DBLOCKDESC_BC6H | SVGA3DBLOCKDESC_COMPRESSED, + SVGA3DBLOCKDESC_BC7_COMP_TYPELESS = + SVGA3DBLOCKDESC_BC7 | 
SVGA3DBLOCKDESC_COMP_TYPELESS, + SVGA3DBLOCKDESC_BC7_COMP_UNORM = + SVGA3DBLOCKDESC_BC7 | SVGA3DBLOCKDESC_COMP_UNORM, + SVGA3DBLOCKDESC_BC7_COMP_UNORM_SRGB = + SVGA3DBLOCKDESC_BC7_COMP_UNORM | SVGA3DBLOCKDESC_SRGB, + + SVGA3DBLOCKDESC_NV12 = + SVGA3DBLOCKDESC_YUV_VIDEO | SVGA3DBLOCKDESC_PLANAR_YUV | + SVGA3DBLOCKDESC_2PLANAR_YUV | SVGA3DBLOCKDESC_COLOR, + SVGA3DBLOCKDESC_YV12 = + SVGA3DBLOCKDESC_YUV_VIDEO | SVGA3DBLOCKDESC_PLANAR_YUV | + SVGA3DBLOCKDESC_3PLANAR_YUV | SVGA3DBLOCKDESC_COLOR, + + SVGA3DBLOCKDESC_DEPTH_UINT = + SVGA3DBLOCKDESC_DEPTH | SVGA3DBLOCKDESC_UINT, + SVGA3DBLOCKDESC_DEPTH_UNORM = + SVGA3DBLOCKDESC_DEPTH_UINT | SVGA3DBLOCKDESC_NORM, + SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH | SVGA3DBLOCKDESC_STENCIL, + SVGA3DBLOCKDESC_DS_UINT = SVGA3DBLOCKDESC_DEPTH | + SVGA3DBLOCKDESC_STENCIL | + SVGA3DBLOCKDESC_UINT, + SVGA3DBLOCKDESC_DS_UNORM = + SVGA3DBLOCKDESC_DS_UINT | SVGA3DBLOCKDESC_NORM, + SVGA3DBLOCKDESC_DEPTH_FP = SVGA3DBLOCKDESC_DEPTH | SVGA3DBLOCKDESC_FP, + + SVGA3DBLOCKDESC_UV_UINT = SVGA3DBLOCKDESC_UV | SVGA3DBLOCKDESC_UINT, + SVGA3DBLOCKDESC_UV_SNORM = SVGA3DBLOCKDESC_UV | SVGA3DBLOCKDESC_SINT | + SVGA3DBLOCKDESC_NORM, + SVGA3DBLOCKDESC_UVCX_SNORM = + SVGA3DBLOCKDESC_UV_SNORM | SVGA3DBLOCKDESC_CX, SVGA3DBLOCKDESC_UVWQ_SNORM = SVGA3DBLOCKDESC_UVWQ | SVGA3DBLOCKDESC_SINT | SVGA3DBLOCKDESC_NORM, -}; +} SVGA3dBlockDesc; -struct svga3d_channel_def { +typedef struct SVGA3dChannelDef { union { - u8 blue; - u8 w_bump; - u8 l_bump; - u8 uv_video; - u8 u_video; + uint8 blue; + uint8 w_bump; + uint8 l_bump; + uint8 uv_video; + uint8 u_video; }; union { - u8 green; - u8 stencil; - u8 v_bump; - u8 v_video; + uint8 green; + uint8 stencil; + uint8 v_bump; + uint8 v_video; }; union { - u8 red; - u8 u_bump; - u8 luminance; - u8 y_video; - u8 depth; - u8 data; + uint8 red; + uint8 u_bump; + uint8 luminance; + uint8 y_video; + uint8 depth; + uint8 data; }; union { - u8 alpha; - u8 q_bump; - u8 exp; + uint8 alpha; + uint8 q_bump; + uint8 exp; }; -}; +} SVGA3dChannelDef; -/* - * struct svga3d_surface_desc - describes the actual pixel data. 
- * - * @format: Format - * @block_desc: Block description - * @block_size: Dimensions in pixels of a block - * @bytes_per_block: Size of block in bytes - * @pitch_bytes_per_block: Size of a block in bytes for purposes of pitch - * @bit_depth: Channel bit depths - * @bit_offset: Channel bit masks (in bits offset from the start of the pointer) - */ -struct svga3d_surface_desc { +typedef struct SVGA3dSurfaceDesc { SVGA3dSurfaceFormat format; - enum svga3d_block_desc block_desc; - - surf_size_struct block_size; - u32 bytes_per_block; - u32 pitch_bytes_per_block; - - struct svga3d_channel_def bit_depth; - struct svga3d_channel_def bit_offset; -}; - -static const struct svga3d_surface_desc svga3d_surface_descs[] = { - {SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE, - {1, 1, 1}, 0, 0, - {{0}, {0}, {0}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB_UNORM, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {0}}, - {{0}, {8}, {16}, {24}}}, - - {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA_UNORM, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {8}}, - {{0}, {8}, {16}, {24}}}, - - {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB_UNORM, - {1, 1, 1}, 2, 2, - {{5}, {6}, {5}, {0}}, - {{0}, {5}, {11}, {0}}}, - - {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB_UNORM, - {1, 1, 1}, 2, 2, - {{5}, {5}, {5}, {0}}, - {{0}, {5}, {10}, {0}}}, - - {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA_UNORM, - {1, 1, 1}, 2, 2, - {{5}, {5}, {5}, {1}}, - {{0}, {5}, {10}, {15}}}, - - {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA_UNORM, - {1, 1, 1}, 2, 2, - {{4}, {4}, {4}, {4}}, - {{0}, {4}, {8}, {12}}}, - - {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH_UNORM, - {1, 1, 1}, 4, 4, - {{0}, {0}, {32}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH_UNORM, - {1, 1, 1}, 2, 2, - {{0}, {0}, {16}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS_UNORM, - {1, 1, 1}, 4, 4, - {{0}, {8}, {24}, {0}}, - {{0}, {0}, {8}, {0}}}, - - {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS_UNORM, - {1, 1, 1}, 2, 2, - {{0}, {1}, {15}, {0}}, - {{0}, {0}, {1}, {0}}}, - - {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_L_UNORM, - {1, 1, 1}, 1, 1, - {{0}, {0}, {8}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA_UNORM, - {1, 1, 1}, 1, 1, - {{0}, {0}, {4}, {4}}, - {{0}, {0}, {0}, {4}}}, - - {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_L_UNORM, - {1, 1, 1}, 2, 2, - {{0}, {0}, {16}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA_UNORM, - {1, 1, 1}, 2, 2, - {{0}, {0}, {8}, {8}}, - {{0}, {0}, {0}, {8}}}, - - {SVGA3D_DXT1, SVGA3DBLOCKDESC_BC1_COMP_UNORM, - {4, 4, 1}, 8, 8, - {{0}, {0}, {64}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_DXT2, SVGA3DBLOCKDESC_BC2_COMP_UNORM, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_DXT3, SVGA3DBLOCKDESC_BC2_COMP_UNORM, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_DXT4, SVGA3DBLOCKDESC_BC3_COMP_UNORM, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_DXT5, SVGA3DBLOCKDESC_BC3_COMP_UNORM, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV_SNORM, - {1, 1, 1}, 2, 2, - {{0}, {8}, {8}, {0}}, - {{0}, {8}, {0}, {0}}}, - - {SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 2, 2, - {{6}, {5}, {5}, {0}}, - {{10}, {5}, {0}, {0}}}, - - {SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {0}}, - {{16}, {8}, {0}, {0}}}, - - {SVGA3D_FORMAT_DEAD1, SVGA3DBLOCKDESC_NONE, - {1, 1, 1}, 3, 3, - {{8}, {8}, {8}, {0}}, - {{16}, 
{8}, {0}, {0}}}, - - {SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP, - {1, 1, 1}, 8, 8, - {{16}, {16}, {16}, {16}}, - {{32}, {16}, {0}, {48}}}, - - {SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP, - {1, 1, 1}, 16, 16, - {{32}, {32}, {32}, {32}}, - {{64}, {32}, {0}, {96}}}, - - {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA_UNORM, - {1, 1, 1}, 4, 4, - {{10}, {10}, {10}, {2}}, - {{0}, {10}, {20}, {30}}}, - - {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV_SNORM, - {1, 1, 1}, 2, 2, - {{0}, {8}, {8}, {0}}, - {{0}, {8}, {0}, {0}}}, - - {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ_SNORM, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {8}}, - {{16}, {8}, {0}, {24}}}, - - {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UVCX_SNORM, - {1, 1, 1}, 2, 2, - {{0}, {8}, {8}, {0}}, - {{0}, {8}, {0}, {0}}}, - - {SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {0}}, - {{16}, {8}, {0}, {0}}}, - - {SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA, - {1, 1, 1}, 4, 4, - {{10}, {10}, {10}, {2}}, - {{20}, {10}, {0}, {30}}}, - - {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_A_UNORM, - {1, 1, 1}, 1, 1, - {{0}, {0}, {0}, {8}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP, - {1, 1, 1}, 2, 2, - {{0}, {0}, {16}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP, - {1, 1, 1}, 4, 4, - {{0}, {0}, {32}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP, - {1, 1, 1}, 4, 4, - {{0}, {16}, {16}, {0}}, - {{0}, {16}, {0}, {0}}}, - - {SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP, - {1, 1, 1}, 8, 8, - {{0}, {32}, {32}, {0}}, - {{0}, {32}, {0}, {0}}}, - - {SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER, - {1, 1, 1}, 1, 1, - {{0}, {0}, {8}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH_UNORM, - {1, 1, 1}, 4, 4, - {{0}, {0}, {24}, {0}}, - {{0}, {0}, {8}, {0}}}, - - {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV_SNORM, - {1, 1, 1}, 4, 4, - {{0}, {16}, {16}, {0}}, - {{0}, {16}, {0}, {0}}}, - - {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG_UNORM, - {1, 1, 1}, 4, 4, - {{0}, {16}, {16}, {0}}, - {{0}, {16}, {0}, {0}}}, - - {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA_UNORM, - {1, 1, 1}, 8, 8, - {{16}, {16}, {16}, {16}}, - {{32}, {16}, {0}, {48}}}, - - {SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV, - {2, 1, 1}, 4, 4, - {{8}, {0}, {8}, {0}}, - {{0}, {0}, {8}, {0}}}, - - {SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV, - {2, 1, 1}, 4, 4, - {{8}, {0}, {8}, {0}}, - {{8}, {0}, {0}, {0}}}, - - {SVGA3D_NV12, SVGA3DBLOCKDESC_NV12, - {2, 2, 1}, 6, 2, - {{0}, {0}, {48}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_FORMAT_DEAD2, SVGA3DBLOCKDESC_NONE, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {8}}, - {{0}, {8}, {16}, {24}}}, - - {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 16, 16, - {{32}, {32}, {32}, {32}}, - {{64}, {32}, {0}, {96}}}, - - {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA_UINT, - {1, 1, 1}, 16, 16, - {{32}, {32}, {32}, {32}}, - {{64}, {32}, {0}, {96}}}, - - {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_RGBA_SINT, - {1, 1, 1}, 16, 16, - {{32}, {32}, {32}, {32}}, - {{64}, {32}, {0}, {96}}}, - - {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 12, 12, - {{32}, {32}, {32}, {0}}, - {{64}, {32}, {0}, {0}}}, - - {SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP, - {1, 1, 1}, 12, 12, - {{32}, {32}, {32}, {0}}, - {{64}, {32}, {0}, {0}}}, - - {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB_UINT, - {1, 1, 1}, 12, 12, - {{32}, {32}, {32}, {0}}, - {{64}, {32}, {0}, {0}}}, - - {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_RGB_SINT, - {1, 1, 1}, 12, 12, - {{32}, {32}, {32}, {0}}, - {{64}, {32}, {0}, {0}}}, - - {SVGA3D_R16G16B16A16_TYPELESS, 
SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 8, 8, - {{16}, {16}, {16}, {16}}, - {{32}, {16}, {0}, {48}}}, - - {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA_UINT, - {1, 1, 1}, 8, 8, - {{16}, {16}, {16}, {16}}, - {{32}, {16}, {0}, {48}}}, - - {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_RGBA_SNORM, - {1, 1, 1}, 8, 8, - {{16}, {16}, {16}, {16}}, - {{32}, {16}, {0}, {48}}}, - - {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_RGBA_SINT, - {1, 1, 1}, 8, 8, - {{16}, {16}, {16}, {16}}, - {{32}, {16}, {0}, {48}}}, - - {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 8, 8, - {{0}, {32}, {32}, {0}}, - {{0}, {32}, {0}, {0}}}, - - {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG_UINT, - {1, 1, 1}, 8, 8, - {{0}, {32}, {32}, {0}}, - {{0}, {32}, {0}, {0}}}, - - {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_RG_SINT, - {1, 1, 1}, 8, 8, - {{0}, {32}, {32}, {0}}, - {{0}, {32}, {0}, {0}}}, - - {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 8, 8, - {{0}, {8}, {32}, {0}}, - {{0}, {32}, {0}, {0}}}, - - {SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 8, 8, - {{0}, {8}, {32}, {0}}, - {{0}, {32}, {0}, {0}}}, - - {SVGA3D_R32_FLOAT_X8X24, SVGA3DBLOCKDESC_R_FP, - {1, 1, 1}, 8, 8, - {{0}, {0}, {32}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_X32_G8X24_UINT, SVGA3DBLOCKDESC_G_UINT, - {1, 1, 1}, 8, 8, - {{0}, {8}, {0}, {0}}, - {{0}, {32}, {0}, {0}}}, - - {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 4, 4, - {{10}, {10}, {10}, {2}}, - {{20}, {10}, {0}, {30}}}, - - {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA_UINT, - {1, 1, 1}, 4, 4, - {{10}, {10}, {10}, {2}}, - {{20}, {10}, {0}, {30}}}, - - {SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP, - {1, 1, 1}, 4, 4, - {{10}, {11}, {11}, {0}}, - {{22}, {11}, {0}, {0}}}, - - {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {8}}, - {{16}, {8}, {0}, {24}}}, - - {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {8}}, - {{16}, {8}, {0}, {24}}}, - - {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_UNORM_SRGB, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {8}}, - {{16}, {8}, {0}, {24}}}, - - {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA_UINT, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {8}}, - {{16}, {8}, {0}, {24}}}, - - {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA_SINT, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {8}}, - {{16}, {8}, {0}, {24}}}, - - {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 4, 4, - {{0}, {16}, {16}, {0}}, - {{0}, {16}, {0}, {0}}}, - - {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_UINT, - {1, 1, 1}, 4, 4, - {{0}, {16}, {16}, {0}}, - {{0}, {16}, {0}, {0}}}, - - {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_RG_SINT, - {1, 1, 1}, 4, 4, - {{0}, {16}, {16}, {0}}, - {{0}, {16}, {0}, {0}}}, - - {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 4, 4, - {{0}, {0}, {32}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH_FP, - {1, 1, 1}, 4, 4, - {{0}, {0}, {32}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_R_UINT, - {1, 1, 1}, 4, 4, - {{0}, {0}, {32}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_R_SINT, - {1, 1, 1}, 4, 4, - {{0}, {0}, {32}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 4, 4, - {{0}, {8}, {24}, {0}}, - {{0}, {24}, {0}, {0}}}, - - {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS_UNORM, - {1, 1, 1}, 4, 4, - {{0}, {8}, {24}, {0}}, - {{0}, {24}, {0}, {0}}}, - - {SVGA3D_R24_UNORM_X8, SVGA3DBLOCKDESC_R_UNORM, - {1, 1, 1}, 4, 4, - 
{{0}, {0}, {24}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_X24_G8_UINT, SVGA3DBLOCKDESC_G_UINT, - {1, 1, 1}, 4, 4, - {{0}, {8}, {0}, {0}}, - {{0}, {24}, {0}, {0}}}, - - {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 2, 2, - {{0}, {8}, {8}, {0}}, - {{0}, {8}, {0}, {0}}}, - - {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG_UNORM, - {1, 1, 1}, 2, 2, - {{0}, {8}, {8}, {0}}, - {{0}, {8}, {0}, {0}}}, - - {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG_UINT, - {1, 1, 1}, 2, 2, - {{0}, {8}, {8}, {0}}, - {{0}, {8}, {0}, {0}}}, - - {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_RG_SINT, - {1, 1, 1}, 2, 2, - {{0}, {8}, {8}, {0}}, - {{0}, {8}, {0}, {0}}}, - - {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 2, 2, - {{0}, {0}, {16}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_R_UNORM, - {1, 1, 1}, 2, 2, - {{0}, {0}, {16}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_R_UINT, - {1, 1, 1}, 2, 2, - {{0}, {0}, {16}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_R_SNORM, - {1, 1, 1}, 2, 2, - {{0}, {0}, {16}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_R_SINT, - {1, 1, 1}, 2, 2, - {{0}, {0}, {16}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 1, 1, - {{0}, {0}, {8}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_R_UNORM, - {1, 1, 1}, 1, 1, - {{0}, {0}, {8}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_R_UINT, - {1, 1, 1}, 1, 1, - {{0}, {0}, {8}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_R_SNORM, - {1, 1, 1}, 1, 1, - {{0}, {0}, {8}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_R_SINT, - {1, 1, 1}, 1, 1, - {{0}, {0}, {8}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_P8, SVGA3DBLOCKDESC_NONE, - {1, 1, 1}, 1, 1, - {{0}, {0}, {8}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGB_EXP, - {1, 1, 1}, 4, 4, - {{9}, {9}, {9}, {5}}, - {{18}, {9}, {0}, {27}}}, - - {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_NONE, - {2, 1, 1}, 4, 4, - {{0}, {8}, {8}, {0}}, - {{0}, {0}, {8}, {0}}}, - - {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_NONE, - {2, 1, 1}, 4, 4, - {{0}, {8}, {8}, {0}}, - {{0}, {8}, {0}, {0}}}, - - {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_BC1_COMP_TYPELESS, - {4, 4, 1}, 8, 8, - {{0}, {0}, {64}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB, - {4, 4, 1}, 8, 8, - {{0}, {0}, {64}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_BC2_COMP_TYPELESS, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_BC3_COMP_TYPELESS, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_BC4_COMP_TYPELESS, - {4, 4, 1}, 8, 8, - {{0}, {0}, {64}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_ATI1, SVGA3DBLOCKDESC_BC4_COMP_UNORM, - {4, 4, 1}, 8, 8, - {{0}, {0}, {64}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_BC4_COMP_SNORM, - {4, 4, 1}, 8, 8, - {{0}, {0}, {64}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_BC5_COMP_TYPELESS, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - 
{{0}, {0}, {0}, {0}}}, - - {SVGA3D_ATI2, SVGA3DBLOCKDESC_BC5_COMP_UNORM, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_BC5_COMP_SNORM, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM, - {1, 1, 1}, 4, 4, - {{10}, {10}, {10}, {2}}, - {{20}, {10}, {0}, {30}}}, - - {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {8}}, - {{0}, {8}, {16}, {24}}}, - - {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_UNORM_SRGB, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {8}}, - {{0}, {8}, {16}, {24}}}, - - {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {0}}, - {{0}, {8}, {16}, {24}}}, - - {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_UNORM_SRGB, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {0}}, - {{0}, {8}, {16}, {24}}}, - - {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH_UNORM, - {1, 1, 1}, 2, 2, - {{0}, {0}, {16}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH_UNORM, - {1, 1, 1}, 4, 4, - {{0}, {0}, {24}, {0}}, - {{0}, {0}, {8}, {0}}}, - - {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS_UNORM, - {1, 1, 1}, 4, 4, - {{0}, {8}, {24}, {0}}, - {{0}, {0}, {8}, {0}}}, - - {SVGA3D_YV12, SVGA3DBLOCKDESC_YV12, - {2, 2, 1}, 6, 2, - {{0}, {0}, {48}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP, - {1, 1, 1}, 16, 16, - {{32}, {32}, {32}, {32}}, - {{64}, {32}, {0}, {96}}}, - - {SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP, - {1, 1, 1}, 8, 8, - {{16}, {16}, {16}, {16}}, - {{32}, {16}, {0}, {48}}}, - - {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM, - {1, 1, 1}, 8, 8, - {{16}, {16}, {16}, {16}}, - {{32}, {16}, {0}, {48}}}, - - {SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP, - {1, 1, 1}, 8, 8, - {{0}, {32}, {32}, {0}}, - {{0}, {32}, {0}, {0}}}, - - {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM, - {1, 1, 1}, 4, 4, - {{10}, {10}, {10}, {2}}, - {{20}, {10}, {0}, {30}}}, - - {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA_SNORM, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {8}}, - {{16}, {8}, {0}, {24}}}, - - {SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP, - {1, 1, 1}, 4, 4, - {{0}, {16}, {16}, {0}}, - {{0}, {16}, {0}, {0}}}, - - {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG_UNORM, - {1, 1, 1}, 4, 4, - {{0}, {16}, {16}, {0}}, - {{0}, {16}, {0}, {0}}}, - - {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG_SNORM, - {1, 1, 1}, 4, 4, - {{0}, {16}, {16}, {0}}, - {{0}, {16}, {0}, {0}}}, - - {SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP, - {1, 1, 1}, 4, 4, - {{0}, {0}, {32}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG_SNORM, - {1, 1, 1}, 2, 2, - {{0}, {8}, {8}, {0}}, - {{0}, {8}, {0}, {0}}}, - - {SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP, - {1, 1, 1}, 2, 2, - {{0}, {0}, {16}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH_UNORM, - {1, 1, 1}, 2, 2, - {{0}, {0}, {16}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_A_UNORM, - {1, 1, 1}, 1, 1, - {{0}, {0}, {0}, {8}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_BC1_COMP_UNORM, - {4, 4, 1}, 8, 8, - {{0}, {0}, {64}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_BC2_COMP_UNORM, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_BC3_COMP_UNORM, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_B5G6R5_UNORM, 
SVGA3DBLOCKDESC_RGB_UNORM, - {1, 1, 1}, 2, 2, - {{5}, {6}, {5}, {0}}, - {{0}, {5}, {11}, {0}}}, - - {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM, - {1, 1, 1}, 2, 2, - {{5}, {5}, {5}, {1}}, - {{0}, {5}, {10}, {15}}}, - - {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {8}}, - {{0}, {8}, {16}, {24}}}, - - {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB_UNORM, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {0}}, - {{0}, {8}, {16}, {24}}}, - - {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_BC4_COMP_UNORM, - {4, 4, 1}, 8, 8, - {{0}, {0}, {64}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_BC5_COMP_UNORM, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_B4G4R4A4_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM, - {1, 1, 1}, 2, 2, - {{4}, {4}, {4}, {4}}, - {{0}, {4}, {8}, {12}}}, - - {SVGA3D_BC6H_TYPELESS, SVGA3DBLOCKDESC_BC6H_COMP_TYPELESS, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC6H_UF16, SVGA3DBLOCKDESC_BC6H_COMP_UF16, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC6H_SF16, SVGA3DBLOCKDESC_BC6H_COMP_SF16, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC7_TYPELESS, SVGA3DBLOCKDESC_BC7_COMP_TYPELESS, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC7_UNORM, SVGA3DBLOCKDESC_BC7_COMP_UNORM, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_BC7_UNORM_SRGB, SVGA3DBLOCKDESC_BC7_COMP_UNORM_SRGB, - {4, 4, 1}, 16, 16, - {{0}, {0}, {128}, {0}}, - {{0}, {0}, {0}, {0}}}, - - {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV, - {1, 1, 1}, 4, 4, - {{8}, {8}, {8}, {8}}, - {{0}, {8}, {16}, {24}}}, -}; - -static inline u32 clamped_umul32(u32 a, u32 b) -{ - uint64_t tmp = (uint64_t) a*b; - return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; -} - -/** - * svga3dsurface_get_desc - Look up the appropriate SVGA3dSurfaceDesc for the - * given format. - */ -static inline const struct svga3d_surface_desc * -svga3dsurface_get_desc(SVGA3dSurfaceFormat format) -{ - if (format < ARRAY_SIZE(svga3d_surface_descs)) - return &svga3d_surface_descs[format]; - - return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID]; -} - -/** - * svga3dsurface_get_mip_size - Given a base level size and the mip level, - * compute the size of the mip level. 
- */ -static inline surf_size_struct -svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level) -{ - surf_size_struct size; - - size.width = max_t(u32, base_level.width >> mip_level, 1); - size.height = max_t(u32, base_level.height >> mip_level, 1); - size.depth = max_t(u32, base_level.depth >> mip_level, 1); - size.pad64 = 0; - - return size; -} - -static inline void -svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc, - const surf_size_struct *pixel_size, - surf_size_struct *block_size) -{ - block_size->width = __KERNEL_DIV_ROUND_UP(pixel_size->width, - desc->block_size.width); - block_size->height = __KERNEL_DIV_ROUND_UP(pixel_size->height, - desc->block_size.height); - block_size->depth = __KERNEL_DIV_ROUND_UP(pixel_size->depth, - desc->block_size.depth); -} - -static inline bool -svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc) -{ - return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0; -} - -static inline u32 -svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc, - const surf_size_struct *size) -{ - u32 pitch; - surf_size_struct blocks; - - svga3dsurface_get_size_in_blocks(desc, size, &blocks); - - pitch = blocks.width * desc->pitch_bytes_per_block; - - return pitch; -} - -/** - * svga3dsurface_get_image_buffer_size - Calculates image buffer size. - * - * Return the number of bytes of buffer space required to store one image of a - * surface, optionally using the specified pitch. - * - * If pitch is zero, it is assumed that rows are tightly packed. - * - * This function is overflow-safe. If the result would have overflowed, instead - * we return MAX_UINT32. - */ -static inline u32 -svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc, - const surf_size_struct *size, - u32 pitch) -{ - surf_size_struct image_blocks; - u32 slice_size, total_size; - - svga3dsurface_get_size_in_blocks(desc, size, &image_blocks); - - if (svga3dsurface_is_planar_surface(desc)) { - total_size = clamped_umul32(image_blocks.width, - image_blocks.height); - total_size = clamped_umul32(total_size, image_blocks.depth); - total_size = clamped_umul32(total_size, desc->bytes_per_block); - return total_size; - } - - if (pitch == 0) - pitch = svga3dsurface_calculate_pitch(desc, size); - - slice_size = clamped_umul32(image_blocks.height, pitch); - total_size = clamped_umul32(slice_size, image_blocks.depth); - - return total_size; -} - -/** - * svga3dsurface_get_serialized_size - Get the serialized size for the image. - */ -static inline u32 -svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format, - surf_size_struct base_level_size, - u32 num_mip_levels, - u32 num_layers) -{ - const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); - u32 total_size = 0; - u32 mip; - - for (mip = 0; mip < num_mip_levels; mip++) { - surf_size_struct size = - svga3dsurface_get_mip_size(base_level_size, mip); - total_size += svga3dsurface_get_image_buffer_size(desc, - &size, 0); - } - - return total_size * num_layers; -} - -/** - * svga3dsurface_get_serialized_size_extended - Returns the number of bytes - * required for a surface with given parameters. Support for sample count. 
- */ -static inline u32 -svga3dsurface_get_serialized_size_extended(SVGA3dSurfaceFormat format, - surf_size_struct base_level_size, - u32 num_mip_levels, - u32 num_layers, - u32 num_samples) -{ - uint64_t total_size = - svga3dsurface_get_serialized_size(format, - base_level_size, - num_mip_levels, - num_layers); - total_size *= max_t(u32, 1, num_samples); - - return min_t(uint64_t, total_size, (uint64_t)U32_MAX); -} - -/** - * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel - * in an image (or volume). - * - * @width: The image width in pixels. - * @height: The image height in pixels - */ -static inline u32 -svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format, - u32 width, u32 height, - u32 x, u32 y, u32 z) -{ - const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); - const u32 bw = desc->block_size.width, bh = desc->block_size.height; - const u32 bd = desc->block_size.depth; - const u32 rowstride = __KERNEL_DIV_ROUND_UP(width, bw) * - desc->bytes_per_block; - const u32 imgstride = __KERNEL_DIV_ROUND_UP(height, bh) * rowstride; - const u32 offset = (z / bd * imgstride + - y / bh * rowstride + - x / bw * desc->bytes_per_block); - return offset; -} - -static inline u32 -svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format, - surf_size_struct baseLevelSize, - u32 numMipLevels, - u32 face, - u32 mip) - -{ - u32 offset; - u32 mipChainBytes; - u32 mipChainBytesToLevel; - u32 i; - const struct svga3d_surface_desc *desc; - surf_size_struct mipSize; - u32 bytes; - - desc = svga3dsurface_get_desc(format); - - mipChainBytes = 0; - mipChainBytesToLevel = 0; - for (i = 0; i < numMipLevels; i++) { - mipSize = svga3dsurface_get_mip_size(baseLevelSize, i); - bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0); - mipChainBytes += bytes; - if (i < mip) - mipChainBytesToLevel += bytes; - } - - offset = mipChainBytes * face + mipChainBytesToLevel; - - return offset; -} - - -/** - * svga3dsurface_is_gb_screen_target_format - Is the specified format usable as - * a ScreenTarget? - * (with just the GBObjects cap-bit - * set) - * @format: format to queried - * - * RETURNS: - * true if queried format is valid for screen targets - */ -static inline bool -svga3dsurface_is_gb_screen_target_format(SVGA3dSurfaceFormat format) -{ - return (format == SVGA3D_X8R8G8B8 || - format == SVGA3D_A8R8G8B8 || - format == SVGA3D_R5G6B5 || - format == SVGA3D_X1R5G5B5 || - format == SVGA3D_A1R5G5B5 || - format == SVGA3D_P8); -} - - -/** - * svga3dsurface_is_dx_screen_target_format - Is the specified format usable as - * a ScreenTarget? - * (with DX10 enabled) - * - * @format: format to queried - * - * Results: - * true if queried format is valid for screen targets - */ -static inline bool -svga3dsurface_is_dx_screen_target_format(SVGA3dSurfaceFormat format) -{ - return (format == SVGA3D_R8G8B8A8_UNORM || - format == SVGA3D_B8G8R8A8_UNORM || - format == SVGA3D_B8G8R8X8_UNORM); -} - - -/** - * svga3dsurface_is_screen_target_format - Is the specified format usable as a - * ScreenTarget? 
- * (for some combination of caps) - * - * @format: format to queried - * - * Results: - * true if queried format is valid for screen targets - */ -static inline bool -svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format) -{ - if (svga3dsurface_is_gb_screen_target_format(format)) { - return true; - } - return svga3dsurface_is_dx_screen_target_format(format); -} - -/** - * struct svga3dsurface_mip - Mimpmap level information - * @bytes: Bytes required in the backing store of this mipmap level. - * @img_stride: Byte stride per image. - * @row_stride: Byte stride per block row. - * @size: The size of the mipmap. - */ -struct svga3dsurface_mip { - size_t bytes; - size_t img_stride; - size_t row_stride; - struct drm_vmw_size size; - -}; - -/** - * struct svga3dsurface_cache - Cached surface information - * @desc: Pointer to the surface descriptor - * @mip: Array of mipmap level information. Valid size is @num_mip_levels. - * @mip_chain_bytes: Bytes required in the backing store for the whole chain - * of mip levels. - * @sheet_bytes: Bytes required in the backing store for a sheet - * representing a single sample. - * @num_mip_levels: Valid size of the @mip array. Number of mipmap levels in - * a chain. - * @num_layers: Number of slices in an array texture or number of faces in - * a cubemap texture. - */ -struct svga3dsurface_cache { - const struct svga3d_surface_desc *desc; - struct svga3dsurface_mip mip[DRM_VMW_MAX_MIP_LEVELS]; - size_t mip_chain_bytes; - size_t sheet_bytes; - u32 num_mip_levels; - u32 num_layers; -}; - -/** - * struct svga3dsurface_loc - Surface location - * @sheet: The multisample sheet. - * @sub_resource: Surface subresource. Defined as layer * num_mip_levels + - * mip_level. - * @x: X coordinate. - * @y: Y coordinate. - * @z: Z coordinate. 
- */ -struct svga3dsurface_loc { - u32 sheet; - u32 sub_resource; - u32 x, y, z; + SVGA3dBlockDesc blockDesc; + + SVGA3dSize blockSize; + uint32 bytesPerBlock; + uint32 pitchBytesPerBlock; + + SVGA3dChannelDef bitDepth; + SVGA3dChannelDef bitOffset; +} SVGA3dSurfaceDesc; + +STATIC_CONST SVGA3dSurfaceDesc g_SVGA3dSurfaceDescs[] = { + { SVGA3D_FORMAT_INVALID, + SVGA3DBLOCKDESC_NONE, + { 1, 1, 1 }, + 0, + 0, + { { 0 }, { 0 }, { 0 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_X8R8G8B8, + SVGA3DBLOCKDESC_RGB_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 0 } }, + { { 0 }, { 8 }, { 16 }, { 24 } } }, + + { SVGA3D_A8R8G8B8, + SVGA3DBLOCKDESC_RGBA_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 8 } }, + { { 0 }, { 8 }, { 16 }, { 24 } } }, + + { SVGA3D_R5G6B5, + SVGA3DBLOCKDESC_RGB_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 5 }, { 6 }, { 5 }, { 0 } }, + { { 0 }, { 5 }, { 11 }, { 0 } } }, + + { SVGA3D_X1R5G5B5, + SVGA3DBLOCKDESC_RGB_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 5 }, { 5 }, { 5 }, { 0 } }, + { { 0 }, { 5 }, { 10 }, { 0 } } }, + + { SVGA3D_A1R5G5B5, + SVGA3DBLOCKDESC_RGBA_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 5 }, { 5 }, { 5 }, { 1 } }, + { { 0 }, { 5 }, { 10 }, { 15 } } }, + + { SVGA3D_A4R4G4B4, + SVGA3DBLOCKDESC_RGBA_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 4 }, { 4 }, { 4 }, { 4 } }, + { { 0 }, { 4 }, { 8 }, { 12 } } }, + + { SVGA3D_Z_D32, + SVGA3DBLOCKDESC_DEPTH_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 0 }, { 32 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_Z_D16, + SVGA3DBLOCKDESC_DEPTH_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 0 }, { 16 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_Z_D24S8, + SVGA3DBLOCKDESC_DS_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 8 }, { 24 }, { 0 } }, + { { 0 }, { 0 }, { 8 }, { 0 } } }, + + { SVGA3D_Z_D15S1, + SVGA3DBLOCKDESC_DS_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 1 }, { 15 }, { 0 } }, + { { 0 }, { 0 }, { 1 }, { 0 } } }, + + { SVGA3D_LUMINANCE8, + SVGA3DBLOCKDESC_L_UNORM, + { 1, 1, 1 }, + 1, + 1, + { { 0 }, { 0 }, { 8 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_LUMINANCE4_ALPHA4, + SVGA3DBLOCKDESC_LA_UNORM, + { 1, 1, 1 }, + 1, + 1, + { { 0 }, { 0 }, { 4 }, { 4 } }, + { { 0 }, { 0 }, { 0 }, { 4 } } }, + + { SVGA3D_LUMINANCE16, + SVGA3DBLOCKDESC_L_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 0 }, { 16 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_LUMINANCE8_ALPHA8, + SVGA3DBLOCKDESC_LA_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 0 }, { 8 }, { 8 } }, + { { 0 }, { 0 }, { 0 }, { 8 } } }, + + { SVGA3D_DXT1, + SVGA3DBLOCKDESC_BC1_COMP_UNORM, + { 4, 4, 1 }, + 8, + 8, + { { 0 }, { 0 }, { 64 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_DXT2, + SVGA3DBLOCKDESC_BC2_COMP_UNORM, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_DXT3, + SVGA3DBLOCKDESC_BC2_COMP_UNORM, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_DXT4, + SVGA3DBLOCKDESC_BC3_COMP_UNORM, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_DXT5, + SVGA3DBLOCKDESC_BC3_COMP_UNORM, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BUMPU8V8, + SVGA3DBLOCKDESC_UV_SNORM, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 8 }, { 8 }, { 0 } }, + { { 0 }, { 8 }, { 0 }, { 0 } } }, + + { SVGA3D_BUMPL6V5U5, + SVGA3DBLOCKDESC_UVL, + { 1, 1, 1 }, + 2, + 2, + { { 6 }, { 5 
}, { 5 }, { 0 } }, + { { 10 }, { 5 }, { 0 }, { 0 } } }, + + { SVGA3D_BUMPX8L8V8U8, + SVGA3DBLOCKDESC_UVL, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 0 } }, + { { 16 }, { 8 }, { 0 }, { 0 } } }, + + { SVGA3D_FORMAT_DEAD1, + SVGA3DBLOCKDESC_NONE, + { 1, 1, 1 }, + 3, + 3, + { { 8 }, { 8 }, { 8 }, { 0 } }, + { { 16 }, { 8 }, { 0 }, { 0 } } }, + + { SVGA3D_ARGB_S10E5, + SVGA3DBLOCKDESC_RGBA_FP, + { 1, 1, 1 }, + 8, + 8, + { { 16 }, { 16 }, { 16 }, { 16 } }, + { { 32 }, { 16 }, { 0 }, { 48 } } }, + + { SVGA3D_ARGB_S23E8, + SVGA3DBLOCKDESC_RGBA_FP, + { 1, 1, 1 }, + 16, + 16, + { { 32 }, { 32 }, { 32 }, { 32 } }, + { { 64 }, { 32 }, { 0 }, { 96 } } }, + + { SVGA3D_A2R10G10B10, + SVGA3DBLOCKDESC_RGBA_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 10 }, { 10 }, { 10 }, { 2 } }, + { { 0 }, { 10 }, { 20 }, { 30 } } }, + + { SVGA3D_V8U8, + SVGA3DBLOCKDESC_UV_SNORM, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 8 }, { 8 }, { 0 } }, + { { 0 }, { 8 }, { 0 }, { 0 } } }, + + { SVGA3D_Q8W8V8U8, + SVGA3DBLOCKDESC_UVWQ_SNORM, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 8 } }, + { { 16 }, { 8 }, { 0 }, { 24 } } }, + + { SVGA3D_CxV8U8, + SVGA3DBLOCKDESC_UVCX_SNORM, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 8 }, { 8 }, { 0 } }, + { { 0 }, { 8 }, { 0 }, { 0 } } }, + + { SVGA3D_X8L8V8U8, + SVGA3DBLOCKDESC_UVL, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 0 } }, + { { 16 }, { 8 }, { 0 }, { 0 } } }, + + { SVGA3D_A2W10V10U10, + SVGA3DBLOCKDESC_UVWA, + { 1, 1, 1 }, + 4, + 4, + { { 10 }, { 10 }, { 10 }, { 2 } }, + { { 20 }, { 10 }, { 0 }, { 30 } } }, + + { SVGA3D_ALPHA8, + SVGA3DBLOCKDESC_A_UNORM, + { 1, 1, 1 }, + 1, + 1, + { { 0 }, { 0 }, { 0 }, { 8 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R_S10E5, + SVGA3DBLOCKDESC_R_FP, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 0 }, { 16 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R_S23E8, + SVGA3DBLOCKDESC_R_FP, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 0 }, { 32 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_RG_S10E5, + SVGA3DBLOCKDESC_RG_FP, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 16 }, { 16 }, { 0 } }, + { { 0 }, { 16 }, { 0 }, { 0 } } }, + + { SVGA3D_RG_S23E8, + SVGA3DBLOCKDESC_RG_FP, + { 1, 1, 1 }, + 8, + 8, + { { 0 }, { 32 }, { 32 }, { 0 } }, + { { 0 }, { 32 }, { 0 }, { 0 } } }, + + { SVGA3D_BUFFER, + SVGA3DBLOCKDESC_BUFFER, + { 1, 1, 1 }, + 1, + 1, + { { 0 }, { 0 }, { 8 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_Z_D24X8, + SVGA3DBLOCKDESC_DEPTH_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 0 }, { 24 }, { 0 } }, + { { 0 }, { 0 }, { 8 }, { 0 } } }, + + { SVGA3D_V16U16, + SVGA3DBLOCKDESC_UV_SNORM, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 16 }, { 16 }, { 0 } }, + { { 0 }, { 16 }, { 0 }, { 0 } } }, + + { SVGA3D_G16R16, + SVGA3DBLOCKDESC_RG_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 16 }, { 16 }, { 0 } }, + { { 0 }, { 16 }, { 0 }, { 0 } } }, + + { SVGA3D_A16B16G16R16, + SVGA3DBLOCKDESC_RGBA_UNORM, + { 1, 1, 1 }, + 8, + 8, + { { 16 }, { 16 }, { 16 }, { 16 } }, + { { 32 }, { 16 }, { 0 }, { 48 } } }, + + { SVGA3D_UYVY, + SVGA3DBLOCKDESC_YUV, + { 2, 1, 1 }, + 4, + 4, + { { 8 }, { 0 }, { 8 }, { 0 } }, + { { 0 }, { 0 }, { 8 }, { 0 } } }, + + { SVGA3D_YUY2, + SVGA3DBLOCKDESC_YUV, + { 2, 1, 1 }, + 4, + 4, + { { 8 }, { 0 }, { 8 }, { 0 } }, + { { 8 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_NV12, + SVGA3DBLOCKDESC_NV12, + { 2, 2, 1 }, + 6, + 2, + { { 0 }, { 0 }, { 48 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_FORMAT_DEAD2, + SVGA3DBLOCKDESC_NONE, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 8 } }, + { { 0 }, { 8 }, { 
16 }, { 24 } } }, + + { SVGA3D_R32G32B32A32_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 16, + 16, + { { 32 }, { 32 }, { 32 }, { 32 } }, + { { 64 }, { 32 }, { 0 }, { 96 } } }, + + { SVGA3D_R32G32B32A32_UINT, + SVGA3DBLOCKDESC_RGBA_UINT, + { 1, 1, 1 }, + 16, + 16, + { { 32 }, { 32 }, { 32 }, { 32 } }, + { { 64 }, { 32 }, { 0 }, { 96 } } }, + + { SVGA3D_R32G32B32A32_SINT, + SVGA3DBLOCKDESC_RGBA_SINT, + { 1, 1, 1 }, + 16, + 16, + { { 32 }, { 32 }, { 32 }, { 32 } }, + { { 64 }, { 32 }, { 0 }, { 96 } } }, + + { SVGA3D_R32G32B32_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 12, + 12, + { { 32 }, { 32 }, { 32 }, { 0 } }, + { { 64 }, { 32 }, { 0 }, { 0 } } }, + + { SVGA3D_R32G32B32_FLOAT, + SVGA3DBLOCKDESC_RGB_FP, + { 1, 1, 1 }, + 12, + 12, + { { 32 }, { 32 }, { 32 }, { 0 } }, + { { 64 }, { 32 }, { 0 }, { 0 } } }, + + { SVGA3D_R32G32B32_UINT, + SVGA3DBLOCKDESC_RGB_UINT, + { 1, 1, 1 }, + 12, + 12, + { { 32 }, { 32 }, { 32 }, { 0 } }, + { { 64 }, { 32 }, { 0 }, { 0 } } }, + + { SVGA3D_R32G32B32_SINT, + SVGA3DBLOCKDESC_RGB_SINT, + { 1, 1, 1 }, + 12, + 12, + { { 32 }, { 32 }, { 32 }, { 0 } }, + { { 64 }, { 32 }, { 0 }, { 0 } } }, + + { SVGA3D_R16G16B16A16_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 8, + 8, + { { 16 }, { 16 }, { 16 }, { 16 } }, + { { 32 }, { 16 }, { 0 }, { 48 } } }, + + { SVGA3D_R16G16B16A16_UINT, + SVGA3DBLOCKDESC_RGBA_UINT, + { 1, 1, 1 }, + 8, + 8, + { { 16 }, { 16 }, { 16 }, { 16 } }, + { { 32 }, { 16 }, { 0 }, { 48 } } }, + + { SVGA3D_R16G16B16A16_SNORM, + SVGA3DBLOCKDESC_RGBA_SNORM, + { 1, 1, 1 }, + 8, + 8, + { { 16 }, { 16 }, { 16 }, { 16 } }, + { { 32 }, { 16 }, { 0 }, { 48 } } }, + + { SVGA3D_R16G16B16A16_SINT, + SVGA3DBLOCKDESC_RGBA_SINT, + { 1, 1, 1 }, + 8, + 8, + { { 16 }, { 16 }, { 16 }, { 16 } }, + { { 32 }, { 16 }, { 0 }, { 48 } } }, + + { SVGA3D_R32G32_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 8, + 8, + { { 0 }, { 32 }, { 32 }, { 0 } }, + { { 0 }, { 32 }, { 0 }, { 0 } } }, + + { SVGA3D_R32G32_UINT, + SVGA3DBLOCKDESC_RG_UINT, + { 1, 1, 1 }, + 8, + 8, + { { 0 }, { 32 }, { 32 }, { 0 } }, + { { 0 }, { 32 }, { 0 }, { 0 } } }, + + { SVGA3D_R32G32_SINT, + SVGA3DBLOCKDESC_RG_SINT, + { 1, 1, 1 }, + 8, + 8, + { { 0 }, { 32 }, { 32 }, { 0 } }, + { { 0 }, { 32 }, { 0 }, { 0 } } }, + + { SVGA3D_R32G8X24_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 8, + 8, + { { 0 }, { 8 }, { 32 }, { 0 } }, + { { 0 }, { 32 }, { 0 }, { 0 } } }, + + { SVGA3D_D32_FLOAT_S8X24_UINT, + SVGA3DBLOCKDESC_DS, + { 1, 1, 1 }, + 8, + 8, + { { 0 }, { 8 }, { 32 }, { 0 } }, + { { 0 }, { 32 }, { 0 }, { 0 } } }, + + { SVGA3D_R32_FLOAT_X8X24, + SVGA3DBLOCKDESC_R_FP, + { 1, 1, 1 }, + 8, + 8, + { { 0 }, { 0 }, { 32 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_X32_G8X24_UINT, + SVGA3DBLOCKDESC_G_UINT, + { 1, 1, 1 }, + 8, + 8, + { { 0 }, { 8 }, { 0 }, { 0 } }, + { { 0 }, { 32 }, { 0 }, { 0 } } }, + + { SVGA3D_R10G10B10A2_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 4, + 4, + { { 10 }, { 10 }, { 10 }, { 2 } }, + { { 20 }, { 10 }, { 0 }, { 30 } } }, + + { SVGA3D_R10G10B10A2_UINT, + SVGA3DBLOCKDESC_RGBA_UINT, + { 1, 1, 1 }, + 4, + 4, + { { 10 }, { 10 }, { 10 }, { 2 } }, + { { 20 }, { 10 }, { 0 }, { 30 } } }, + + { SVGA3D_R11G11B10_FLOAT, + SVGA3DBLOCKDESC_RGB_FP, + { 1, 1, 1 }, + 4, + 4, + { { 10 }, { 11 }, { 11 }, { 0 } }, + { { 22 }, { 11 }, { 0 }, { 0 } } }, + + { SVGA3D_R8G8B8A8_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 8 } }, + { { 16 }, { 8 }, { 0 }, { 24 } } }, + + { SVGA3D_R8G8B8A8_UNORM, + 
SVGA3DBLOCKDESC_RGBA_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 8 } }, + { { 16 }, { 8 }, { 0 }, { 24 } } }, + + { SVGA3D_R8G8B8A8_UNORM_SRGB, + SVGA3DBLOCKDESC_RGBA_UNORM_SRGB, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 8 } }, + { { 16 }, { 8 }, { 0 }, { 24 } } }, + + { SVGA3D_R8G8B8A8_UINT, + SVGA3DBLOCKDESC_RGBA_UINT, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 8 } }, + { { 16 }, { 8 }, { 0 }, { 24 } } }, + + { SVGA3D_R8G8B8A8_SINT, + SVGA3DBLOCKDESC_RGBA_SINT, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 8 } }, + { { 16 }, { 8 }, { 0 }, { 24 } } }, + + { SVGA3D_R16G16_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 16 }, { 16 }, { 0 } }, + { { 0 }, { 16 }, { 0 }, { 0 } } }, + + { SVGA3D_R16G16_UINT, + SVGA3DBLOCKDESC_RG_UINT, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 16 }, { 16 }, { 0 } }, + { { 0 }, { 16 }, { 0 }, { 0 } } }, + + { SVGA3D_R16G16_SINT, + SVGA3DBLOCKDESC_RG_SINT, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 16 }, { 16 }, { 0 } }, + { { 0 }, { 16 }, { 0 }, { 0 } } }, + + { SVGA3D_R32_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 0 }, { 32 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_D32_FLOAT, + SVGA3DBLOCKDESC_DEPTH_FP, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 0 }, { 32 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R32_UINT, + SVGA3DBLOCKDESC_R_UINT, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 0 }, { 32 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R32_SINT, + SVGA3DBLOCKDESC_R_SINT, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 0 }, { 32 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R24G8_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 8 }, { 24 }, { 0 } }, + { { 0 }, { 24 }, { 0 }, { 0 } } }, + + { SVGA3D_D24_UNORM_S8_UINT, + SVGA3DBLOCKDESC_DS_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 8 }, { 24 }, { 0 } }, + { { 0 }, { 24 }, { 0 }, { 0 } } }, + + { SVGA3D_R24_UNORM_X8, + SVGA3DBLOCKDESC_R_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 0 }, { 24 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_X24_G8_UINT, + SVGA3DBLOCKDESC_G_UINT, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 8 }, { 0 }, { 0 } }, + { { 0 }, { 24 }, { 0 }, { 0 } } }, + + { SVGA3D_R8G8_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 8 }, { 8 }, { 0 } }, + { { 0 }, { 8 }, { 0 }, { 0 } } }, + + { SVGA3D_R8G8_UNORM, + SVGA3DBLOCKDESC_RG_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 8 }, { 8 }, { 0 } }, + { { 0 }, { 8 }, { 0 }, { 0 } } }, + + { SVGA3D_R8G8_UINT, + SVGA3DBLOCKDESC_RG_UINT, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 8 }, { 8 }, { 0 } }, + { { 0 }, { 8 }, { 0 }, { 0 } } }, + + { SVGA3D_R8G8_SINT, + SVGA3DBLOCKDESC_RG_SINT, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 8 }, { 8 }, { 0 } }, + { { 0 }, { 8 }, { 0 }, { 0 } } }, + + { SVGA3D_R16_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 0 }, { 16 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R16_UNORM, + SVGA3DBLOCKDESC_R_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 0 }, { 16 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R16_UINT, + SVGA3DBLOCKDESC_R_UINT, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 0 }, { 16 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R16_SNORM, + SVGA3DBLOCKDESC_R_SNORM, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 0 }, { 16 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R16_SINT, + SVGA3DBLOCKDESC_R_SINT, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 0 }, { 16 }, { 0 } }, + { { 0 
}, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R8_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 1, + 1, + { { 0 }, { 0 }, { 8 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R8_UNORM, + SVGA3DBLOCKDESC_R_UNORM, + { 1, 1, 1 }, + 1, + 1, + { { 0 }, { 0 }, { 8 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R8_UINT, + SVGA3DBLOCKDESC_R_UINT, + { 1, 1, 1 }, + 1, + 1, + { { 0 }, { 0 }, { 8 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R8_SNORM, + SVGA3DBLOCKDESC_R_SNORM, + { 1, 1, 1 }, + 1, + 1, + { { 0 }, { 0 }, { 8 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R8_SINT, + SVGA3DBLOCKDESC_R_SINT, + { 1, 1, 1 }, + 1, + 1, + { { 0 }, { 0 }, { 8 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_P8, + SVGA3DBLOCKDESC_NONE, + { 1, 1, 1 }, + 1, + 1, + { { 0 }, { 0 }, { 8 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R9G9B9E5_SHAREDEXP, + SVGA3DBLOCKDESC_RGB_EXP, + { 1, 1, 1 }, + 4, + 4, + { { 9 }, { 9 }, { 9 }, { 5 } }, + { { 18 }, { 9 }, { 0 }, { 27 } } }, + + { SVGA3D_R8G8_B8G8_UNORM, + SVGA3DBLOCKDESC_NONE, + { 2, 1, 1 }, + 4, + 4, + { { 0 }, { 8 }, { 8 }, { 0 } }, + { { 0 }, { 0 }, { 8 }, { 0 } } }, + + { SVGA3D_G8R8_G8B8_UNORM, + SVGA3DBLOCKDESC_NONE, + { 2, 1, 1 }, + 4, + 4, + { { 0 }, { 8 }, { 8 }, { 0 } }, + { { 0 }, { 8 }, { 0 }, { 0 } } }, + + { SVGA3D_BC1_TYPELESS, + SVGA3DBLOCKDESC_BC1_COMP_TYPELESS, + { 4, 4, 1 }, + 8, + 8, + { { 0 }, { 0 }, { 64 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC1_UNORM_SRGB, + SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB, + { 4, 4, 1 }, + 8, + 8, + { { 0 }, { 0 }, { 64 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC2_TYPELESS, + SVGA3DBLOCKDESC_BC2_COMP_TYPELESS, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC2_UNORM_SRGB, + SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC3_TYPELESS, + SVGA3DBLOCKDESC_BC3_COMP_TYPELESS, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC3_UNORM_SRGB, + SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC4_TYPELESS, + SVGA3DBLOCKDESC_BC4_COMP_TYPELESS, + { 4, 4, 1 }, + 8, + 8, + { { 0 }, { 0 }, { 64 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_ATI1, + SVGA3DBLOCKDESC_BC4_COMP_UNORM, + { 4, 4, 1 }, + 8, + 8, + { { 0 }, { 0 }, { 64 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC4_SNORM, + SVGA3DBLOCKDESC_BC4_COMP_SNORM, + { 4, 4, 1 }, + 8, + 8, + { { 0 }, { 0 }, { 64 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC5_TYPELESS, + SVGA3DBLOCKDESC_BC5_COMP_TYPELESS, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_ATI2, + SVGA3DBLOCKDESC_BC5_COMP_UNORM, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC5_SNORM, + SVGA3DBLOCKDESC_BC5_COMP_SNORM, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, + SVGA3DBLOCKDESC_RGBA_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 10 }, { 10 }, { 10 }, { 2 } }, + { { 20 }, { 10 }, { 0 }, { 30 } } }, + + { SVGA3D_B8G8R8A8_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 8 } }, 
+ { { 0 }, { 8 }, { 16 }, { 24 } } }, + + { SVGA3D_B8G8R8A8_UNORM_SRGB, + SVGA3DBLOCKDESC_RGBA_UNORM_SRGB, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 8 } }, + { { 0 }, { 8 }, { 16 }, { 24 } } }, + + { SVGA3D_B8G8R8X8_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 0 } }, + { { 0 }, { 8 }, { 16 }, { 24 } } }, + + { SVGA3D_B8G8R8X8_UNORM_SRGB, + SVGA3DBLOCKDESC_RGB_UNORM_SRGB, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 0 } }, + { { 0 }, { 8 }, { 16 }, { 24 } } }, + + { SVGA3D_Z_DF16, + SVGA3DBLOCKDESC_DEPTH_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 0 }, { 16 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_Z_DF24, + SVGA3DBLOCKDESC_DEPTH_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 0 }, { 24 }, { 0 } }, + { { 0 }, { 0 }, { 8 }, { 0 } } }, + + { SVGA3D_Z_D24S8_INT, + SVGA3DBLOCKDESC_DS_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 8 }, { 24 }, { 0 } }, + { { 0 }, { 0 }, { 8 }, { 0 } } }, + + { SVGA3D_YV12, + SVGA3DBLOCKDESC_YV12, + { 2, 2, 1 }, + 6, + 2, + { { 0 }, { 0 }, { 48 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R32G32B32A32_FLOAT, + SVGA3DBLOCKDESC_RGBA_FP, + { 1, 1, 1 }, + 16, + 16, + { { 32 }, { 32 }, { 32 }, { 32 } }, + { { 64 }, { 32 }, { 0 }, { 96 } } }, + + { SVGA3D_R16G16B16A16_FLOAT, + SVGA3DBLOCKDESC_RGBA_FP, + { 1, 1, 1 }, + 8, + 8, + { { 16 }, { 16 }, { 16 }, { 16 } }, + { { 32 }, { 16 }, { 0 }, { 48 } } }, + + { SVGA3D_R16G16B16A16_UNORM, + SVGA3DBLOCKDESC_RGBA_UNORM, + { 1, 1, 1 }, + 8, + 8, + { { 16 }, { 16 }, { 16 }, { 16 } }, + { { 32 }, { 16 }, { 0 }, { 48 } } }, + + { SVGA3D_R32G32_FLOAT, + SVGA3DBLOCKDESC_RG_FP, + { 1, 1, 1 }, + 8, + 8, + { { 0 }, { 32 }, { 32 }, { 0 } }, + { { 0 }, { 32 }, { 0 }, { 0 } } }, + + { SVGA3D_R10G10B10A2_UNORM, + SVGA3DBLOCKDESC_RGBA_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 10 }, { 10 }, { 10 }, { 2 } }, + { { 20 }, { 10 }, { 0 }, { 30 } } }, + + { SVGA3D_R8G8B8A8_SNORM, + SVGA3DBLOCKDESC_RGBA_SNORM, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 8 } }, + { { 16 }, { 8 }, { 0 }, { 24 } } }, + + { SVGA3D_R16G16_FLOAT, + SVGA3DBLOCKDESC_RG_FP, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 16 }, { 16 }, { 0 } }, + { { 0 }, { 16 }, { 0 }, { 0 } } }, + + { SVGA3D_R16G16_UNORM, + SVGA3DBLOCKDESC_RG_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 16 }, { 16 }, { 0 } }, + { { 0 }, { 16 }, { 0 }, { 0 } } }, + + { SVGA3D_R16G16_SNORM, + SVGA3DBLOCKDESC_RG_SNORM, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 16 }, { 16 }, { 0 } }, + { { 0 }, { 16 }, { 0 }, { 0 } } }, + + { SVGA3D_R32_FLOAT, + SVGA3DBLOCKDESC_R_FP, + { 1, 1, 1 }, + 4, + 4, + { { 0 }, { 0 }, { 32 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_R8G8_SNORM, + SVGA3DBLOCKDESC_RG_SNORM, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 8 }, { 8 }, { 0 } }, + { { 0 }, { 8 }, { 0 }, { 0 } } }, + + { SVGA3D_R16_FLOAT, + SVGA3DBLOCKDESC_R_FP, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 0 }, { 16 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_D16_UNORM, + SVGA3DBLOCKDESC_DEPTH_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 0 }, { 0 }, { 16 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_A8_UNORM, + SVGA3DBLOCKDESC_A_UNORM, + { 1, 1, 1 }, + 1, + 1, + { { 0 }, { 0 }, { 0 }, { 8 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC1_UNORM, + SVGA3DBLOCKDESC_BC1_COMP_UNORM, + { 4, 4, 1 }, + 8, + 8, + { { 0 }, { 0 }, { 64 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC2_UNORM, + SVGA3DBLOCKDESC_BC2_COMP_UNORM, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 
0 }, { 0 } } }, + + { SVGA3D_BC3_UNORM, + SVGA3DBLOCKDESC_BC3_COMP_UNORM, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_B5G6R5_UNORM, + SVGA3DBLOCKDESC_RGB_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 5 }, { 6 }, { 5 }, { 0 } }, + { { 0 }, { 5 }, { 11 }, { 0 } } }, + + { SVGA3D_B5G5R5A1_UNORM, + SVGA3DBLOCKDESC_RGBA_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 5 }, { 5 }, { 5 }, { 1 } }, + { { 0 }, { 5 }, { 10 }, { 15 } } }, + + { SVGA3D_B8G8R8A8_UNORM, + SVGA3DBLOCKDESC_RGBA_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 8 } }, + { { 0 }, { 8 }, { 16 }, { 24 } } }, + + { SVGA3D_B8G8R8X8_UNORM, + SVGA3DBLOCKDESC_RGB_UNORM, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 0 } }, + { { 0 }, { 8 }, { 16 }, { 24 } } }, + + { SVGA3D_BC4_UNORM, + SVGA3DBLOCKDESC_BC4_COMP_UNORM, + { 4, 4, 1 }, + 8, + 8, + { { 0 }, { 0 }, { 64 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC5_UNORM, + SVGA3DBLOCKDESC_BC5_COMP_UNORM, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_B4G4R4A4_UNORM, + SVGA3DBLOCKDESC_RGBA_UNORM, + { 1, 1, 1 }, + 2, + 2, + { { 4 }, { 4 }, { 4 }, { 4 } }, + { { 0 }, { 4 }, { 8 }, { 12 } } }, + + { SVGA3D_BC6H_TYPELESS, + SVGA3DBLOCKDESC_BC6H_COMP_TYPELESS, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC6H_UF16, + SVGA3DBLOCKDESC_BC6H_COMP_UF16, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC6H_SF16, + SVGA3DBLOCKDESC_BC6H_COMP_SF16, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC7_TYPELESS, + SVGA3DBLOCKDESC_BC7_COMP_TYPELESS, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC7_UNORM, + SVGA3DBLOCKDESC_BC7_COMP_UNORM, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_BC7_UNORM_SRGB, + SVGA3DBLOCKDESC_BC7_COMP_UNORM_SRGB, + { 4, 4, 1 }, + 16, + 16, + { { 0 }, { 0 }, { 128 }, { 0 } }, + { { 0 }, { 0 }, { 0 }, { 0 } } }, + + { SVGA3D_AYUV, + SVGA3DBLOCKDESC_AYUV, + { 1, 1, 1 }, + 4, + 4, + { { 8 }, { 8 }, { 8 }, { 8 } }, + { { 0 }, { 8 }, { 16 }, { 24 } } }, + + { SVGA3D_R11G11B10_TYPELESS, + SVGA3DBLOCKDESC_TYPELESS, + { 1, 1, 1 }, + 4, + 4, + { { 10 }, { 11 }, { 11 }, { 0 } }, + { { 22 }, { 11 }, { 0 }, { 0 } } }, }; -/** - * svga3dsurface_subres - Compute the subresource from layer and mipmap. - * @cache: Surface layout data. - * @mip_level: The mipmap level. - * @layer: The surface layer (face or array slice). - * - * Return: The subresource. - */ -static inline u32 svga3dsurface_subres(const struct svga3dsurface_cache *cache, - u32 mip_level, u32 layer) -{ - return cache->num_mip_levels * layer + mip_level; -} - -/** - * svga3dsurface_setup_cache - Build a surface cache entry - * @size: The surface base level dimensions. - * @format: The surface format. - * @num_mip_levels: Number of mipmap levels. - * @num_layers: Number of layers. - * @cache: Pointer to a struct svga3dsurface_cach object to be filled in. - * - * Return: Zero on success, -EINVAL on invalid surface layout. 
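Each entry in the g_SVGA3dSurfaceDescs table above pairs a format with a block extent (blockSize), bytesPerBlock and pitchBytesPerBlock; the svga3dsurface_setup_cache() helper whose removal follows derives row and image strides from exactly these fields using __KERNEL_DIV_ROUND_UP. A minimal standalone sketch of that arithmetic, using simplified local types rather than the driver's structs:

#include <stdint.h>

struct fmt_block {                /* simplified stand-in for SVGA3dSurfaceDesc */
	uint32_t bw, bh, bd;      /* blockSize: pixels covered by one block */
	uint32_t bytes_per_block; /* bytesPerBlock */
};

static inline uint32_t div_round_up(uint32_t a, uint32_t b)
{
	return (a + b - 1) / b;
}

/* Bytes needed for one mip level of extent w x h x d. */
static inline uint32_t mip_bytes(const struct fmt_block *f,
				 uint32_t w, uint32_t h, uint32_t d)
{
	uint32_t row_stride = div_round_up(w, f->bw) * f->bytes_per_block;
	uint32_t img_stride = div_round_up(h, f->bh) * row_stride;

	return div_round_up(d, f->bd) * img_stride;
}

For the DXT1/BC1 entry above (4x4x1 blocks, 8 bytes per block) a 64x64 level works out to 16 * 8 = 128 bytes per row and 16 * 128 = 2048 bytes for the level.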
- */ -static inline int svga3dsurface_setup_cache(const struct drm_vmw_size *size, - SVGA3dSurfaceFormat format, - u32 num_mip_levels, - u32 num_layers, - u32 num_samples, - struct svga3dsurface_cache *cache) -{ - const struct svga3d_surface_desc *desc; - u32 i; - - memset(cache, 0, sizeof(*cache)); - cache->desc = desc = svga3dsurface_get_desc(format); - cache->num_mip_levels = num_mip_levels; - cache->num_layers = num_layers; - for (i = 0; i < cache->num_mip_levels; i++) { - struct svga3dsurface_mip *mip = &cache->mip[i]; - - mip->size = svga3dsurface_get_mip_size(*size, i); - mip->bytes = svga3dsurface_get_image_buffer_size - (desc, &mip->size, 0); - mip->row_stride = - __KERNEL_DIV_ROUND_UP(mip->size.width, - desc->block_size.width) * - desc->bytes_per_block * num_samples; - if (!mip->row_stride) - goto invalid_dim; - - mip->img_stride = - __KERNEL_DIV_ROUND_UP(mip->size.height, - desc->block_size.height) * - mip->row_stride; - if (!mip->img_stride) - goto invalid_dim; - - cache->mip_chain_bytes += mip->bytes; - } - cache->sheet_bytes = cache->mip_chain_bytes * num_layers; - if (!cache->sheet_bytes) - goto invalid_dim; - - return 0; - -invalid_dim: - VMW_DEBUG_USER("Invalid surface layout for dirty tracking.\n"); - return -EINVAL; -} - -/** - * svga3dsurface_get_loc - Get a surface location from an offset into the - * backing store - * @cache: Surface layout data. - * @loc: Pointer to a struct svga3dsurface_loc to be filled in. - * @offset: Offset into the surface backing store. - */ -static inline void -svga3dsurface_get_loc(const struct svga3dsurface_cache *cache, - struct svga3dsurface_loc *loc, - size_t offset) -{ - const struct svga3dsurface_mip *mip = &cache->mip[0]; - const struct svga3d_surface_desc *desc = cache->desc; - u32 layer; - int i; - - loc->sheet = offset / cache->sheet_bytes; - offset -= loc->sheet * cache->sheet_bytes; - - layer = offset / cache->mip_chain_bytes; - offset -= layer * cache->mip_chain_bytes; - for (i = 0; i < cache->num_mip_levels; ++i, ++mip) { - if (mip->bytes > offset) - break; - offset -= mip->bytes; - } - - loc->sub_resource = svga3dsurface_subres(cache, i, layer); - loc->z = offset / mip->img_stride; - offset -= loc->z * mip->img_stride; - loc->z *= desc->block_size.depth; - loc->y = offset / mip->row_stride; - offset -= loc->y * mip->row_stride; - loc->y *= desc->block_size.height; - loc->x = offset / desc->bytes_per_block; - loc->x *= desc->block_size.width; -} - -/** - * svga3dsurface_inc_loc - Clamp increment a surface location with one block - * size - * in each dimension. - * @loc: Pointer to a struct svga3dsurface_loc to be incremented. - * - * When computing the size of a range as size = end - start, the range does not - * include the end element. However a location representing the last byte - * of a touched region in the backing store *is* included in the range. - * This function modifies such a location to match the end definition - * given as start + size which is the one used in a SVGA3dBox. 
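The removed svga3dsurface_get_loc() and svga3dsurface_inc_loc() helpers rely on two conventions: subresources are numbered num_mip_levels * layer + mip_level (svga3dsurface_subres()), and an "end" location is expressed as start + size, the same convention an SVGA3dBox uses. A small self-contained illustration of the numbering, with names local to this example:

#include <stdint.h>

/* sub_resource = num_mip_levels * layer + mip_level */
static inline uint32_t subres(uint32_t num_mips, uint32_t layer, uint32_t mip)
{
	return num_mips * layer + mip;
}

/* Inverse split, as used when walking a subresource back to layer/mip. */
static inline void subres_split(uint32_t num_mips, uint32_t sub,
				uint32_t *layer, uint32_t *mip)
{
	*layer = sub / num_mips;
	*mip = sub % num_mips;
}

With 10 mip levels, layer 2 / mip 3 is subresource 23, and subres_split() recovers the pair again.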
- */ -static inline void -svga3dsurface_inc_loc(const struct svga3dsurface_cache *cache, - struct svga3dsurface_loc *loc) -{ - const struct svga3d_surface_desc *desc = cache->desc; - u32 mip = loc->sub_resource % cache->num_mip_levels; - const struct drm_vmw_size *size = &cache->mip[mip].size; - - loc->sub_resource++; - loc->x += desc->block_size.width; - if (loc->x > size->width) - loc->x = size->width; - loc->y += desc->block_size.height; - if (loc->y > size->height) - loc->y = size->height; - loc->z += desc->block_size.depth; - if (loc->z > size->depth) - loc->z = size->depth; -} - -/** - * svga3dsurface_min_loc - The start location in a subresource - * @cache: Surface layout data. - * @sub_resource: The subresource. - * @loc: Pointer to a struct svga3dsurface_loc to be filled in. - */ -static inline void -svga3dsurface_min_loc(const struct svga3dsurface_cache *cache, - u32 sub_resource, - struct svga3dsurface_loc *loc) -{ - loc->sheet = 0; - loc->sub_resource = sub_resource; - loc->x = loc->y = loc->z = 0; -} - -/** - * svga3dsurface_min_loc - The end location in a subresource - * @cache: Surface layout data. - * @sub_resource: The subresource. - * @loc: Pointer to a struct svga3dsurface_loc to be filled in. - * - * Following the end definition given in svga3dsurface_inc_loc(), - * Compute the end location of a surface subresource. - */ -static inline void -svga3dsurface_max_loc(const struct svga3dsurface_cache *cache, - u32 sub_resource, - struct svga3dsurface_loc *loc) -{ - const struct drm_vmw_size *size; - u32 mip; - - loc->sheet = 0; - loc->sub_resource = sub_resource + 1; - mip = sub_resource % cache->num_mip_levels; - size = &cache->mip[mip].size; - loc->x = size->width; - loc->y = size->height; - loc->z = size->depth; +#ifdef __cplusplus } +#endif -#endif /* _SVGA3D_SURFACEDEFS_H_ */ +#endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h index 77e338a65791..70b88ee16cf6 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h @@ -1,6 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /********************************************************** - * Copyright 2012-2015 VMware, Inc. + * Copyright 2012-2021 VMware, Inc. 
+ * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -27,1974 +27,1530 @@ /* * svga3d_types.h -- * - * SVGA 3d hardware definitions for basic types + * SVGA 3d hardware definitions for basic types */ + + #ifndef _SVGA3D_TYPES_H_ #define _SVGA3D_TYPES_H_ -#define INCLUDE_ALLOW_MODULE -#define INCLUDE_ALLOW_USERLEVEL -#define INCLUDE_ALLOW_VMCORE - -#include "includeCheck.h" +#include "vm_basic_types.h" -/* - * Generic Types - */ +#define SVGA3D_INVALID_ID ((uint32)-1) -#define SVGA3D_INVALID_ID ((uint32)-1) +#define SVGA3D_RESOURCE_TYPE_MIN 1 +#define SVGA3D_RESOURCE_BUFFER 1 +#define SVGA3D_RESOURCE_TEXTURE1D 2 +#define SVGA3D_RESOURCE_TEXTURE2D 3 +#define SVGA3D_RESOURCE_TEXTURE3D 4 +#define SVGA3D_RESOURCE_TEXTURECUBE 5 +#define SVGA3D_RESOURCE_TYPE_DX10_MAX 6 +#define SVGA3D_RESOURCE_BUFFEREX 6 +#define SVGA3D_RESOURCE_TYPE_MAX 7 +typedef uint32 SVGA3dResourceType; -typedef uint8 SVGABool8; /* 8-bit Bool definition */ -typedef uint32 SVGA3dBool; /* 32-bit Bool definition */ -typedef uint32 SVGA3dColor; /* a, r, g, b */ +typedef uint8 SVGABool8; +typedef uint32 SVGA3dBool; +typedef uint32 SVGA3dColor; typedef uint32 SVGA3dSurfaceId; -typedef -#include "vmware_pack_begin.h" -struct { - uint32 numerator; - uint32 denominator; -} -#include "vmware_pack_end.h" -SVGA3dFraction64; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCopyRect { - uint32 x; - uint32 y; - uint32 w; - uint32 h; - uint32 srcx; - uint32 srcy; -} -#include "vmware_pack_end.h" -SVGA3dCopyRect; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dCopyBox { - uint32 x; - uint32 y; - uint32 z; - uint32 w; - uint32 h; - uint32 d; - uint32 srcx; - uint32 srcy; - uint32 srcz; -} -#include "vmware_pack_end.h" -SVGA3dCopyBox; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dRect { - uint32 x; - uint32 y; - uint32 w; - uint32 h; -} -#include "vmware_pack_end.h" -SVGA3dRect; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 x; - uint32 y; - uint32 z; - uint32 w; - uint32 h; - uint32 d; -} -#include "vmware_pack_end.h" -SVGA3dBox; - -typedef -#include "vmware_pack_begin.h" -struct { - int32 x; - int32 y; - int32 z; - int32 w; - int32 h; - int32 d; -} -#include "vmware_pack_end.h" -SVGA3dSignedBox; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 x; - uint32 y; - uint32 z; -} -#include "vmware_pack_end.h" -SVGA3dPoint; +#pragma pack(push, 1) +typedef struct { + uint32 numerator; + uint32 denominator; +} SVGA3dFraction64; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCopyRect { + uint32 x; + uint32 y; + uint32 w; + uint32 h; + uint32 srcx; + uint32 srcy; +} SVGA3dCopyRect; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dCopyBox { + uint32 x; + uint32 y; + uint32 z; + uint32 w; + uint32 h; + uint32 d; + uint32 srcx; + uint32 srcy; + uint32 srcz; +} SVGA3dCopyBox; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dRect { + uint32 x; + uint32 y; + uint32 w; + uint32 h; +} SVGA3dRect; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 x; + uint32 y; + uint32 z; + uint32 w; + uint32 h; + uint32 d; +} SVGA3dBox; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + int32 x; + int32 y; + int32 z; + int32 w; + int32 h; + int32 d; +} SVGA3dSignedBox; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 x; + uint32 y; + uint32 z; +} SVGA3dPoint; +#pragma pack(pop) + +#pragma 
pack(push, 1) +typedef union { + struct { + float r; + float g; + float b; + float a; + }; + + float value[4]; +} SVGA3dRGBAFloat; +#pragma pack(pop) -/* - * Surface formats. - */ typedef enum SVGA3dSurfaceFormat { - SVGA3D_FORMAT_INVALID = 0, - - SVGA3D_X8R8G8B8 = 1, - SVGA3D_FORMAT_MIN = 1, - - SVGA3D_A8R8G8B8 = 2, - - SVGA3D_R5G6B5 = 3, - SVGA3D_X1R5G5B5 = 4, - SVGA3D_A1R5G5B5 = 5, - SVGA3D_A4R4G4B4 = 6, - - SVGA3D_Z_D32 = 7, - SVGA3D_Z_D16 = 8, - SVGA3D_Z_D24S8 = 9, - SVGA3D_Z_D15S1 = 10, - - SVGA3D_LUMINANCE8 = 11, - SVGA3D_LUMINANCE4_ALPHA4 = 12, - SVGA3D_LUMINANCE16 = 13, - SVGA3D_LUMINANCE8_ALPHA8 = 14, - - SVGA3D_DXT1 = 15, - SVGA3D_DXT2 = 16, - SVGA3D_DXT3 = 17, - SVGA3D_DXT4 = 18, - SVGA3D_DXT5 = 19, - - SVGA3D_BUMPU8V8 = 20, - SVGA3D_BUMPL6V5U5 = 21, - SVGA3D_BUMPX8L8V8U8 = 22, - SVGA3D_FORMAT_DEAD1 = 23, - - SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */ - SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */ - - SVGA3D_A2R10G10B10 = 26, - - /* signed formats */ - SVGA3D_V8U8 = 27, - SVGA3D_Q8W8V8U8 = 28, - SVGA3D_CxV8U8 = 29, - - /* mixed formats */ - SVGA3D_X8L8V8U8 = 30, - SVGA3D_A2W10V10U10 = 31, - - SVGA3D_ALPHA8 = 32, - - /* Single- and dual-component floating point formats */ - SVGA3D_R_S10E5 = 33, - SVGA3D_R_S23E8 = 34, - SVGA3D_RG_S10E5 = 35, - SVGA3D_RG_S23E8 = 36, - - SVGA3D_BUFFER = 37, - - SVGA3D_Z_D24X8 = 38, - - SVGA3D_V16U16 = 39, - - SVGA3D_G16R16 = 40, - SVGA3D_A16B16G16R16 = 41, - - /* Packed Video formats */ - SVGA3D_UYVY = 42, - SVGA3D_YUY2 = 43, - - /* Planar video formats */ - SVGA3D_NV12 = 44, - - SVGA3D_FORMAT_DEAD2 = 45, - - SVGA3D_R32G32B32A32_TYPELESS = 46, - SVGA3D_R32G32B32A32_UINT = 47, - SVGA3D_R32G32B32A32_SINT = 48, - SVGA3D_R32G32B32_TYPELESS = 49, - SVGA3D_R32G32B32_FLOAT = 50, - SVGA3D_R32G32B32_UINT = 51, - SVGA3D_R32G32B32_SINT = 52, - SVGA3D_R16G16B16A16_TYPELESS = 53, - SVGA3D_R16G16B16A16_UINT = 54, - SVGA3D_R16G16B16A16_SNORM = 55, - SVGA3D_R16G16B16A16_SINT = 56, - SVGA3D_R32G32_TYPELESS = 57, - SVGA3D_R32G32_UINT = 58, - SVGA3D_R32G32_SINT = 59, - SVGA3D_R32G8X24_TYPELESS = 60, - SVGA3D_D32_FLOAT_S8X24_UINT = 61, - SVGA3D_R32_FLOAT_X8X24 = 62, - SVGA3D_X32_G8X24_UINT = 63, - SVGA3D_R10G10B10A2_TYPELESS = 64, - SVGA3D_R10G10B10A2_UINT = 65, - SVGA3D_R11G11B10_FLOAT = 66, - SVGA3D_R8G8B8A8_TYPELESS = 67, - SVGA3D_R8G8B8A8_UNORM = 68, - SVGA3D_R8G8B8A8_UNORM_SRGB = 69, - SVGA3D_R8G8B8A8_UINT = 70, - SVGA3D_R8G8B8A8_SINT = 71, - SVGA3D_R16G16_TYPELESS = 72, - SVGA3D_R16G16_UINT = 73, - SVGA3D_R16G16_SINT = 74, - SVGA3D_R32_TYPELESS = 75, - SVGA3D_D32_FLOAT = 76, - SVGA3D_R32_UINT = 77, - SVGA3D_R32_SINT = 78, - SVGA3D_R24G8_TYPELESS = 79, - SVGA3D_D24_UNORM_S8_UINT = 80, - SVGA3D_R24_UNORM_X8 = 81, - SVGA3D_X24_G8_UINT = 82, - SVGA3D_R8G8_TYPELESS = 83, - SVGA3D_R8G8_UNORM = 84, - SVGA3D_R8G8_UINT = 85, - SVGA3D_R8G8_SINT = 86, - SVGA3D_R16_TYPELESS = 87, - SVGA3D_R16_UNORM = 88, - SVGA3D_R16_UINT = 89, - SVGA3D_R16_SNORM = 90, - SVGA3D_R16_SINT = 91, - SVGA3D_R8_TYPELESS = 92, - SVGA3D_R8_UNORM = 93, - SVGA3D_R8_UINT = 94, - SVGA3D_R8_SNORM = 95, - SVGA3D_R8_SINT = 96, - SVGA3D_P8 = 97, - SVGA3D_R9G9B9E5_SHAREDEXP = 98, - SVGA3D_R8G8_B8G8_UNORM = 99, - SVGA3D_G8R8_G8B8_UNORM = 100, - SVGA3D_BC1_TYPELESS = 101, - SVGA3D_BC1_UNORM_SRGB = 102, - SVGA3D_BC2_TYPELESS = 103, - SVGA3D_BC2_UNORM_SRGB = 104, - SVGA3D_BC3_TYPELESS = 105, - SVGA3D_BC3_UNORM_SRGB = 106, - SVGA3D_BC4_TYPELESS = 107, - SVGA3D_ATI1 = 108, /* DX9-specific BC4_UNORM */ - SVGA3D_BC4_SNORM = 109, - SVGA3D_BC5_TYPELESS = 110, - SVGA3D_ATI2 = 111, /* 
DX9-specific BC5_UNORM */ - SVGA3D_BC5_SNORM = 112, - SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113, - SVGA3D_B8G8R8A8_TYPELESS = 114, - SVGA3D_B8G8R8A8_UNORM_SRGB = 115, - SVGA3D_B8G8R8X8_TYPELESS = 116, - SVGA3D_B8G8R8X8_UNORM_SRGB = 117, - - /* Advanced depth formats. */ - SVGA3D_Z_DF16 = 118, - SVGA3D_Z_DF24 = 119, - SVGA3D_Z_D24S8_INT = 120, - - /* Planar video formats. */ - SVGA3D_YV12 = 121, - - SVGA3D_R32G32B32A32_FLOAT = 122, - SVGA3D_R16G16B16A16_FLOAT = 123, - SVGA3D_R16G16B16A16_UNORM = 124, - SVGA3D_R32G32_FLOAT = 125, - SVGA3D_R10G10B10A2_UNORM = 126, - SVGA3D_R8G8B8A8_SNORM = 127, - SVGA3D_R16G16_FLOAT = 128, - SVGA3D_R16G16_UNORM = 129, - SVGA3D_R16G16_SNORM = 130, - SVGA3D_R32_FLOAT = 131, - SVGA3D_R8G8_SNORM = 132, - SVGA3D_R16_FLOAT = 133, - SVGA3D_D16_UNORM = 134, - SVGA3D_A8_UNORM = 135, - SVGA3D_BC1_UNORM = 136, - SVGA3D_BC2_UNORM = 137, - SVGA3D_BC3_UNORM = 138, - SVGA3D_B5G6R5_UNORM = 139, - SVGA3D_B5G5R5A1_UNORM = 140, - SVGA3D_B8G8R8A8_UNORM = 141, - SVGA3D_B8G8R8X8_UNORM = 142, - SVGA3D_BC4_UNORM = 143, - SVGA3D_BC5_UNORM = 144, - SVGA3D_B4G4R4A4_UNORM = 145, - - /* DX11 compressed formats */ - SVGA3D_BC6H_TYPELESS = 146, - SVGA3D_BC6H_UF16 = 147, - SVGA3D_BC6H_SF16 = 148, - SVGA3D_BC7_TYPELESS = 149, - SVGA3D_BC7_UNORM = 150, - SVGA3D_BC7_UNORM_SRGB = 151, - - /* Video format with alpha */ - SVGA3D_AYUV = 152, - - SVGA3D_FORMAT_MAX + SVGA3D_FORMAT_INVALID = 0, + + SVGA3D_X8R8G8B8 = 1, + SVGA3D_FORMAT_MIN = 1, + + SVGA3D_A8R8G8B8 = 2, + + SVGA3D_R5G6B5 = 3, + SVGA3D_X1R5G5B5 = 4, + SVGA3D_A1R5G5B5 = 5, + SVGA3D_A4R4G4B4 = 6, + + SVGA3D_Z_D32 = 7, + SVGA3D_Z_D16 = 8, + SVGA3D_Z_D24S8 = 9, + SVGA3D_Z_D15S1 = 10, + + SVGA3D_LUMINANCE8 = 11, + SVGA3D_LUMINANCE4_ALPHA4 = 12, + SVGA3D_LUMINANCE16 = 13, + SVGA3D_LUMINANCE8_ALPHA8 = 14, + + SVGA3D_DXT1 = 15, + SVGA3D_DXT2 = 16, + SVGA3D_DXT3 = 17, + SVGA3D_DXT4 = 18, + SVGA3D_DXT5 = 19, + + SVGA3D_BUMPU8V8 = 20, + SVGA3D_BUMPL6V5U5 = 21, + SVGA3D_BUMPX8L8V8U8 = 22, + SVGA3D_FORMAT_DEAD1 = 23, + + SVGA3D_ARGB_S10E5 = 24, + SVGA3D_ARGB_S23E8 = 25, + + SVGA3D_A2R10G10B10 = 26, + + SVGA3D_V8U8 = 27, + SVGA3D_Q8W8V8U8 = 28, + SVGA3D_CxV8U8 = 29, + + SVGA3D_X8L8V8U8 = 30, + SVGA3D_A2W10V10U10 = 31, + + SVGA3D_ALPHA8 = 32, + + SVGA3D_R_S10E5 = 33, + SVGA3D_R_S23E8 = 34, + SVGA3D_RG_S10E5 = 35, + SVGA3D_RG_S23E8 = 36, + + SVGA3D_BUFFER = 37, + + SVGA3D_Z_D24X8 = 38, + + SVGA3D_V16U16 = 39, + + SVGA3D_G16R16 = 40, + SVGA3D_A16B16G16R16 = 41, + + SVGA3D_UYVY = 42, + SVGA3D_YUY2 = 43, + + SVGA3D_NV12 = 44, + + SVGA3D_FORMAT_DEAD2 = 45, + + SVGA3D_R32G32B32A32_TYPELESS = 46, + SVGA3D_R32G32B32A32_UINT = 47, + SVGA3D_R32G32B32A32_SINT = 48, + SVGA3D_R32G32B32_TYPELESS = 49, + SVGA3D_R32G32B32_FLOAT = 50, + SVGA3D_R32G32B32_UINT = 51, + SVGA3D_R32G32B32_SINT = 52, + SVGA3D_R16G16B16A16_TYPELESS = 53, + SVGA3D_R16G16B16A16_UINT = 54, + SVGA3D_R16G16B16A16_SNORM = 55, + SVGA3D_R16G16B16A16_SINT = 56, + SVGA3D_R32G32_TYPELESS = 57, + SVGA3D_R32G32_UINT = 58, + SVGA3D_R32G32_SINT = 59, + SVGA3D_R32G8X24_TYPELESS = 60, + SVGA3D_D32_FLOAT_S8X24_UINT = 61, + SVGA3D_R32_FLOAT_X8X24 = 62, + SVGA3D_X32_G8X24_UINT = 63, + SVGA3D_R10G10B10A2_TYPELESS = 64, + SVGA3D_R10G10B10A2_UINT = 65, + SVGA3D_R11G11B10_FLOAT = 66, + SVGA3D_R8G8B8A8_TYPELESS = 67, + SVGA3D_R8G8B8A8_UNORM = 68, + SVGA3D_R8G8B8A8_UNORM_SRGB = 69, + SVGA3D_R8G8B8A8_UINT = 70, + SVGA3D_R8G8B8A8_SINT = 71, + SVGA3D_R16G16_TYPELESS = 72, + SVGA3D_R16G16_UINT = 73, + SVGA3D_R16G16_SINT = 74, + SVGA3D_R32_TYPELESS = 75, + SVGA3D_D32_FLOAT = 76, + SVGA3D_R32_UINT = 77, + SVGA3D_R32_SINT = 
78, + SVGA3D_R24G8_TYPELESS = 79, + SVGA3D_D24_UNORM_S8_UINT = 80, + SVGA3D_R24_UNORM_X8 = 81, + SVGA3D_X24_G8_UINT = 82, + SVGA3D_R8G8_TYPELESS = 83, + SVGA3D_R8G8_UNORM = 84, + SVGA3D_R8G8_UINT = 85, + SVGA3D_R8G8_SINT = 86, + SVGA3D_R16_TYPELESS = 87, + SVGA3D_R16_UNORM = 88, + SVGA3D_R16_UINT = 89, + SVGA3D_R16_SNORM = 90, + SVGA3D_R16_SINT = 91, + SVGA3D_R8_TYPELESS = 92, + SVGA3D_R8_UNORM = 93, + SVGA3D_R8_UINT = 94, + SVGA3D_R8_SNORM = 95, + SVGA3D_R8_SINT = 96, + SVGA3D_P8 = 97, + SVGA3D_R9G9B9E5_SHAREDEXP = 98, + SVGA3D_R8G8_B8G8_UNORM = 99, + SVGA3D_G8R8_G8B8_UNORM = 100, + SVGA3D_BC1_TYPELESS = 101, + SVGA3D_BC1_UNORM_SRGB = 102, + SVGA3D_BC2_TYPELESS = 103, + SVGA3D_BC2_UNORM_SRGB = 104, + SVGA3D_BC3_TYPELESS = 105, + SVGA3D_BC3_UNORM_SRGB = 106, + SVGA3D_BC4_TYPELESS = 107, + SVGA3D_ATI1 = 108, + SVGA3D_BC4_SNORM = 109, + SVGA3D_BC5_TYPELESS = 110, + SVGA3D_ATI2 = 111, + SVGA3D_BC5_SNORM = 112, + SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113, + SVGA3D_B8G8R8A8_TYPELESS = 114, + SVGA3D_B8G8R8A8_UNORM_SRGB = 115, + SVGA3D_B8G8R8X8_TYPELESS = 116, + SVGA3D_B8G8R8X8_UNORM_SRGB = 117, + + SVGA3D_Z_DF16 = 118, + SVGA3D_Z_DF24 = 119, + SVGA3D_Z_D24S8_INT = 120, + + SVGA3D_YV12 = 121, + + SVGA3D_R32G32B32A32_FLOAT = 122, + SVGA3D_R16G16B16A16_FLOAT = 123, + SVGA3D_R16G16B16A16_UNORM = 124, + SVGA3D_R32G32_FLOAT = 125, + SVGA3D_R10G10B10A2_UNORM = 126, + SVGA3D_R8G8B8A8_SNORM = 127, + SVGA3D_R16G16_FLOAT = 128, + SVGA3D_R16G16_UNORM = 129, + SVGA3D_R16G16_SNORM = 130, + SVGA3D_R32_FLOAT = 131, + SVGA3D_R8G8_SNORM = 132, + SVGA3D_R16_FLOAT = 133, + SVGA3D_D16_UNORM = 134, + SVGA3D_A8_UNORM = 135, + SVGA3D_BC1_UNORM = 136, + SVGA3D_BC2_UNORM = 137, + SVGA3D_BC3_UNORM = 138, + SVGA3D_B5G6R5_UNORM = 139, + SVGA3D_B5G5R5A1_UNORM = 140, + SVGA3D_B8G8R8A8_UNORM = 141, + SVGA3D_B8G8R8X8_UNORM = 142, + SVGA3D_BC4_UNORM = 143, + SVGA3D_BC5_UNORM = 144, + SVGA3D_B4G4R4A4_UNORM = 145, + + SVGA3D_BC6H_TYPELESS = 146, + SVGA3D_BC6H_UF16 = 147, + SVGA3D_BC6H_SF16 = 148, + SVGA3D_BC7_TYPELESS = 149, + SVGA3D_BC7_UNORM = 150, + SVGA3D_BC7_UNORM_SRGB = 151, + + SVGA3D_AYUV = 152, + + SVGA3D_R11G11B10_TYPELESS = 153, + + SVGA3D_FORMAT_MAX } SVGA3dSurfaceFormat; -/* - * SVGA3d Surface Flags -- - */ -#define SVGA3D_SURFACE_CUBEMAP (1 << 0) +#define SVGA3D_SURFACE_CUBEMAP (1 << 0) -/* - * HINT flags are not enforced by the device but are useful for - * performance. 
- */ -#define SVGA3D_SURFACE_HINT_STATIC (CONST64U(1) << 1) -#define SVGA3D_SURFACE_HINT_DYNAMIC (CONST64U(1) << 2) -#define SVGA3D_SURFACE_HINT_INDEXBUFFER (CONST64U(1) << 3) -#define SVGA3D_SURFACE_HINT_VERTEXBUFFER (CONST64U(1) << 4) -#define SVGA3D_SURFACE_HINT_TEXTURE (CONST64U(1) << 5) -#define SVGA3D_SURFACE_HINT_RENDERTARGET (CONST64U(1) << 6) -#define SVGA3D_SURFACE_HINT_DEPTHSTENCIL (CONST64U(1) << 7) -#define SVGA3D_SURFACE_HINT_WRITEONLY (CONST64U(1) << 8) -#define SVGA3D_SURFACE_DEAD2 (CONST64U(1) << 9) -#define SVGA3D_SURFACE_AUTOGENMIPMAPS (CONST64U(1) << 10) - -#define SVGA3D_SURFACE_DEAD1 (CONST64U(1) << 11) +#define SVGA3D_SURFACE_HINT_STATIC (CONST64U(1) << 1) +#define SVGA3D_SURFACE_HINT_DYNAMIC (CONST64U(1) << 2) +#define SVGA3D_SURFACE_HINT_INDEXBUFFER (CONST64U(1) << 3) +#define SVGA3D_SURFACE_HINT_VERTEXBUFFER (CONST64U(1) << 4) +#define SVGA3D_SURFACE_HINT_TEXTURE (CONST64U(1) << 5) +#define SVGA3D_SURFACE_HINT_RENDERTARGET (CONST64U(1) << 6) +#define SVGA3D_SURFACE_HINT_DEPTHSTENCIL (CONST64U(1) << 7) +#define SVGA3D_SURFACE_HINT_WRITEONLY (CONST64U(1) << 8) +#define SVGA3D_SURFACE_DEAD2 (CONST64U(1) << 9) +#define SVGA3D_SURFACE_AUTOGENMIPMAPS (CONST64U(1) << 10) -/* - * Is this surface using a base-level pitch for it's mob backing? - * - * This flag is not intended to be set by guest-drivers, but is instead - * set by the device when the surface is bound to a mob with a specified - * pitch. - */ -#define SVGA3D_SURFACE_MOB_PITCH (CONST64U(1) << 12) +#define SVGA3D_SURFACE_DEAD1 (CONST64U(1) << 11) -#define SVGA3D_SURFACE_INACTIVE (CONST64U(1) << 13) -#define SVGA3D_SURFACE_HINT_RT_LOCKABLE (CONST64U(1) << 14) -#define SVGA3D_SURFACE_VOLUME (CONST64U(1) << 15) +#define SVGA3D_SURFACE_MOB_PITCH (CONST64U(1) << 12) -/* - * Required to be set on a surface to bind it to a screen target. - */ -#define SVGA3D_SURFACE_SCREENTARGET (CONST64U(1) << 16) +#define SVGA3D_SURFACE_INACTIVE (CONST64U(1) << 13) +#define SVGA3D_SURFACE_HINT_RT_LOCKABLE (CONST64U(1) << 14) +#define SVGA3D_SURFACE_VOLUME (CONST64U(1) << 15) -/* - * Align images in the guest-backing mob to 16-bytes. - */ -#define SVGA3D_SURFACE_ALIGN16 (CONST64U(1) << 17) +#define SVGA3D_SURFACE_SCREENTARGET (CONST64U(1) << 16) -#define SVGA3D_SURFACE_1D (CONST64U(1) << 18) -#define SVGA3D_SURFACE_ARRAY (CONST64U(1) << 19) +#define SVGA3D_SURFACE_ALIGN16 (CONST64U(1) << 17) -/* - * Bind flags. - * These are enforced for any surface defined with DefineGBSurface_v2. - */ -#define SVGA3D_SURFACE_BIND_VERTEX_BUFFER (CONST64U(1) << 20) -#define SVGA3D_SURFACE_BIND_INDEX_BUFFER (CONST64U(1) << 21) -#define SVGA3D_SURFACE_BIND_CONSTANT_BUFFER (CONST64U(1) << 22) -#define SVGA3D_SURFACE_BIND_SHADER_RESOURCE (CONST64U(1) << 23) -#define SVGA3D_SURFACE_BIND_RENDER_TARGET (CONST64U(1) << 24) -#define SVGA3D_SURFACE_BIND_DEPTH_STENCIL (CONST64U(1) << 25) -#define SVGA3D_SURFACE_BIND_STREAM_OUTPUT (CONST64U(1) << 26) +#define SVGA3D_SURFACE_1D (CONST64U(1) << 18) +#define SVGA3D_SURFACE_ARRAY (CONST64U(1) << 19) -/* - * The STAGING flags notes that the surface will not be used directly by the - * drawing pipeline, i.e. that it will not be bound to any bind point. - * Staging surfaces may be used by copy operations to move data in and out - * of other surfaces. No bind flags may be set on surfaces with this flag. - * - * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive - * updates indirectly, i.e. the surface will not be updated directly, but - * will receive copies from staging surfaces. 
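The staging comment above states that no bind flags may be set on a staging surface; the *_DISALLOWED_MASK definitions later in this header encode rules of the same shape. A hedged sketch of such a check, with the helper name and parameters local to this example (in the driver the masks would be e.g. SVGA3D_SURFACE_BIND_MASK and the staging masks defined further down):

#include <stdint.h>

/* A staging surface must not also carry any bind flag. */
static inline int staging_flags_valid(uint64_t flags,
				      uint64_t staging_mask,
				      uint64_t bind_mask)
{
	return !(flags & staging_mask) || !(flags & bind_mask);
}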
- */ -#define SVGA3D_SURFACE_STAGING_UPLOAD (CONST64U(1) << 27) -#define SVGA3D_SURFACE_STAGING_DOWNLOAD (CONST64U(1) << 28) -#define SVGA3D_SURFACE_HINT_INDIRECT_UPDATE (CONST64U(1) << 29) +#define SVGA3D_SURFACE_BIND_VERTEX_BUFFER (CONST64U(1) << 20) +#define SVGA3D_SURFACE_BIND_INDEX_BUFFER (CONST64U(1) << 21) +#define SVGA3D_SURFACE_BIND_CONSTANT_BUFFER (CONST64U(1) << 22) +#define SVGA3D_SURFACE_BIND_SHADER_RESOURCE (CONST64U(1) << 23) +#define SVGA3D_SURFACE_BIND_RENDER_TARGET (CONST64U(1) << 24) +#define SVGA3D_SURFACE_BIND_DEPTH_STENCIL (CONST64U(1) << 25) +#define SVGA3D_SURFACE_BIND_STREAM_OUTPUT (CONST64U(1) << 26) -/* - * Setting this flag allow this surface to be used with the - * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command. It is only valid for - * buffer surfaces, and no bind flags are allowed to be set on surfaces - * with this flag except SVGA3D_SURFACE_TRANSFER_TO_BUFFER. - */ -#define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER (CONST64U(1) << 30) +#define SVGA3D_SURFACE_STAGING_UPLOAD (CONST64U(1) << 27) +#define SVGA3D_SURFACE_STAGING_DOWNLOAD (CONST64U(1) << 28) +#define SVGA3D_SURFACE_HINT_INDIRECT_UPDATE (CONST64U(1) << 29) -/* - * Reserved for video operations. - */ -#define SVGA3D_SURFACE_RESERVED1 (CONST64U(1) << 31) +#define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER (CONST64U(1) << 30) -/* - * Specifies that a surface is multisample, and therefore requires the full - * mob-backing to store all the samples. - */ -#define SVGA3D_SURFACE_MULTISAMPLE (CONST64U(1) << 32) +#define SVGA3D_SURFACE_RESERVED1 (CONST64U(1) << 31) +#define SVGA3D_SURFACE_VADECODE SVGA3D_SURFACE_RESERVED1 -/* - * Specified that the surface is allowed to be bound to a UAView. - */ -#define SVGA3D_SURFACE_BIND_UAVIEW (CONST64U(1) << 33) +#define SVGA3D_SURFACE_MULTISAMPLE (CONST64U(1) << 32) -/* - * Setting this flag allow this surface to be used with the - * SVGA_3D_CMD_DX_TRANSFER_TO_BUFFER command. It is only valid for - * buffer surfaces, and no bind flags are allowed to be set on surfaces - * with this flag except SVGA3D_SURFACE_TRANSFER_FROM_BUFFER. - */ -#define SVGA3D_SURFACE_TRANSFER_TO_BUFFER (CONST64U(1) << 34) +#define SVGA3D_SURFACE_BIND_UAVIEW (CONST64U(1) << 33) -#define SVGA3D_SURFACE_BIND_LOGICOPS (CONST64U(1) << 35) +#define SVGA3D_SURFACE_TRANSFER_TO_BUFFER (CONST64U(1) << 34) -/* - * Optional flags for use with SVGA3D_SURFACE_BIND_UAVIEW - */ -#define SVGA3D_SURFACE_BIND_RAW_VIEWS (CONST64U(1) << 36) -#define SVGA3D_SURFACE_BUFFER_STRUCTURED (CONST64U(1) << 37) +#define SVGA3D_SURFACE_BIND_LOGICOPS (CONST64U(1) << 35) -#define SVGA3D_SURFACE_DRAWINDIRECT_ARGS (CONST64U(1) << 38) -#define SVGA3D_SURFACE_RESOURCE_CLAMP (CONST64U(1) << 39) +#define SVGA3D_SURFACE_BIND_RAW_VIEWS (CONST64U(1) << 36) +#define SVGA3D_SURFACE_BUFFER_STRUCTURED (CONST64U(1) << 37) -#define SVGA3D_SURFACE_FLAG_MAX (CONST64U(1) << 40) +#define SVGA3D_SURFACE_DRAWINDIRECT_ARGS (CONST64U(1) << 38) +#define SVGA3D_SURFACE_RESOURCE_CLAMP (CONST64U(1) << 39) + +#define SVGA3D_SURFACE_STAGING_COPY (CONST64U(1) << 40) + +#define SVGA3D_SURFACE_FLAG_MAX (CONST64U(1) << 44) -/* - * Surface flags types: - * - * SVGA3dSurface1Flags: Lower 32-bits of flags. - * SVGA3dSurface2Flags: Upper 32-bits of flags. - * SVGA3dSurfaceAllFlags: Full 64-bits of flags. 
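The typedefs and FLAGS1/FLAGS2 masks that follow carry the 64-bit surface flag word as two 32-bit halves. One plausible way to split and recombine them, shown with local helper names rather than driver code:

#include <stdint.h>

static inline void surface_flags_split(uint64_t all,
				       uint32_t *flags1, uint32_t *flags2)
{
	*flags1 = (uint32_t)(all & 0xffffffffULL); /* SVGA3dSurface1Flags */
	*flags2 = (uint32_t)(all >> 32);           /* SVGA3dSurface2Flags */
}

static inline uint64_t surface_flags_join(uint32_t flags1, uint32_t flags2)
{
	return ((uint64_t)flags2 << 32) | flags1;  /* SVGA3dSurfaceAllFlags */
}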
- */ typedef uint32 SVGA3dSurface1Flags; typedef uint32 SVGA3dSurface2Flags; typedef uint64 SVGA3dSurfaceAllFlags; -#define SVGA3D_SURFACE_FLAGS1_MASK ((uint64_t)MAX_UINT32) +#define SVGA3D_SURFACE_FLAGS1_MASK ((uint64)MAX_UINT32) #define SVGA3D_SURFACE_FLAGS2_MASK (MAX_UINT64 & ~SVGA3D_SURFACE_FLAGS1_MASK) -#define SVGA3D_SURFACE_HB_DISALLOWED_MASK \ - ( SVGA3D_SURFACE_MOB_PITCH | \ - SVGA3D_SURFACE_SCREENTARGET | \ - SVGA3D_SURFACE_ALIGN16 | \ - SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \ - SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \ - SVGA3D_SURFACE_STAGING_UPLOAD | \ - SVGA3D_SURFACE_STAGING_DOWNLOAD | \ - SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \ - SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \ - SVGA3D_SURFACE_RESERVED1 | \ - SVGA3D_SURFACE_MULTISAMPLE | \ - SVGA3D_SURFACE_BIND_UAVIEW | \ - SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \ - SVGA3D_SURFACE_BIND_LOGICOPS | \ - SVGA3D_SURFACE_BIND_RAW_VIEWS | \ - SVGA3D_SURFACE_BUFFER_STRUCTURED | \ - SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \ - SVGA3D_SURFACE_RESOURCE_CLAMP \ - ) - -#define SVGA3D_SURFACE_HB_PRESENT_DISALLOWED_MASK \ - ( SVGA3D_SURFACE_1D | \ - SVGA3D_SURFACE_RESERVED1 | \ - SVGA3D_SURFACE_MULTISAMPLE \ - ) - -#define SVGA3D_SURFACE_2D_DISALLOWED_MASK \ - ( SVGA3D_SURFACE_CUBEMAP | \ - SVGA3D_SURFACE_AUTOGENMIPMAPS | \ - SVGA3D_SURFACE_VOLUME | \ - SVGA3D_SURFACE_1D | \ - SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \ - SVGA3D_SURFACE_BIND_INDEX_BUFFER | \ - SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \ - SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \ - SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \ - SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \ - SVGA3D_SURFACE_RESERVED1 | \ - SVGA3D_SURFACE_MULTISAMPLE | \ - SVGA3D_SURFACE_BIND_UAVIEW | \ - SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \ - SVGA3D_SURFACE_BIND_RAW_VIEWS | \ - SVGA3D_SURFACE_BUFFER_STRUCTURED | \ - SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \ - SVGA3D_SURFACE_RESOURCE_CLAMP \ - ) - -#define SVGA3D_SURFACE_BASICOPS_DISALLOWED_MASK \ - ( SVGA3D_SURFACE_CUBEMAP | \ - SVGA3D_SURFACE_AUTOGENMIPMAPS | \ - SVGA3D_SURFACE_VOLUME | \ - SVGA3D_SURFACE_1D | \ - SVGA3D_SURFACE_RESERVED1 | \ - SVGA3D_SURFACE_MULTISAMPLE \ - ) - -#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \ - ( SVGA3D_SURFACE_CUBEMAP | \ - SVGA3D_SURFACE_AUTOGENMIPMAPS | \ - SVGA3D_SURFACE_VOLUME | \ - SVGA3D_SURFACE_1D | \ - SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \ - SVGA3D_SURFACE_BIND_INDEX_BUFFER | \ - SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \ - SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \ - SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \ - SVGA3D_SURFACE_INACTIVE | \ - SVGA3D_SURFACE_STAGING_UPLOAD | \ - SVGA3D_SURFACE_STAGING_DOWNLOAD | \ - SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \ - SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \ - SVGA3D_SURFACE_RESERVED1 | \ - SVGA3D_SURFACE_MULTISAMPLE | \ - SVGA3D_SURFACE_BIND_UAVIEW | \ - SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \ - SVGA3D_SURFACE_BIND_RAW_VIEWS | \ - SVGA3D_SURFACE_BUFFER_STRUCTURED | \ - SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \ - SVGA3D_SURFACE_RESOURCE_CLAMP \ - ) - -#define SVGA3D_SURFACE_BUFFER_DISALLOWED_MASK \ - ( SVGA3D_SURFACE_CUBEMAP | \ - SVGA3D_SURFACE_AUTOGENMIPMAPS | \ - SVGA3D_SURFACE_VOLUME | \ - SVGA3D_SURFACE_1D | \ - SVGA3D_SURFACE_DEAD2 | \ - SVGA3D_SURFACE_ARRAY | \ - SVGA3D_SURFACE_MULTISAMPLE | \ - SVGA3D_SURFACE_MOB_PITCH | \ - SVGA3D_SURFACE_RESOURCE_CLAMP \ - ) - -#define SVGA3D_SURFACE_MULTISAMPLE_DISALLOWED_MASK \ - ( SVGA3D_SURFACE_CUBEMAP | \ - SVGA3D_SURFACE_AUTOGENMIPMAPS | \ - SVGA3D_SURFACE_VOLUME | \ - SVGA3D_SURFACE_1D | \ - SVGA3D_SURFACE_SCREENTARGET | \ - SVGA3D_SURFACE_MOB_PITCH | \ - SVGA3D_SURFACE_TRANSFER_FROM_BUFFER 
| \ - SVGA3D_SURFACE_RESERVED1 | \ - SVGA3D_SURFACE_BIND_UAVIEW | \ - SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \ - SVGA3D_SURFACE_BIND_LOGICOPS | \ - SVGA3D_SURFACE_BIND_RAW_VIEWS | \ - SVGA3D_SURFACE_BUFFER_STRUCTURED | \ - SVGA3D_SURFACE_DRAWINDIRECT_ARGS \ - ) - -#define SVGA3D_SURFACE_DX_ONLY_MASK \ - ( SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \ - SVGA3D_SURFACE_STAGING_UPLOAD | \ - SVGA3D_SURFACE_STAGING_DOWNLOAD | \ - SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \ - SVGA3D_SURFACE_TRANSFER_TO_BUFFER \ - ) - -#define SVGA3D_SURFACE_STAGING_MASK \ - ( SVGA3D_SURFACE_STAGING_UPLOAD | \ - SVGA3D_SURFACE_STAGING_DOWNLOAD \ - ) - -#define SVGA3D_SURFACE_BIND_MASK \ - ( SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \ - SVGA3D_SURFACE_BIND_INDEX_BUFFER | \ - SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \ - SVGA3D_SURFACE_BIND_SHADER_RESOURCE | \ - SVGA3D_SURFACE_BIND_RENDER_TARGET | \ - SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \ - SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \ - SVGA3D_SURFACE_BIND_UAVIEW | \ - SVGA3D_SURFACE_BIND_LOGICOPS | \ - SVGA3D_SURFACE_BIND_RAW_VIEWS \ - ) - -#define SVGA3D_SURFACE_VADECODE_DISALLOWED_MASK \ - ( SVGA3D_SURFACE_CUBEMAP | \ - SVGA3D_SURFACE_HINT_STATIC | \ - SVGA3D_SURFACE_HINT_DYNAMIC | \ - SVGA3D_SURFACE_HINT_INDEXBUFFER | \ - SVGA3D_SURFACE_HINT_VERTEXBUFFER | \ - SVGA3D_SURFACE_HINT_TEXTURE | \ - SVGA3D_SURFACE_HINT_RENDERTARGET | \ - SVGA3D_SURFACE_HINT_DEPTHSTENCIL | \ - SVGA3D_SURFACE_HINT_WRITEONLY | \ - SVGA3D_SURFACE_DEAD2 | \ - SVGA3D_SURFACE_AUTOGENMIPMAPS | \ - SVGA3D_SURFACE_HINT_RT_LOCKABLE | \ - SVGA3D_SURFACE_VOLUME | \ - SVGA3D_SURFACE_SCREENTARGET | \ - SVGA3D_SURFACE_1D | \ - SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \ - SVGA3D_SURFACE_BIND_INDEX_BUFFER | \ - SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \ - SVGA3D_SURFACE_BIND_RENDER_TARGET | \ - SVGA3D_SURFACE_BIND_SHADER_RESOURCE | \ - SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \ - SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \ - SVGA3D_SURFACE_INACTIVE | \ - SVGA3D_SURFACE_STAGING_UPLOAD | \ - SVGA3D_SURFACE_STAGING_DOWNLOAD | \ - SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \ - SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \ - SVGA3D_SURFACE_MULTISAMPLE | \ - SVGA3D_SURFACE_BIND_UAVIEW | \ - SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \ - SVGA3D_SURFACE_BIND_LOGICOPS | \ - SVGA3D_SURFACE_BIND_RAW_VIEWS | \ - SVGA3D_SURFACE_BUFFER_STRUCTURED | \ - SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \ - SVGA3D_SURFACE_RESOURCE_CLAMP \ - ) - -#define SVGA3D_SURFACE_VAPROCESSFRAME_OUTPUT_DISALLOWED_MASK \ - ( SVGA3D_SURFACE_HINT_INDEXBUFFER | \ - SVGA3D_SURFACE_HINT_VERTEXBUFFER | \ - SVGA3D_SURFACE_HINT_DEPTHSTENCIL | \ - SVGA3D_SURFACE_DEAD2 | \ - SVGA3D_SURFACE_VOLUME | \ - SVGA3D_SURFACE_1D | \ - SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \ - SVGA3D_SURFACE_BIND_INDEX_BUFFER | \ - SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \ - SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \ - SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \ - SVGA3D_SURFACE_INACTIVE | \ - SVGA3D_SURFACE_STAGING_UPLOAD | \ - SVGA3D_SURFACE_STAGING_DOWNLOAD | \ - SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \ - SVGA3D_SURFACE_VADECODE | \ - SVGA3D_SURFACE_MULTISAMPLE | \ - SVGA3D_SURFACE_BIND_UAVIEW | \ - SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \ - SVGA3D_SURFACE_BIND_LOGICOPS | \ - SVGA3D_SURFACE_BIND_RAW_VIEWS | \ - SVGA3D_SURFACE_BUFFER_STRUCTURED | \ - SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \ - SVGA3D_SURFACE_RESOURCE_CLAMP \ - ) - -#define SVGA3D_SURFACE_VAPROCESSFRAME_INPUT_DISALLOWED_MASK \ - ( SVGA3D_SURFACE_CUBEMAP | \ - SVGA3D_SURFACE_HINT_INDEXBUFFER | \ - SVGA3D_SURFACE_HINT_VERTEXBUFFER | \ - SVGA3D_SURFACE_HINT_DEPTHSTENCIL | \ - SVGA3D_SURFACE_DEAD2 | \ - 
SVGA3D_SURFACE_VOLUME | \ - SVGA3D_SURFACE_SCREENTARGET | \ - SVGA3D_SURFACE_1D | \ - SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \ - SVGA3D_SURFACE_BIND_INDEX_BUFFER | \ - SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \ - SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \ - SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \ - SVGA3D_SURFACE_STAGING_UPLOAD | \ - SVGA3D_SURFACE_STAGING_DOWNLOAD | \ - SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \ - SVGA3D_SURFACE_MULTISAMPLE | \ - SVGA3D_SURFACE_BIND_UAVIEW | \ - SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \ - SVGA3D_SURFACE_BIND_LOGICOPS | \ - SVGA3D_SURFACE_BIND_RAW_VIEWS | \ - SVGA3D_SURFACE_BUFFER_STRUCTURED | \ - SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \ - SVGA3D_SURFACE_RESOURCE_CLAMP \ - ) - -#define SVGA3D_SURFACE_LOGICOPS_DISALLOWED_MASK \ - ( SVGA3D_SURFACE_CUBEMAP | \ - SVGA3D_SURFACE_DEAD2 | \ - SVGA3D_SURFACE_AUTOGENMIPMAPS | \ - SVGA3D_SURFACE_VOLUME | \ - SVGA3D_SURFACE_1D | \ - SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \ - SVGA3D_SURFACE_BIND_INDEX_BUFFER | \ - SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \ - SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \ - SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \ - SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \ - SVGA3D_SURFACE_VADECODE | \ - SVGA3D_SURFACE_MULTISAMPLE | \ - SVGA3D_SURFACE_BIND_UAVIEW | \ - SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \ - SVGA3D_SURFACE_BIND_RAW_VIEWS | \ - SVGA3D_SURFACE_BUFFER_STRUCTURED | \ - SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \ - SVGA3D_SURFACE_RESOURCE_CLAMP \ - ) +#define SVGA3D_SURFACE_HB_DISALLOWED_MASK \ + (SVGA3D_SURFACE_MOB_PITCH | SVGA3D_SURFACE_SCREENTARGET | \ + SVGA3D_SURFACE_ALIGN16 | SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \ + SVGA3D_SURFACE_BIND_STREAM_OUTPUT | SVGA3D_SURFACE_STAGING_UPLOAD | \ + SVGA3D_SURFACE_STAGING_DOWNLOAD | \ + SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \ + SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | SVGA3D_SURFACE_MULTISAMPLE | \ + SVGA3D_SURFACE_BIND_UAVIEW | SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \ + SVGA3D_SURFACE_BIND_LOGICOPS | SVGA3D_SURFACE_BIND_RAW_VIEWS | \ + SVGA3D_SURFACE_BUFFER_STRUCTURED | SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \ + SVGA3D_SURFACE_RESOURCE_CLAMP | SVGA3D_SURFACE_STAGING_COPY | \ + SVGA3D_SURFACE_RESTRICT_UPDATE | SVGA3D_SURFACE_BIND_TENSOR | \ + SVGA3D_SURFACE_LO_STAGING) + +#define SVGA3D_SURFACE_HB_PRESENT_DISALLOWED_MASK \ + (SVGA3D_SURFACE_1D | SVGA3D_SURFACE_MULTISAMPLE | \ + SVGA3D_SURFACE_STAGING_COPY) + +#define SVGA3D_SURFACE_2D_DISALLOWED_MASK \ + (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_AUTOGENMIPMAPS | \ + SVGA3D_SURFACE_VOLUME | SVGA3D_SURFACE_1D | \ + SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \ + SVGA3D_SURFACE_BIND_INDEX_BUFFER | \ + SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \ + SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \ + SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \ + SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | SVGA3D_SURFACE_MULTISAMPLE | \ + SVGA3D_SURFACE_BIND_UAVIEW | SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \ + SVGA3D_SURFACE_BIND_RAW_VIEWS | SVGA3D_SURFACE_BUFFER_STRUCTURED | \ + SVGA3D_SURFACE_DRAWINDIRECT_ARGS | SVGA3D_SURFACE_RESOURCE_CLAMP | \ + SVGA3D_SURFACE_BIND_TENSOR) + +#define SVGA3D_SURFACE_BASICOPS_DISALLOWED_MASK \ + (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_AUTOGENMIPMAPS | \ + SVGA3D_SURFACE_VOLUME | SVGA3D_SURFACE_1D | \ + SVGA3D_SURFACE_MULTISAMPLE) + +#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \ + (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_AUTOGENMIPMAPS | \ + SVGA3D_SURFACE_VOLUME | SVGA3D_SURFACE_1D | \ + SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \ + SVGA3D_SURFACE_BIND_INDEX_BUFFER | \ + SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \ + SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \ + 
SVGA3D_SURFACE_BIND_STREAM_OUTPUT | SVGA3D_SURFACE_INACTIVE | \ + SVGA3D_SURFACE_STAGING_UPLOAD | SVGA3D_SURFACE_STAGING_DOWNLOAD | \ + SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \ + SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | SVGA3D_SURFACE_MULTISAMPLE | \ + SVGA3D_SURFACE_TRANSFER_TO_BUFFER | SVGA3D_SURFACE_BIND_RAW_VIEWS | \ + SVGA3D_SURFACE_BUFFER_STRUCTURED | SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \ + SVGA3D_SURFACE_RESOURCE_CLAMP | SVGA3D_SURFACE_STAGING_COPY | \ + SVGA3D_SURFACE_BIND_TENSOR | SVGA3D_SURFACE_LO_STAGING) + +#define SVGA3D_SURFACE_BUFFER_DISALLOWED_MASK \ + (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_AUTOGENMIPMAPS | \ + SVGA3D_SURFACE_VOLUME | SVGA3D_SURFACE_1D | SVGA3D_SURFACE_DEAD2 | \ + SVGA3D_SURFACE_ARRAY | SVGA3D_SURFACE_MULTISAMPLE | \ + SVGA3D_SURFACE_MOB_PITCH | SVGA3D_SURFACE_RESOURCE_CLAMP) + +#define SVGA3D_SURFACE_MULTISAMPLE_DISALLOWED_MASK \ + (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_AUTOGENMIPMAPS | \ + SVGA3D_SURFACE_VOLUME | SVGA3D_SURFACE_1D | \ + SVGA3D_SURFACE_SCREENTARGET | SVGA3D_SURFACE_MOB_PITCH | \ + SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | SVGA3D_SURFACE_BIND_UAVIEW | \ + SVGA3D_SURFACE_TRANSFER_TO_BUFFER | SVGA3D_SURFACE_BIND_LOGICOPS | \ + SVGA3D_SURFACE_BIND_RAW_VIEWS | SVGA3D_SURFACE_BUFFER_STRUCTURED | \ + SVGA3D_SURFACE_DRAWINDIRECT_ARGS | SVGA3D_SURFACE_STAGING_COPY) + +#define SVGA3D_SURFACE_DX_ONLY_MASK \ + (SVGA3D_SURFACE_BIND_STREAM_OUTPUT | SVGA3D_SURFACE_STAGING_UPLOAD | \ + SVGA3D_SURFACE_STAGING_DOWNLOAD | \ + SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \ + SVGA3D_SURFACE_TRANSFER_TO_BUFFER) + +#define SVGA3D_SURFACE_ANY_STAGING_MASK \ + (SVGA3D_SURFACE_STAGING_UPLOAD | SVGA3D_SURFACE_STAGING_DOWNLOAD | \ + SVGA3D_SURFACE_STAGING_COPY | SVGA3D_SURFACE_LO_STAGING) + +#define SVGA3D_SURFACE_ANY_NONHINT_STAGING_MASK \ + (SVGA3D_SURFACE_ANY_STAGING_MASK & ~(SVGA3D_SURFACE_LO_STAGING)) + +#define SVGA3D_SURFACE_BIND_MASK \ + (SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \ + SVGA3D_SURFACE_BIND_INDEX_BUFFER | \ + SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \ + SVGA3D_SURFACE_BIND_SHADER_RESOURCE | \ + SVGA3D_SURFACE_BIND_RENDER_TARGET | \ + SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \ + SVGA3D_SURFACE_BIND_STREAM_OUTPUT | SVGA3D_SURFACE_BIND_UAVIEW | \ + SVGA3D_SURFACE_BIND_LOGICOPS | SVGA3D_SURFACE_BIND_RAW_VIEWS | \ + SVGA3D_SURFACE_BIND_TENSOR) + +#define SVGA3D_SURFACE_STAGING_DISALLOWED_MASK \ + (SVGA3D_SURFACE_BIND_MASK | SVGA3D_SURFACE_AUTOGENMIPMAPS | \ + SVGA3D_SURFACE_SCREENTARGET | SVGA3D_SURFACE_HINT_RENDERTARGET | \ + SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | SVGA3D_SURFACE_MULTISAMPLE | \ + SVGA3D_SURFACE_DRAWINDIRECT_ARGS | SVGA3D_SURFACE_RESOURCE_CLAMP | \ + SVGA3D_SURFACE_BIND_TENSOR) + +#define SVGA3D_SURFACE_STAGING_COPY_DISALLOWED_MASK \ + (SVGA3D_SURFACE_STAGING_DISALLOWED_MASK | \ + SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \ + SVGA3D_SURFACE_TRANSFER_FROM_BUFFER) + +#define SVGA3D_SURFACE_LOGICOPS_DISALLOWED_MASK \ + (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_DEAD2 | \ + SVGA3D_SURFACE_AUTOGENMIPMAPS | SVGA3D_SURFACE_VOLUME | \ + SVGA3D_SURFACE_1D | SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \ + SVGA3D_SURFACE_BIND_INDEX_BUFFER | \ + SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \ + SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \ + SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \ + SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | SVGA3D_SURFACE_MULTISAMPLE | \ + SVGA3D_SURFACE_BIND_UAVIEW | SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \ + SVGA3D_SURFACE_BIND_RAW_VIEWS | SVGA3D_SURFACE_BUFFER_STRUCTURED | \ + SVGA3D_SURFACE_DRAWINDIRECT_ARGS | SVGA3D_SURFACE_RESOURCE_CLAMP | \ + SVGA3D_SURFACE_STAGING_COPY) + +#define 
SVGA3D_SURFACE_SM5_MASK \ + (SVGA3D_SURFACE_DRAWINDIRECT_ARGS | SVGA3D_SURFACE_BUFFER_STRUCTURED | \ + SVGA3D_SURFACE_BIND_RAW_VIEWS | SVGA3D_SURFACE_BIND_UAVIEW | \ + SVGA3D_SURFACE_RESOURCE_CLAMP) #define SVGA3D_BUFFER_STRUCTURED_STRIDE_MAX 2048 - -/* - * These are really the D3DFORMAT_OP defines from the wdk. We need - * them so that we can query the host for what the supported surface - * operations are (when we're using the D3D backend, in particular), - * and so we can send those operations to the guest. - */ typedef enum { - SVGA3DFORMAT_OP_TEXTURE = 0x00000001, - SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002, - SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004, - SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008, - SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010, - SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040, - SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080, + SVGA3DFORMAT_OP_TEXTURE = 0x00000001, + SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002, + SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004, + SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008, + SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010, + SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040, + SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080, -/* - * This format can be used as a render target if the current display mode - * is the same depth if the alpha channel is ignored. e.g. if the device - * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the - * format op list entry for A8R8G8B8 should have this cap. - */ - SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100, + SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100, -/* - * This format contains DirectDraw support (including Flip). This flag - * should not to be set on alpha formats. - */ - SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400, + SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400, -/* - * The rasterizer can support some level of Direct3D support in this format - * and implies that the driver can create a Context in this mode (for some - * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE - * flag must also be set. - */ - SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800, + SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800, -/* - * This is set for a private format when the driver has put the bpp in - * the structure. - */ - SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000, + SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000, -/* - * Indicates that this format can be converted to any RGB format for which - * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified. - */ - SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000, + SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000, -/* - * Indicates that this format can be used to create offscreen plain surfaces. - */ - SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000, + SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000, -/* - * Indicated that this format can be read as an SRGB texture (meaning that the - * sampler will linearize the looked up data). - */ - SVGA3DFORMAT_OP_SRGBREAD = 0x00008000, + SVGA3DFORMAT_OP_SRGBREAD = 0x00008000, -/* - * Indicates that this format can be used in the bumpmap instructions. - */ - SVGA3DFORMAT_OP_BUMPMAP = 0x00010000, + SVGA3DFORMAT_OP_BUMPMAP = 0x00010000, -/* - * Indicates that this format can be sampled by the displacement map sampler. - */ - SVGA3DFORMAT_OP_DMAP = 0x00020000, + SVGA3DFORMAT_OP_DMAP = 0x00020000, -/* - * Indicates that this format cannot be used with texture filtering. 
- */ - SVGA3DFORMAT_OP_NOFILTER = 0x00040000, + SVGA3DFORMAT_OP_NOFILTER = 0x00040000, -/* - * Indicates that format conversions are supported to this RGB format if - * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format. - */ - SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000, + SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000, -/* - * Indicated that this format can be written as an SRGB target - * (meaning that the pixel pipe will DE-linearize data on output to format) - */ - SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000, + SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000, -/* - * Indicates that this format cannot be used with alpha blending. - */ - SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000, + SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000, -/* - * Indicates that the device can auto-generated sublevels for resources - * of this format. - */ - SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000, + SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000, -/* - * Indicates that this format can be used by vertex texture sampler. - */ - SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000, + SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000, -/* - * Indicates that this format supports neither texture coordinate - * wrap modes, nor mipmapping. - */ - SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000 + SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000 } SVGA3dFormatOp; -#define SVGA3D_FORMAT_POSITIVE \ - (SVGA3DFORMAT_OP_TEXTURE | \ - SVGA3DFORMAT_OP_VOLUMETEXTURE | \ - SVGA3DFORMAT_OP_CUBETEXTURE | \ - SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET | \ - SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET | \ - SVGA3DFORMAT_OP_ZSTENCIL | \ - SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH | \ - SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET | \ - SVGA3DFORMAT_OP_DISPLAYMODE | \ - SVGA3DFORMAT_OP_3DACCELERATION | \ - SVGA3DFORMAT_OP_PIXELSIZE | \ - SVGA3DFORMAT_OP_CONVERT_TO_ARGB | \ - SVGA3DFORMAT_OP_OFFSCREENPLAIN | \ - SVGA3DFORMAT_OP_SRGBREAD | \ - SVGA3DFORMAT_OP_BUMPMAP | \ - SVGA3DFORMAT_OP_DMAP | \ - SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB | \ - SVGA3DFORMAT_OP_SRGBWRITE | \ - SVGA3DFORMAT_OP_AUTOGENMIPMAP | \ - SVGA3DFORMAT_OP_VERTEXTEXTURE) - -#define SVGA3D_FORMAT_NEGATIVE \ - (SVGA3DFORMAT_OP_NOFILTER | \ - SVGA3DFORMAT_OP_NOALPHABLEND | \ - SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP) +#define SVGA3D_FORMAT_POSITIVE \ + (SVGA3DFORMAT_OP_TEXTURE | SVGA3DFORMAT_OP_VOLUMETEXTURE | \ + SVGA3DFORMAT_OP_CUBETEXTURE | \ + SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET | \ + SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET | SVGA3DFORMAT_OP_ZSTENCIL | \ + SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH | \ + SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET | \ + SVGA3DFORMAT_OP_DISPLAYMODE | SVGA3DFORMAT_OP_3DACCELERATION | \ + SVGA3DFORMAT_OP_PIXELSIZE | SVGA3DFORMAT_OP_CONVERT_TO_ARGB | \ + SVGA3DFORMAT_OP_OFFSCREENPLAIN | SVGA3DFORMAT_OP_SRGBREAD | \ + SVGA3DFORMAT_OP_BUMPMAP | SVGA3DFORMAT_OP_DMAP | \ + SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB | SVGA3DFORMAT_OP_SRGBWRITE | \ + SVGA3DFORMAT_OP_AUTOGENMIPMAP | SVGA3DFORMAT_OP_VERTEXTEXTURE) + +#define SVGA3D_FORMAT_NEGATIVE \ + (SVGA3DFORMAT_OP_NOFILTER | SVGA3DFORMAT_OP_NOALPHABLEND | \ + SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP) -/* - * This structure is a conversion of SVGA3DFORMAT_OP_* - * Entries must be located at the same position. 
- */ typedef union { - uint32 value; - struct { - uint32 texture : 1; - uint32 volumeTexture : 1; - uint32 cubeTexture : 1; - uint32 offscreenRenderTarget : 1; - uint32 sameFormatRenderTarget : 1; - uint32 unknown1 : 1; - uint32 zStencil : 1; - uint32 zStencilArbitraryDepth : 1; - uint32 sameFormatUpToAlpha : 1; - uint32 unknown2 : 1; - uint32 displayMode : 1; - uint32 acceleration3d : 1; - uint32 pixelSize : 1; - uint32 convertToARGB : 1; - uint32 offscreenPlain : 1; - uint32 sRGBRead : 1; - uint32 bumpMap : 1; - uint32 dmap : 1; - uint32 noFilter : 1; - uint32 memberOfGroupARGB : 1; - uint32 sRGBWrite : 1; - uint32 noAlphaBlend : 1; - uint32 autoGenMipMap : 1; - uint32 vertexTexture : 1; - uint32 noTexCoordWrapNorMip : 1; - }; + uint32 value; + struct { + uint32 texture : 1; + uint32 volumeTexture : 1; + uint32 cubeTexture : 1; + uint32 offscreenRenderTarget : 1; + uint32 sameFormatRenderTarget : 1; + uint32 unknown1 : 1; + uint32 zStencil : 1; + uint32 zStencilArbitraryDepth : 1; + uint32 sameFormatUpToAlpha : 1; + uint32 unknown2 : 1; + uint32 displayMode : 1; + uint32 acceleration3d : 1; + uint32 pixelSize : 1; + uint32 convertToARGB : 1; + uint32 offscreenPlain : 1; + uint32 sRGBRead : 1; + uint32 bumpMap : 1; + uint32 dmap : 1; + uint32 noFilter : 1; + uint32 memberOfGroupARGB : 1; + uint32 sRGBWrite : 1; + uint32 noAlphaBlend : 1; + uint32 autoGenMipMap : 1; + uint32 vertexTexture : 1; + uint32 noTexCoordWrapNorMip : 1; + }; } SVGA3dSurfaceFormatCaps; -/* - * SVGA_3D_CMD_SETRENDERSTATE Types. All value types - * must fit in a uint32. - */ - typedef enum { - SVGA3D_RS_INVALID = 0, - SVGA3D_RS_MIN = 1, - SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */ - SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */ - SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */ - SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */ - SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */ - SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */ - SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */ - SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */ - SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */ - SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */ - SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */ - SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */ - SVGA3D_RS_STENCILREF = 13, /* uint32 */ - SVGA3D_RS_STENCILMASK = 14, /* uint32 */ - SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */ - SVGA3D_RS_FOGSTART = 16, /* float */ - SVGA3D_RS_FOGEND = 17, /* float */ - SVGA3D_RS_FOGDENSITY = 18, /* float */ - SVGA3D_RS_POINTSIZE = 19, /* float */ - SVGA3D_RS_POINTSIZEMIN = 20, /* float */ - SVGA3D_RS_POINTSIZEMAX = 21, /* float */ - SVGA3D_RS_POINTSCALE_A = 22, /* float */ - SVGA3D_RS_POINTSCALE_B = 23, /* float */ - SVGA3D_RS_POINTSCALE_C = 24, /* float */ - SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */ - SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */ - SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */ - SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */ - SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */ - SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */ - SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */ - SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */ - SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */ - SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */ - SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */ - SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */ - SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */ - SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */ - SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */ - SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */ - SVGA3D_RS_STENCILPASS = 41, /* 
SVGA3dStencilOp */ - SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */ - SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */ - SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */ - SVGA3D_RS_ZBIAS = 45, /* float */ - SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */ - SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */ - SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */ - SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */ - SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */ - SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */ - SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */ - SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */ - SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */ - SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */ - SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */ - SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */ - SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */ - SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */ - SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */ - SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */ - SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */ - SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */ - SVGA3D_RS_DEPTHBIAS = 64, /* float */ - - - /* - * Output Gamma Level - * - * Output gamma effects the gamma curve of colors that are output from the - * rendering pipeline. A value of 1.0 specifies a linear color space. If the - * value is <= 0.0, gamma correction is ignored and linear color space is - * used. - */ - - SVGA3D_RS_OUTPUTGAMMA = 65, /* float */ - SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */ - SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */ - SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */ - SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */ - SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */ - SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */ - SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */ - SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */ - SVGA3D_RS_TWEENFACTOR = 88, /* float */ - SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */ - SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */ - SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */ - SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */ - SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */ - SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */ - SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */ - SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */ - SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */ - SVGA3D_RS_LINEWIDTH = 98, /* float */ - SVGA3D_RS_MAX + SVGA3D_RS_INVALID = 0, + SVGA3D_RS_MIN = 1, + SVGA3D_RS_ZENABLE = 1, + SVGA3D_RS_ZWRITEENABLE = 2, + SVGA3D_RS_ALPHATESTENABLE = 3, + SVGA3D_RS_DITHERENABLE = 4, + SVGA3D_RS_BLENDENABLE = 5, + SVGA3D_RS_FOGENABLE = 6, + SVGA3D_RS_SPECULARENABLE = 7, + SVGA3D_RS_STENCILENABLE = 8, 
+ SVGA3D_RS_LIGHTINGENABLE = 9, + SVGA3D_RS_NORMALIZENORMALS = 10, + SVGA3D_RS_POINTSPRITEENABLE = 11, + SVGA3D_RS_POINTSCALEENABLE = 12, + SVGA3D_RS_STENCILREF = 13, + SVGA3D_RS_STENCILMASK = 14, + SVGA3D_RS_STENCILWRITEMASK = 15, + SVGA3D_RS_FOGSTART = 16, + SVGA3D_RS_FOGEND = 17, + SVGA3D_RS_FOGDENSITY = 18, + SVGA3D_RS_POINTSIZE = 19, + SVGA3D_RS_POINTSIZEMIN = 20, + SVGA3D_RS_POINTSIZEMAX = 21, + SVGA3D_RS_POINTSCALE_A = 22, + SVGA3D_RS_POINTSCALE_B = 23, + SVGA3D_RS_POINTSCALE_C = 24, + SVGA3D_RS_FOGCOLOR = 25, + SVGA3D_RS_AMBIENT = 26, + SVGA3D_RS_CLIPPLANEENABLE = 27, + SVGA3D_RS_FOGMODE = 28, + SVGA3D_RS_FILLMODE = 29, + SVGA3D_RS_SHADEMODE = 30, + SVGA3D_RS_LINEPATTERN = 31, + SVGA3D_RS_SRCBLEND = 32, + SVGA3D_RS_DSTBLEND = 33, + SVGA3D_RS_BLENDEQUATION = 34, + SVGA3D_RS_CULLMODE = 35, + SVGA3D_RS_ZFUNC = 36, + SVGA3D_RS_ALPHAFUNC = 37, + SVGA3D_RS_STENCILFUNC = 38, + SVGA3D_RS_STENCILFAIL = 39, + SVGA3D_RS_STENCILZFAIL = 40, + SVGA3D_RS_STENCILPASS = 41, + SVGA3D_RS_ALPHAREF = 42, + SVGA3D_RS_FRONTWINDING = 43, + SVGA3D_RS_COORDINATETYPE = 44, + SVGA3D_RS_ZBIAS = 45, + SVGA3D_RS_RANGEFOGENABLE = 46, + SVGA3D_RS_COLORWRITEENABLE = 47, + SVGA3D_RS_VERTEXMATERIALENABLE = 48, + SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, + SVGA3D_RS_SPECULARMATERIALSOURCE = 50, + SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, + SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, + SVGA3D_RS_TEXTUREFACTOR = 53, + SVGA3D_RS_LOCALVIEWER = 54, + SVGA3D_RS_SCISSORTESTENABLE = 55, + SVGA3D_RS_BLENDCOLOR = 56, + SVGA3D_RS_STENCILENABLE2SIDED = 57, + SVGA3D_RS_CCWSTENCILFUNC = 58, + SVGA3D_RS_CCWSTENCILFAIL = 59, + SVGA3D_RS_CCWSTENCILZFAIL = 60, + SVGA3D_RS_CCWSTENCILPASS = 61, + SVGA3D_RS_VERTEXBLEND = 62, + SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, + SVGA3D_RS_DEPTHBIAS = 64, + + SVGA3D_RS_OUTPUTGAMMA = 65, + SVGA3D_RS_ZVISIBLE = 66, + SVGA3D_RS_LASTPIXEL = 67, + SVGA3D_RS_CLIPPING = 68, + SVGA3D_RS_WRAP0 = 69, + SVGA3D_RS_WRAP1 = 70, + SVGA3D_RS_WRAP2 = 71, + SVGA3D_RS_WRAP3 = 72, + SVGA3D_RS_WRAP4 = 73, + SVGA3D_RS_WRAP5 = 74, + SVGA3D_RS_WRAP6 = 75, + SVGA3D_RS_WRAP7 = 76, + SVGA3D_RS_WRAP8 = 77, + SVGA3D_RS_WRAP9 = 78, + SVGA3D_RS_WRAP10 = 79, + SVGA3D_RS_WRAP11 = 80, + SVGA3D_RS_WRAP12 = 81, + SVGA3D_RS_WRAP13 = 82, + SVGA3D_RS_WRAP14 = 83, + SVGA3D_RS_WRAP15 = 84, + SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, + SVGA3D_RS_MULTISAMPLEMASK = 86, + SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, + SVGA3D_RS_TWEENFACTOR = 88, + SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, + SVGA3D_RS_COLORWRITEENABLE1 = 90, + SVGA3D_RS_COLORWRITEENABLE2 = 91, + SVGA3D_RS_COLORWRITEENABLE3 = 92, + SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, + SVGA3D_RS_SRCBLENDALPHA = 94, + SVGA3D_RS_DSTBLENDALPHA = 95, + SVGA3D_RS_BLENDEQUATIONALPHA = 96, + SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, + SVGA3D_RS_LINEWIDTH = 98, + SVGA3D_RS_MAX } SVGA3dRenderStateName; typedef enum { - SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0, - SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1, - SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2, - SVGA3D_TRANSPARENCYANTIALIAS_MAX + SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0, + SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1, + SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2, + SVGA3D_TRANSPARENCYANTIALIAS_MAX } SVGA3dTransparencyAntialiasType; typedef enum { - SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */ - SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */ - SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */ - SVGA3D_VERTEXMATERIAL_MAX = 3, + SVGA3D_VERTEXMATERIAL_NONE = 0, + 
SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, + SVGA3D_VERTEXMATERIAL_SPECULAR = 2, + SVGA3D_VERTEXMATERIAL_MAX = 3, } SVGA3dVertexMaterial; typedef enum { - SVGA3D_FILLMODE_INVALID = 0, - SVGA3D_FILLMODE_MIN = 1, - SVGA3D_FILLMODE_POINT = 1, - SVGA3D_FILLMODE_LINE = 2, - SVGA3D_FILLMODE_FILL = 3, - SVGA3D_FILLMODE_MAX + SVGA3D_FILLMODE_INVALID = 0, + SVGA3D_FILLMODE_MIN = 1, + SVGA3D_FILLMODE_POINT = 1, + SVGA3D_FILLMODE_LINE = 2, + SVGA3D_FILLMODE_FILL = 3, + SVGA3D_FILLMODE_MAX } SVGA3dFillModeType; - -typedef -#include "vmware_pack_begin.h" -union { - struct { - uint16 mode; /* SVGA3dFillModeType */ - uint16 face; /* SVGA3dFace */ - }; - uint32 uintValue; -} -#include "vmware_pack_end.h" -SVGA3dFillMode; +#pragma pack(push, 1) +typedef union { + struct { + uint16 mode; + uint16 face; + }; + uint32 uintValue; +} SVGA3dFillMode; +#pragma pack(pop) typedef enum { - SVGA3D_SHADEMODE_INVALID = 0, - SVGA3D_SHADEMODE_FLAT = 1, - SVGA3D_SHADEMODE_SMOOTH = 2, - SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */ - SVGA3D_SHADEMODE_MAX + SVGA3D_SHADEMODE_INVALID = 0, + SVGA3D_SHADEMODE_FLAT = 1, + SVGA3D_SHADEMODE_SMOOTH = 2, + SVGA3D_SHADEMODE_PHONG = 3, + SVGA3D_SHADEMODE_MAX } SVGA3dShadeMode; -typedef -#include "vmware_pack_begin.h" -union { - struct { - uint16 repeat; - uint16 pattern; - }; - uint32 uintValue; -} -#include "vmware_pack_end.h" -SVGA3dLinePattern; +#pragma pack(push, 1) +typedef union { + struct { + uint16 repeat; + uint16 pattern; + }; + uint32 uintValue; +} SVGA3dLinePattern; +#pragma pack(pop) typedef enum { - SVGA3D_BLENDOP_INVALID = 0, - SVGA3D_BLENDOP_MIN = 1, - SVGA3D_BLENDOP_ZERO = 1, - SVGA3D_BLENDOP_ONE = 2, - SVGA3D_BLENDOP_SRCCOLOR = 3, - SVGA3D_BLENDOP_INVSRCCOLOR = 4, - SVGA3D_BLENDOP_SRCALPHA = 5, - SVGA3D_BLENDOP_INVSRCALPHA = 6, - SVGA3D_BLENDOP_DESTALPHA = 7, - SVGA3D_BLENDOP_INVDESTALPHA = 8, - SVGA3D_BLENDOP_DESTCOLOR = 9, - SVGA3D_BLENDOP_INVDESTCOLOR = 10, - SVGA3D_BLENDOP_SRCALPHASAT = 11, - SVGA3D_BLENDOP_BLENDFACTOR = 12, - SVGA3D_BLENDOP_INVBLENDFACTOR = 13, - SVGA3D_BLENDOP_SRC1COLOR = 14, - SVGA3D_BLENDOP_INVSRC1COLOR = 15, - SVGA3D_BLENDOP_SRC1ALPHA = 16, - SVGA3D_BLENDOP_INVSRC1ALPHA = 17, - SVGA3D_BLENDOP_BLENDFACTORALPHA = 18, - SVGA3D_BLENDOP_INVBLENDFACTORALPHA = 19, - SVGA3D_BLENDOP_MAX + SVGA3D_BLENDOP_INVALID = 0, + SVGA3D_BLENDOP_MIN = 1, + SVGA3D_BLENDOP_ZERO = 1, + SVGA3D_BLENDOP_ONE = 2, + SVGA3D_BLENDOP_SRCCOLOR = 3, + SVGA3D_BLENDOP_INVSRCCOLOR = 4, + SVGA3D_BLENDOP_SRCALPHA = 5, + SVGA3D_BLENDOP_INVSRCALPHA = 6, + SVGA3D_BLENDOP_DESTALPHA = 7, + SVGA3D_BLENDOP_INVDESTALPHA = 8, + SVGA3D_BLENDOP_DESTCOLOR = 9, + SVGA3D_BLENDOP_INVDESTCOLOR = 10, + SVGA3D_BLENDOP_SRCALPHASAT = 11, + SVGA3D_BLENDOP_BLENDFACTOR = 12, + SVGA3D_BLENDOP_INVBLENDFACTOR = 13, + SVGA3D_BLENDOP_SRC1COLOR = 14, + SVGA3D_BLENDOP_INVSRC1COLOR = 15, + SVGA3D_BLENDOP_SRC1ALPHA = 16, + SVGA3D_BLENDOP_INVSRC1ALPHA = 17, + SVGA3D_BLENDOP_BLENDFACTORALPHA = 18, + SVGA3D_BLENDOP_INVBLENDFACTORALPHA = 19, + SVGA3D_BLENDOP_MAX } SVGA3dBlendOp; typedef enum { - SVGA3D_BLENDEQ_INVALID = 0, - SVGA3D_BLENDEQ_MIN = 1, - SVGA3D_BLENDEQ_ADD = 1, - SVGA3D_BLENDEQ_SUBTRACT = 2, - SVGA3D_BLENDEQ_REVSUBTRACT = 3, - SVGA3D_BLENDEQ_MINIMUM = 4, - SVGA3D_BLENDEQ_MAXIMUM = 5, - SVGA3D_BLENDEQ_MAX + SVGA3D_BLENDEQ_INVALID = 0, + SVGA3D_BLENDEQ_MIN = 1, + SVGA3D_BLENDEQ_ADD = 1, + SVGA3D_BLENDEQ_SUBTRACT = 2, + SVGA3D_BLENDEQ_REVSUBTRACT = 3, + SVGA3D_BLENDEQ_MINIMUM = 4, + SVGA3D_BLENDEQ_MAXIMUM = 5, + SVGA3D_BLENDEQ_MAX } SVGA3dBlendEquation; typedef enum { - SVGA3D_DX11_LOGICOP_MIN = 0, - 
SVGA3D_DX11_LOGICOP_CLEAR = 0, - SVGA3D_DX11_LOGICOP_SET = 1, - SVGA3D_DX11_LOGICOP_COPY = 2, - SVGA3D_DX11_LOGICOP_COPY_INVERTED = 3, - SVGA3D_DX11_LOGICOP_NOOP = 4, - SVGA3D_DX11_LOGICOP_INVERT = 5, - SVGA3D_DX11_LOGICOP_AND = 6, - SVGA3D_DX11_LOGICOP_NAND = 7, - SVGA3D_DX11_LOGICOP_OR = 8, - SVGA3D_DX11_LOGICOP_NOR = 9, - SVGA3D_DX11_LOGICOP_XOR = 10, - SVGA3D_DX11_LOGICOP_EQUIV = 11, - SVGA3D_DX11_LOGICOP_AND_REVERSE = 12, - SVGA3D_DX11_LOGICOP_AND_INVERTED = 13, - SVGA3D_DX11_LOGICOP_OR_REVERSE = 14, - SVGA3D_DX11_LOGICOP_OR_INVERTED = 15, - SVGA3D_DX11_LOGICOP_MAX + SVGA3D_DX11_LOGICOP_MIN = 0, + SVGA3D_DX11_LOGICOP_CLEAR = 0, + SVGA3D_DX11_LOGICOP_SET = 1, + SVGA3D_DX11_LOGICOP_COPY = 2, + SVGA3D_DX11_LOGICOP_COPY_INVERTED = 3, + SVGA3D_DX11_LOGICOP_NOOP = 4, + SVGA3D_DX11_LOGICOP_INVERT = 5, + SVGA3D_DX11_LOGICOP_AND = 6, + SVGA3D_DX11_LOGICOP_NAND = 7, + SVGA3D_DX11_LOGICOP_OR = 8, + SVGA3D_DX11_LOGICOP_NOR = 9, + SVGA3D_DX11_LOGICOP_XOR = 10, + SVGA3D_DX11_LOGICOP_EQUIV = 11, + SVGA3D_DX11_LOGICOP_AND_REVERSE = 12, + SVGA3D_DX11_LOGICOP_AND_INVERTED = 13, + SVGA3D_DX11_LOGICOP_OR_REVERSE = 14, + SVGA3D_DX11_LOGICOP_OR_INVERTED = 15, + SVGA3D_DX11_LOGICOP_MAX } SVGA3dDX11LogicOp; typedef enum { - SVGA3D_FRONTWINDING_INVALID = 0, - SVGA3D_FRONTWINDING_CW = 1, - SVGA3D_FRONTWINDING_CCW = 2, - SVGA3D_FRONTWINDING_MAX + SVGA3D_FRONTWINDING_INVALID = 0, + SVGA3D_FRONTWINDING_CW = 1, + SVGA3D_FRONTWINDING_MIN = 1, + SVGA3D_FRONTWINDING_CCW = 2, + SVGA3D_FRONTWINDING_MAX } SVGA3dFrontWinding; typedef enum { - SVGA3D_FACE_INVALID = 0, - SVGA3D_FACE_NONE = 1, - SVGA3D_FACE_MIN = 1, - SVGA3D_FACE_FRONT = 2, - SVGA3D_FACE_BACK = 3, - SVGA3D_FACE_FRONT_BACK = 4, - SVGA3D_FACE_MAX + SVGA3D_FACE_INVALID = 0, + SVGA3D_FACE_NONE = 1, + SVGA3D_FACE_MIN = 1, + SVGA3D_FACE_FRONT = 2, + SVGA3D_FACE_BACK = 3, + SVGA3D_FACE_FRONT_BACK = 4, + SVGA3D_FACE_MAX } SVGA3dFace; -/* - * The order and the values should not be changed - */ - typedef enum { - SVGA3D_CMP_INVALID = 0, - SVGA3D_CMP_NEVER = 1, - SVGA3D_CMP_LESS = 2, - SVGA3D_CMP_EQUAL = 3, - SVGA3D_CMP_LESSEQUAL = 4, - SVGA3D_CMP_GREATER = 5, - SVGA3D_CMP_NOTEQUAL = 6, - SVGA3D_CMP_GREATEREQUAL = 7, - SVGA3D_CMP_ALWAYS = 8, - SVGA3D_CMP_MAX + SVGA3D_CMP_INVALID = 0, + SVGA3D_CMP_NEVER = 1, + SVGA3D_CMP_LESS = 2, + SVGA3D_CMP_EQUAL = 3, + SVGA3D_CMP_LESSEQUAL = 4, + SVGA3D_CMP_GREATER = 5, + SVGA3D_CMP_NOTEQUAL = 6, + SVGA3D_CMP_GREATEREQUAL = 7, + SVGA3D_CMP_ALWAYS = 8, + SVGA3D_CMP_MAX } SVGA3dCmpFunc; -/* - * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows - * the fog factor to be specified in the alpha component of the specular - * (a.k.a. secondary) vertex color. - */ typedef enum { - SVGA3D_FOGFUNC_INVALID = 0, - SVGA3D_FOGFUNC_EXP = 1, - SVGA3D_FOGFUNC_EXP2 = 2, - SVGA3D_FOGFUNC_LINEAR = 3, - SVGA3D_FOGFUNC_PER_VERTEX = 4 + SVGA3D_FOGFUNC_INVALID = 0, + SVGA3D_FOGFUNC_EXP = 1, + SVGA3D_FOGFUNC_EXP2 = 2, + SVGA3D_FOGFUNC_LINEAR = 3, + SVGA3D_FOGFUNC_PER_VERTEX = 4 } SVGA3dFogFunction; -/* - * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex - * or per-pixel basis. - */ typedef enum { - SVGA3D_FOGTYPE_INVALID = 0, - SVGA3D_FOGTYPE_VERTEX = 1, - SVGA3D_FOGTYPE_PIXEL = 2, - SVGA3D_FOGTYPE_MAX = 3 + SVGA3D_FOGTYPE_INVALID = 0, + SVGA3D_FOGTYPE_VERTEX = 1, + SVGA3D_FOGTYPE_PIXEL = 2, + SVGA3D_FOGTYPE_MAX = 3 } SVGA3dFogType; -/* - * SVGA3D_FOGBASE_* selects depth or range-based fog. 
Depth-based fog is - * computed using the eye Z value of each pixel (or vertex), whereas range- - * based fog is computed using the actual distance (range) to the eye. - */ typedef enum { - SVGA3D_FOGBASE_INVALID = 0, - SVGA3D_FOGBASE_DEPTHBASED = 1, - SVGA3D_FOGBASE_RANGEBASED = 2, - SVGA3D_FOGBASE_MAX = 3 + SVGA3D_FOGBASE_INVALID = 0, + SVGA3D_FOGBASE_DEPTHBASED = 1, + SVGA3D_FOGBASE_RANGEBASED = 2, + SVGA3D_FOGBASE_MAX = 3 } SVGA3dFogBase; typedef enum { - SVGA3D_STENCILOP_INVALID = 0, - SVGA3D_STENCILOP_MIN = 1, - SVGA3D_STENCILOP_KEEP = 1, - SVGA3D_STENCILOP_ZERO = 2, - SVGA3D_STENCILOP_REPLACE = 3, - SVGA3D_STENCILOP_INCRSAT = 4, - SVGA3D_STENCILOP_DECRSAT = 5, - SVGA3D_STENCILOP_INVERT = 6, - SVGA3D_STENCILOP_INCR = 7, - SVGA3D_STENCILOP_DECR = 8, - SVGA3D_STENCILOP_MAX + SVGA3D_STENCILOP_INVALID = 0, + SVGA3D_STENCILOP_MIN = 1, + SVGA3D_STENCILOP_KEEP = 1, + SVGA3D_STENCILOP_ZERO = 2, + SVGA3D_STENCILOP_REPLACE = 3, + SVGA3D_STENCILOP_INCRSAT = 4, + SVGA3D_STENCILOP_DECRSAT = 5, + SVGA3D_STENCILOP_INVERT = 6, + SVGA3D_STENCILOP_INCR = 7, + SVGA3D_STENCILOP_DECR = 8, + SVGA3D_STENCILOP_MAX } SVGA3dStencilOp; typedef enum { - SVGA3D_CLIPPLANE_0 = (1 << 0), - SVGA3D_CLIPPLANE_1 = (1 << 1), - SVGA3D_CLIPPLANE_2 = (1 << 2), - SVGA3D_CLIPPLANE_3 = (1 << 3), - SVGA3D_CLIPPLANE_4 = (1 << 4), - SVGA3D_CLIPPLANE_5 = (1 << 5), + SVGA3D_CLIPPLANE_0 = (1 << 0), + SVGA3D_CLIPPLANE_1 = (1 << 1), + SVGA3D_CLIPPLANE_2 = (1 << 2), + SVGA3D_CLIPPLANE_3 = (1 << 3), + SVGA3D_CLIPPLANE_4 = (1 << 4), + SVGA3D_CLIPPLANE_5 = (1 << 5), } SVGA3dClipPlanes; typedef enum { - SVGA3D_CLEAR_COLOR = 0x1, - SVGA3D_CLEAR_DEPTH = 0x2, - SVGA3D_CLEAR_STENCIL = 0x4, - - /* - * Hint only, must be used together with SVGA3D_CLEAR_COLOR. If - * SVGA3D_CLEAR_DEPTH or SVGA3D_CLEAR_STENCIL bit is set, this - * bit will be ignored. 
- */ - SVGA3D_CLEAR_COLORFILL = 0x8 + SVGA3D_CLEAR_COLOR = 0x1, + SVGA3D_CLEAR_DEPTH = 0x2, + SVGA3D_CLEAR_STENCIL = 0x4, + + SVGA3D_CLEAR_COLORFILL = 0x8 } SVGA3dClearFlag; typedef enum { - SVGA3D_RT_DEPTH = 0, - SVGA3D_RT_MIN = 0, - SVGA3D_RT_STENCIL = 1, - SVGA3D_RT_COLOR0 = 2, - SVGA3D_RT_COLOR1 = 3, - SVGA3D_RT_COLOR2 = 4, - SVGA3D_RT_COLOR3 = 5, - SVGA3D_RT_COLOR4 = 6, - SVGA3D_RT_COLOR5 = 7, - SVGA3D_RT_COLOR6 = 8, - SVGA3D_RT_COLOR7 = 9, - SVGA3D_RT_MAX, - SVGA3D_RT_INVALID = ((uint32)-1), + SVGA3D_RT_DEPTH = 0, + SVGA3D_RT_MIN = 0, + SVGA3D_RT_STENCIL = 1, + SVGA3D_RT_COLOR0 = 2, + SVGA3D_RT_COLOR1 = 3, + SVGA3D_RT_COLOR2 = 4, + SVGA3D_RT_COLOR3 = 5, + SVGA3D_RT_COLOR4 = 6, + SVGA3D_RT_COLOR5 = 7, + SVGA3D_RT_COLOR6 = 8, + SVGA3D_RT_COLOR7 = 9, + SVGA3D_RT_MAX, + SVGA3D_RT_INVALID = ((uint32)-1), } SVGA3dRenderTargetType; #define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1) -typedef -#include "vmware_pack_begin.h" -union { - struct { - uint32 red : 1; - uint32 green : 1; - uint32 blue : 1; - uint32 alpha : 1; - }; - uint32 uintValue; -} -#include "vmware_pack_end.h" -SVGA3dColorMask; +#pragma pack(push, 1) +typedef union { + struct { + uint32 red : 1; + uint32 green : 1; + uint32 blue : 1; + uint32 alpha : 1; + }; + uint32 uintValue; +} SVGA3dColorMask; +#pragma pack(pop) typedef enum { - SVGA3D_VBLEND_DISABLE = 0, - SVGA3D_VBLEND_1WEIGHT = 1, - SVGA3D_VBLEND_2WEIGHT = 2, - SVGA3D_VBLEND_3WEIGHT = 3, - SVGA3D_VBLEND_MAX = 4, + SVGA3D_VBLEND_DISABLE = 0, + SVGA3D_VBLEND_1WEIGHT = 1, + SVGA3D_VBLEND_2WEIGHT = 2, + SVGA3D_VBLEND_3WEIGHT = 3, + SVGA3D_VBLEND_MAX = 4, } SVGA3dVertexBlendFlags; typedef enum { - SVGA3D_WRAPCOORD_0 = 1 << 0, - SVGA3D_WRAPCOORD_1 = 1 << 1, - SVGA3D_WRAPCOORD_2 = 1 << 2, - SVGA3D_WRAPCOORD_3 = 1 << 3, - SVGA3D_WRAPCOORD_ALL = 0xF, + SVGA3D_WRAPCOORD_0 = 1 << 0, + SVGA3D_WRAPCOORD_1 = 1 << 1, + SVGA3D_WRAPCOORD_2 = 1 << 2, + SVGA3D_WRAPCOORD_3 = 1 << 3, + SVGA3D_WRAPCOORD_ALL = 0xF, } SVGA3dWrapFlags; -/* - * SVGA_3D_CMD_TEXTURESTATE Types. All value types - * must fit in a uint32. - */ - typedef enum { - SVGA3D_TS_INVALID = 0, - SVGA3D_TS_MIN = 1, - SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */ - SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */ - SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */ - SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */ - SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */ - SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */ - SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */ - SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */ - SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */ - SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */ - SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */ - SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */ - SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */ - SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */ - SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */ - SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */ - SVGA3D_TS_BUMPENVMAT00 = 17, /* float */ - SVGA3D_TS_BUMPENVMAT01 = 18, /* float */ - SVGA3D_TS_BUMPENVMAT10 = 19, /* float */ - SVGA3D_TS_BUMPENVMAT11 = 20, /* float */ - SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */ - SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */ - SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */ - SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */ - - - /* - * Sampler Gamma Level - * - * Sampler gamma effects the color of samples taken from the sampler. A - * value of 1.0 will produce linear samples. 
If the value is <= 0.0 the - * gamma value is ignored and a linear space is used. - */ - - SVGA3D_TS_GAMMA = 25, /* float */ - SVGA3D_TS_BUMPENVLSCALE = 26, /* float */ - SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */ - SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */ - SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */ - SVGA3D_TS_PREGB_MAX = 30, /* Max value before GBObjects */ - SVGA3D_TS_CONSTANT = 30, /* SVGA3dColor */ - SVGA3D_TS_COLOR_KEY_ENABLE = 31, /* SVGA3dBool */ - SVGA3D_TS_COLOR_KEY = 32, /* SVGA3dColor */ - SVGA3D_TS_MAX + SVGA3D_TS_INVALID = 0, + SVGA3D_TS_MIN = 1, + SVGA3D_TS_BIND_TEXTURE = 1, + SVGA3D_TS_COLOROP = 2, + SVGA3D_TS_COLORARG1 = 3, + SVGA3D_TS_COLORARG2 = 4, + SVGA3D_TS_ALPHAOP = 5, + SVGA3D_TS_ALPHAARG1 = 6, + SVGA3D_TS_ALPHAARG2 = 7, + SVGA3D_TS_ADDRESSU = 8, + SVGA3D_TS_ADDRESSV = 9, + SVGA3D_TS_MIPFILTER = 10, + SVGA3D_TS_MAGFILTER = 11, + SVGA3D_TS_MINFILTER = 12, + SVGA3D_TS_BORDERCOLOR = 13, + SVGA3D_TS_TEXCOORDINDEX = 14, + SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, + SVGA3D_TS_TEXCOORDGEN = 16, + SVGA3D_TS_BUMPENVMAT00 = 17, + SVGA3D_TS_BUMPENVMAT01 = 18, + SVGA3D_TS_BUMPENVMAT10 = 19, + SVGA3D_TS_BUMPENVMAT11 = 20, + SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, + SVGA3D_TS_TEXTURE_LOD_BIAS = 22, + SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, + SVGA3D_TS_ADDRESSW = 24, + + SVGA3D_TS_GAMMA = 25, + SVGA3D_TS_BUMPENVLSCALE = 26, + SVGA3D_TS_BUMPENVLOFFSET = 27, + SVGA3D_TS_COLORARG0 = 28, + SVGA3D_TS_ALPHAARG0 = 29, + SVGA3D_TS_PREGB_MAX = 30, + SVGA3D_TS_CONSTANT = 30, + SVGA3D_TS_COLOR_KEY_ENABLE = 31, + SVGA3D_TS_COLOR_KEY = 32, + SVGA3D_TS_MAX } SVGA3dTextureStateName; typedef enum { - SVGA3D_TC_INVALID = 0, - SVGA3D_TC_DISABLE = 1, - SVGA3D_TC_SELECTARG1 = 2, - SVGA3D_TC_SELECTARG2 = 3, - SVGA3D_TC_MODULATE = 4, - SVGA3D_TC_ADD = 5, - SVGA3D_TC_ADDSIGNED = 6, - SVGA3D_TC_SUBTRACT = 7, - SVGA3D_TC_BLENDTEXTUREALPHA = 8, - SVGA3D_TC_BLENDDIFFUSEALPHA = 9, - SVGA3D_TC_BLENDCURRENTALPHA = 10, - SVGA3D_TC_BLENDFACTORALPHA = 11, - SVGA3D_TC_MODULATE2X = 12, - SVGA3D_TC_MODULATE4X = 13, - SVGA3D_TC_DSDT = 14, - SVGA3D_TC_DOTPRODUCT3 = 15, - SVGA3D_TC_BLENDTEXTUREALPHAPM = 16, - SVGA3D_TC_ADDSIGNED2X = 17, - SVGA3D_TC_ADDSMOOTH = 18, - SVGA3D_TC_PREMODULATE = 19, - SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20, - SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21, - SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22, - SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23, - SVGA3D_TC_BUMPENVMAPLUMINANCE = 24, - SVGA3D_TC_MULTIPLYADD = 25, - SVGA3D_TC_LERP = 26, - SVGA3D_TC_MAX + SVGA3D_TC_INVALID = 0, + SVGA3D_TC_DISABLE = 1, + SVGA3D_TC_SELECTARG1 = 2, + SVGA3D_TC_SELECTARG2 = 3, + SVGA3D_TC_MODULATE = 4, + SVGA3D_TC_ADD = 5, + SVGA3D_TC_ADDSIGNED = 6, + SVGA3D_TC_SUBTRACT = 7, + SVGA3D_TC_BLENDTEXTUREALPHA = 8, + SVGA3D_TC_BLENDDIFFUSEALPHA = 9, + SVGA3D_TC_BLENDCURRENTALPHA = 10, + SVGA3D_TC_BLENDFACTORALPHA = 11, + SVGA3D_TC_MODULATE2X = 12, + SVGA3D_TC_MODULATE4X = 13, + SVGA3D_TC_DSDT = 14, + SVGA3D_TC_DOTPRODUCT3 = 15, + SVGA3D_TC_BLENDTEXTUREALPHAPM = 16, + SVGA3D_TC_ADDSIGNED2X = 17, + SVGA3D_TC_ADDSMOOTH = 18, + SVGA3D_TC_PREMODULATE = 19, + SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20, + SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21, + SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22, + SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23, + SVGA3D_TC_BUMPENVMAPLUMINANCE = 24, + SVGA3D_TC_MULTIPLYADD = 25, + SVGA3D_TC_LERP = 26, + SVGA3D_TC_MAX } SVGA3dTextureCombiner; -#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0) +#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) \ + (svga3d_tc_op ? 
(1 << (svga3d_tc_op - 1)) : 0) typedef enum { - SVGA3D_TEX_ADDRESS_INVALID = 0, - SVGA3D_TEX_ADDRESS_MIN = 1, - SVGA3D_TEX_ADDRESS_WRAP = 1, - SVGA3D_TEX_ADDRESS_MIRROR = 2, - SVGA3D_TEX_ADDRESS_CLAMP = 3, - SVGA3D_TEX_ADDRESS_BORDER = 4, - SVGA3D_TEX_ADDRESS_MIRRORONCE = 5, - SVGA3D_TEX_ADDRESS_EDGE = 6, - SVGA3D_TEX_ADDRESS_MAX + SVGA3D_TEX_ADDRESS_INVALID = 0, + SVGA3D_TEX_ADDRESS_MIN = 1, + SVGA3D_TEX_ADDRESS_WRAP = 1, + SVGA3D_TEX_ADDRESS_MIRROR = 2, + SVGA3D_TEX_ADDRESS_CLAMP = 3, + SVGA3D_TEX_ADDRESS_BORDER = 4, + SVGA3D_TEX_ADDRESS_MIRRORONCE = 5, + SVGA3D_TEX_ADDRESS_EDGE = 6, + SVGA3D_TEX_ADDRESS_MAX } SVGA3dTextureAddress; -/* - * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is - * disabled, and the rasterizer should use the magnification filter instead. - */ typedef enum { - SVGA3D_TEX_FILTER_NONE = 0, - SVGA3D_TEX_FILTER_MIN = 0, - SVGA3D_TEX_FILTER_NEAREST = 1, - SVGA3D_TEX_FILTER_LINEAR = 2, - SVGA3D_TEX_FILTER_ANISOTROPIC = 3, - SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */ - SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */ - SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */ - SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */ - SVGA3D_TEX_FILTER_MAX + SVGA3D_TEX_FILTER_NONE = 0, + SVGA3D_TEX_FILTER_MIN = 0, + SVGA3D_TEX_FILTER_NEAREST = 1, + SVGA3D_TEX_FILTER_LINEAR = 2, + SVGA3D_TEX_FILTER_ANISOTROPIC = 3, + SVGA3D_TEX_FILTER_FLATCUBIC = 4, + SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, + SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, + SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, + SVGA3D_TEX_FILTER_MAX } SVGA3dTextureFilter; typedef enum { - SVGA3D_TEX_TRANSFORM_OFF = 0, - SVGA3D_TEX_TRANSFORM_S = (1 << 0), - SVGA3D_TEX_TRANSFORM_T = (1 << 1), - SVGA3D_TEX_TRANSFORM_R = (1 << 2), - SVGA3D_TEX_TRANSFORM_Q = (1 << 3), - SVGA3D_TEX_PROJECTED = (1 << 15), + SVGA3D_TEX_TRANSFORM_OFF = 0, + SVGA3D_TEX_TRANSFORM_S = (1 << 0), + SVGA3D_TEX_TRANSFORM_T = (1 << 1), + SVGA3D_TEX_TRANSFORM_R = (1 << 2), + SVGA3D_TEX_TRANSFORM_Q = (1 << 3), + SVGA3D_TEX_PROJECTED = (1 << 15), } SVGA3dTexTransformFlags; typedef enum { - SVGA3D_TEXCOORD_GEN_OFF = 0, - SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1, - SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2, - SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3, - SVGA3D_TEXCOORD_GEN_SPHERE = 4, - SVGA3D_TEXCOORD_GEN_MAX + SVGA3D_TEXCOORD_GEN_OFF = 0, + SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1, + SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2, + SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3, + SVGA3D_TEXCOORD_GEN_SPHERE = 4, + SVGA3D_TEXCOORD_GEN_MAX } SVGA3dTextureCoordGen; -/* - * Texture argument constants for texture combiner - */ typedef enum { - SVGA3D_TA_INVALID = 0, - SVGA3D_TA_TFACTOR = 1, - SVGA3D_TA_PREVIOUS = 2, - SVGA3D_TA_DIFFUSE = 3, - SVGA3D_TA_TEXTURE = 4, - SVGA3D_TA_SPECULAR = 5, - SVGA3D_TA_CONSTANT = 6, - SVGA3D_TA_MAX + SVGA3D_TA_INVALID = 0, + SVGA3D_TA_TFACTOR = 1, + SVGA3D_TA_PREVIOUS = 2, + SVGA3D_TA_DIFFUSE = 3, + SVGA3D_TA_TEXTURE = 4, + SVGA3D_TA_SPECULAR = 5, + SVGA3D_TA_CONSTANT = 6, + SVGA3D_TA_MAX } SVGA3dTextureArgData; #define SVGA3D_TM_MASK_LEN 4 -/* Modifiers for texture argument constants defined above. 
*/ typedef enum { - SVGA3D_TM_NONE = 0, - SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN), - SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN), + SVGA3D_TM_NONE = 0, + SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN), + SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN), } SVGA3dTextureArgModifier; -/* - * Vertex declarations - * - * Notes: - * - * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you - * draw with any POSITIONT vertex arrays, the programmable vertex - * pipeline will be implicitly disabled. Drawing will take place as if - * no vertex shader was bound. - */ - typedef enum { - SVGA3D_DECLUSAGE_POSITION = 0, - SVGA3D_DECLUSAGE_BLENDWEIGHT, - SVGA3D_DECLUSAGE_BLENDINDICES, - SVGA3D_DECLUSAGE_NORMAL, - SVGA3D_DECLUSAGE_PSIZE, - SVGA3D_DECLUSAGE_TEXCOORD, - SVGA3D_DECLUSAGE_TANGENT, - SVGA3D_DECLUSAGE_BINORMAL, - SVGA3D_DECLUSAGE_TESSFACTOR, - SVGA3D_DECLUSAGE_POSITIONT, - SVGA3D_DECLUSAGE_COLOR, - SVGA3D_DECLUSAGE_FOG, - SVGA3D_DECLUSAGE_DEPTH, - SVGA3D_DECLUSAGE_SAMPLE, - SVGA3D_DECLUSAGE_MAX + SVGA3D_DECLUSAGE_POSITION = 0, + SVGA3D_DECLUSAGE_BLENDWEIGHT, + SVGA3D_DECLUSAGE_BLENDINDICES, + SVGA3D_DECLUSAGE_NORMAL, + SVGA3D_DECLUSAGE_PSIZE, + SVGA3D_DECLUSAGE_TEXCOORD, + SVGA3D_DECLUSAGE_TANGENT, + SVGA3D_DECLUSAGE_BINORMAL, + SVGA3D_DECLUSAGE_TESSFACTOR, + SVGA3D_DECLUSAGE_POSITIONT, + SVGA3D_DECLUSAGE_COLOR, + SVGA3D_DECLUSAGE_FOG, + SVGA3D_DECLUSAGE_DEPTH, + SVGA3D_DECLUSAGE_SAMPLE, + SVGA3D_DECLUSAGE_MAX } SVGA3dDeclUsage; typedef enum { - SVGA3D_DECLMETHOD_DEFAULT = 0, - SVGA3D_DECLMETHOD_PARTIALU, - SVGA3D_DECLMETHOD_PARTIALV, - SVGA3D_DECLMETHOD_CROSSUV, /* Normal */ - SVGA3D_DECLMETHOD_UV, - SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */ - SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement */ - /* map */ + SVGA3D_DECLMETHOD_DEFAULT = 0, + SVGA3D_DECLMETHOD_PARTIALU, + SVGA3D_DECLMETHOD_PARTIALV, + SVGA3D_DECLMETHOD_CROSSUV, + SVGA3D_DECLMETHOD_UV, + SVGA3D_DECLMETHOD_LOOKUP, + SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, } SVGA3dDeclMethod; typedef enum { - SVGA3D_DECLTYPE_FLOAT1 = 0, - SVGA3D_DECLTYPE_FLOAT2 = 1, - SVGA3D_DECLTYPE_FLOAT3 = 2, - SVGA3D_DECLTYPE_FLOAT4 = 3, - SVGA3D_DECLTYPE_D3DCOLOR = 4, - SVGA3D_DECLTYPE_UBYTE4 = 5, - SVGA3D_DECLTYPE_SHORT2 = 6, - SVGA3D_DECLTYPE_SHORT4 = 7, - SVGA3D_DECLTYPE_UBYTE4N = 8, - SVGA3D_DECLTYPE_SHORT2N = 9, - SVGA3D_DECLTYPE_SHORT4N = 10, - SVGA3D_DECLTYPE_USHORT2N = 11, - SVGA3D_DECLTYPE_USHORT4N = 12, - SVGA3D_DECLTYPE_UDEC3 = 13, - SVGA3D_DECLTYPE_DEC3N = 14, - SVGA3D_DECLTYPE_FLOAT16_2 = 15, - SVGA3D_DECLTYPE_FLOAT16_4 = 16, - SVGA3D_DECLTYPE_MAX, + SVGA3D_DECLTYPE_FLOAT1 = 0, + SVGA3D_DECLTYPE_FLOAT2 = 1, + SVGA3D_DECLTYPE_FLOAT3 = 2, + SVGA3D_DECLTYPE_FLOAT4 = 3, + SVGA3D_DECLTYPE_D3DCOLOR = 4, + SVGA3D_DECLTYPE_UBYTE4 = 5, + SVGA3D_DECLTYPE_SHORT2 = 6, + SVGA3D_DECLTYPE_SHORT4 = 7, + SVGA3D_DECLTYPE_UBYTE4N = 8, + SVGA3D_DECLTYPE_SHORT2N = 9, + SVGA3D_DECLTYPE_SHORT4N = 10, + SVGA3D_DECLTYPE_USHORT2N = 11, + SVGA3D_DECLTYPE_USHORT4N = 12, + SVGA3D_DECLTYPE_UDEC3 = 13, + SVGA3D_DECLTYPE_DEC3N = 14, + SVGA3D_DECLTYPE_FLOAT16_2 = 15, + SVGA3D_DECLTYPE_FLOAT16_4 = 16, + SVGA3D_DECLTYPE_MAX, } SVGA3dDeclType; -/* - * This structure is used for the divisor for geometry instancing; - * it's a direct translation of the Direct3D equivalent. - */ typedef union { - struct { - /* - * For index data, this number represents the number of instances to draw. 
- * For instance data, this number represents the number of - * instances/vertex in this stream - */ - uint32 count : 30; - - /* - * This is 1 if this is supposed to be the data that is repeated for - * every instance. - */ - uint32 indexedData : 1; - - /* - * This is 1 if this is supposed to be the per-instance data. - */ - uint32 instanceData : 1; - }; - - uint32 value; + struct { + uint32 count : 30; + + uint32 indexedData : 1; + + uint32 instanceData : 1; + }; + + uint32 value; } SVGA3dVertexDivisor; typedef enum { - /* - * SVGA3D_PRIMITIVE_INVALID is a valid primitive type. - * - * List MIN second so debuggers will think INVALID is - * the correct name. - */ - SVGA3D_PRIMITIVE_INVALID = 0, - SVGA3D_PRIMITIVE_MIN = 0, - SVGA3D_PRIMITIVE_TRIANGLELIST = 1, - SVGA3D_PRIMITIVE_POINTLIST = 2, - SVGA3D_PRIMITIVE_LINELIST = 3, - SVGA3D_PRIMITIVE_LINESTRIP = 4, - SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5, - SVGA3D_PRIMITIVE_TRIANGLEFAN = 6, - SVGA3D_PRIMITIVE_LINELIST_ADJ = 7, - SVGA3D_PRIMITIVE_PREDX_MAX = 7, - SVGA3D_PRIMITIVE_LINESTRIP_ADJ = 8, - SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ = 9, - SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ = 10, - SVGA3D_PRIMITIVE_DX10_MAX = 11, - SVGA3D_PRIMITIVE_1_CONTROL_POINT_PATCH = 11, - SVGA3D_PRIMITIVE_2_CONTROL_POINT_PATCH = 12, - SVGA3D_PRIMITIVE_3_CONTROL_POINT_PATCH = 13, - SVGA3D_PRIMITIVE_4_CONTROL_POINT_PATCH = 14, - SVGA3D_PRIMITIVE_5_CONTROL_POINT_PATCH = 15, - SVGA3D_PRIMITIVE_6_CONTROL_POINT_PATCH = 16, - SVGA3D_PRIMITIVE_7_CONTROL_POINT_PATCH = 17, - SVGA3D_PRIMITIVE_8_CONTROL_POINT_PATCH = 18, - SVGA3D_PRIMITIVE_9_CONTROL_POINT_PATCH = 19, - SVGA3D_PRIMITIVE_10_CONTROL_POINT_PATCH = 20, - SVGA3D_PRIMITIVE_11_CONTROL_POINT_PATCH = 21, - SVGA3D_PRIMITIVE_12_CONTROL_POINT_PATCH = 22, - SVGA3D_PRIMITIVE_13_CONTROL_POINT_PATCH = 23, - SVGA3D_PRIMITIVE_14_CONTROL_POINT_PATCH = 24, - SVGA3D_PRIMITIVE_15_CONTROL_POINT_PATCH = 25, - SVGA3D_PRIMITIVE_16_CONTROL_POINT_PATCH = 26, - SVGA3D_PRIMITIVE_17_CONTROL_POINT_PATCH = 27, - SVGA3D_PRIMITIVE_18_CONTROL_POINT_PATCH = 28, - SVGA3D_PRIMITIVE_19_CONTROL_POINT_PATCH = 29, - SVGA3D_PRIMITIVE_20_CONTROL_POINT_PATCH = 30, - SVGA3D_PRIMITIVE_21_CONTROL_POINT_PATCH = 31, - SVGA3D_PRIMITIVE_22_CONTROL_POINT_PATCH = 32, - SVGA3D_PRIMITIVE_23_CONTROL_POINT_PATCH = 33, - SVGA3D_PRIMITIVE_24_CONTROL_POINT_PATCH = 34, - SVGA3D_PRIMITIVE_25_CONTROL_POINT_PATCH = 35, - SVGA3D_PRIMITIVE_26_CONTROL_POINT_PATCH = 36, - SVGA3D_PRIMITIVE_27_CONTROL_POINT_PATCH = 37, - SVGA3D_PRIMITIVE_28_CONTROL_POINT_PATCH = 38, - SVGA3D_PRIMITIVE_29_CONTROL_POINT_PATCH = 39, - SVGA3D_PRIMITIVE_30_CONTROL_POINT_PATCH = 40, - SVGA3D_PRIMITIVE_31_CONTROL_POINT_PATCH = 41, - SVGA3D_PRIMITIVE_32_CONTROL_POINT_PATCH = 42, - SVGA3D_PRIMITIVE_MAX = 43 + + SVGA3D_PRIMITIVE_INVALID = 0, + SVGA3D_PRIMITIVE_MIN = 0, + SVGA3D_PRIMITIVE_TRIANGLELIST = 1, + SVGA3D_PRIMITIVE_POINTLIST = 2, + SVGA3D_PRIMITIVE_LINELIST = 3, + SVGA3D_PRIMITIVE_LINESTRIP = 4, + SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5, + SVGA3D_PRIMITIVE_TRIANGLEFAN = 6, + SVGA3D_PRIMITIVE_LINELIST_ADJ = 7, + SVGA3D_PRIMITIVE_PREDX_MAX = 7, + SVGA3D_PRIMITIVE_LINESTRIP_ADJ = 8, + SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ = 9, + SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ = 10, + SVGA3D_PRIMITIVE_DX10_MAX = 11, + SVGA3D_PRIMITIVE_1_CONTROL_POINT_PATCH = 11, + SVGA3D_PRIMITIVE_2_CONTROL_POINT_PATCH = 12, + SVGA3D_PRIMITIVE_3_CONTROL_POINT_PATCH = 13, + SVGA3D_PRIMITIVE_4_CONTROL_POINT_PATCH = 14, + SVGA3D_PRIMITIVE_5_CONTROL_POINT_PATCH = 15, + SVGA3D_PRIMITIVE_6_CONTROL_POINT_PATCH = 16, + SVGA3D_PRIMITIVE_7_CONTROL_POINT_PATCH = 
17, + SVGA3D_PRIMITIVE_8_CONTROL_POINT_PATCH = 18, + SVGA3D_PRIMITIVE_9_CONTROL_POINT_PATCH = 19, + SVGA3D_PRIMITIVE_10_CONTROL_POINT_PATCH = 20, + SVGA3D_PRIMITIVE_11_CONTROL_POINT_PATCH = 21, + SVGA3D_PRIMITIVE_12_CONTROL_POINT_PATCH = 22, + SVGA3D_PRIMITIVE_13_CONTROL_POINT_PATCH = 23, + SVGA3D_PRIMITIVE_14_CONTROL_POINT_PATCH = 24, + SVGA3D_PRIMITIVE_15_CONTROL_POINT_PATCH = 25, + SVGA3D_PRIMITIVE_16_CONTROL_POINT_PATCH = 26, + SVGA3D_PRIMITIVE_17_CONTROL_POINT_PATCH = 27, + SVGA3D_PRIMITIVE_18_CONTROL_POINT_PATCH = 28, + SVGA3D_PRIMITIVE_19_CONTROL_POINT_PATCH = 29, + SVGA3D_PRIMITIVE_20_CONTROL_POINT_PATCH = 30, + SVGA3D_PRIMITIVE_21_CONTROL_POINT_PATCH = 31, + SVGA3D_PRIMITIVE_22_CONTROL_POINT_PATCH = 32, + SVGA3D_PRIMITIVE_23_CONTROL_POINT_PATCH = 33, + SVGA3D_PRIMITIVE_24_CONTROL_POINT_PATCH = 34, + SVGA3D_PRIMITIVE_25_CONTROL_POINT_PATCH = 35, + SVGA3D_PRIMITIVE_26_CONTROL_POINT_PATCH = 36, + SVGA3D_PRIMITIVE_27_CONTROL_POINT_PATCH = 37, + SVGA3D_PRIMITIVE_28_CONTROL_POINT_PATCH = 38, + SVGA3D_PRIMITIVE_29_CONTROL_POINT_PATCH = 39, + SVGA3D_PRIMITIVE_30_CONTROL_POINT_PATCH = 40, + SVGA3D_PRIMITIVE_31_CONTROL_POINT_PATCH = 41, + SVGA3D_PRIMITIVE_32_CONTROL_POINT_PATCH = 42, + SVGA3D_PRIMITIVE_MAX = 43 } SVGA3dPrimitiveType; typedef enum { - SVGA3D_COORDINATE_INVALID = 0, - SVGA3D_COORDINATE_LEFTHANDED = 1, - SVGA3D_COORDINATE_RIGHTHANDED = 2, - SVGA3D_COORDINATE_MAX + SVGA3D_COORDINATE_INVALID = 0, + SVGA3D_COORDINATE_LEFTHANDED = 1, + SVGA3D_COORDINATE_RIGHTHANDED = 2, + SVGA3D_COORDINATE_MAX } SVGA3dCoordinateType; typedef enum { - SVGA3D_TRANSFORM_INVALID = 0, - SVGA3D_TRANSFORM_WORLD = 1, - SVGA3D_TRANSFORM_MIN = 1, - SVGA3D_TRANSFORM_VIEW = 2, - SVGA3D_TRANSFORM_PROJECTION = 3, - SVGA3D_TRANSFORM_TEXTURE0 = 4, - SVGA3D_TRANSFORM_TEXTURE1 = 5, - SVGA3D_TRANSFORM_TEXTURE2 = 6, - SVGA3D_TRANSFORM_TEXTURE3 = 7, - SVGA3D_TRANSFORM_TEXTURE4 = 8, - SVGA3D_TRANSFORM_TEXTURE5 = 9, - SVGA3D_TRANSFORM_TEXTURE6 = 10, - SVGA3D_TRANSFORM_TEXTURE7 = 11, - SVGA3D_TRANSFORM_WORLD1 = 12, - SVGA3D_TRANSFORM_WORLD2 = 13, - SVGA3D_TRANSFORM_WORLD3 = 14, - SVGA3D_TRANSFORM_MAX + SVGA3D_TRANSFORM_INVALID = 0, + SVGA3D_TRANSFORM_WORLD = 1, + SVGA3D_TRANSFORM_MIN = 1, + SVGA3D_TRANSFORM_VIEW = 2, + SVGA3D_TRANSFORM_PROJECTION = 3, + SVGA3D_TRANSFORM_TEXTURE0 = 4, + SVGA3D_TRANSFORM_TEXTURE1 = 5, + SVGA3D_TRANSFORM_TEXTURE2 = 6, + SVGA3D_TRANSFORM_TEXTURE3 = 7, + SVGA3D_TRANSFORM_TEXTURE4 = 8, + SVGA3D_TRANSFORM_TEXTURE5 = 9, + SVGA3D_TRANSFORM_TEXTURE6 = 10, + SVGA3D_TRANSFORM_TEXTURE7 = 11, + SVGA3D_TRANSFORM_WORLD1 = 12, + SVGA3D_TRANSFORM_WORLD2 = 13, + SVGA3D_TRANSFORM_WORLD3 = 14, + SVGA3D_TRANSFORM_MAX } SVGA3dTransformType; typedef enum { - SVGA3D_LIGHTTYPE_INVALID = 0, - SVGA3D_LIGHTTYPE_MIN = 1, - SVGA3D_LIGHTTYPE_POINT = 1, - SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */ - SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */ - SVGA3D_LIGHTTYPE_DIRECTIONAL = 4, - SVGA3D_LIGHTTYPE_MAX + SVGA3D_LIGHTTYPE_INVALID = 0, + SVGA3D_LIGHTTYPE_MIN = 1, + SVGA3D_LIGHTTYPE_POINT = 1, + SVGA3D_LIGHTTYPE_SPOT1 = 2, + SVGA3D_LIGHTTYPE_SPOT2 = 3, + SVGA3D_LIGHTTYPE_DIRECTIONAL = 4, + SVGA3D_LIGHTTYPE_MAX } SVGA3dLightType; typedef enum { - SVGA3D_CUBEFACE_POSX = 0, - SVGA3D_CUBEFACE_NEGX = 1, - SVGA3D_CUBEFACE_POSY = 2, - SVGA3D_CUBEFACE_NEGY = 3, - SVGA3D_CUBEFACE_POSZ = 4, - SVGA3D_CUBEFACE_NEGZ = 5, + SVGA3D_CUBEFACE_POSX = 0, + SVGA3D_CUBEFACE_NEGX = 1, + SVGA3D_CUBEFACE_POSY = 2, + SVGA3D_CUBEFACE_NEGY = 3, + SVGA3D_CUBEFACE_POSZ = 4, + SVGA3D_CUBEFACE_NEGZ = 5, } SVGA3dCubeFace; typedef enum { 
- SVGA3D_SHADERTYPE_INVALID = 0, - SVGA3D_SHADERTYPE_MIN = 1, - SVGA3D_SHADERTYPE_VS = 1, - SVGA3D_SHADERTYPE_PS = 2, - SVGA3D_SHADERTYPE_PREDX_MAX = 3, - SVGA3D_SHADERTYPE_GS = 3, - SVGA3D_SHADERTYPE_DX10_MAX = 4, - SVGA3D_SHADERTYPE_HS = 4, - SVGA3D_SHADERTYPE_DS = 5, - SVGA3D_SHADERTYPE_CS = 6, - SVGA3D_SHADERTYPE_MAX = 7 + SVGA3D_SHADERTYPE_INVALID = 0, + SVGA3D_SHADERTYPE_MIN = 1, + SVGA3D_SHADERTYPE_VS = 1, + SVGA3D_SHADERTYPE_PS = 2, + SVGA3D_SHADERTYPE_PREDX_MAX = 3, + SVGA3D_SHADERTYPE_GS = 3, + SVGA3D_SHADERTYPE_DX10_MAX = 4, + SVGA3D_SHADERTYPE_HS = 4, + SVGA3D_SHADERTYPE_DS = 5, + SVGA3D_SHADERTYPE_CS = 6, + SVGA3D_SHADERTYPE_MAX = 7 } SVGA3dShaderType; -#define SVGA3D_NUM_SHADERTYPE_PREDX \ - (SVGA3D_SHADERTYPE_PREDX_MAX - SVGA3D_SHADERTYPE_MIN) +#define SVGA3D_NUM_SHADERTYPE_PREDX \ + (SVGA3D_SHADERTYPE_PREDX_MAX - SVGA3D_SHADERTYPE_MIN) -#define SVGA3D_NUM_SHADERTYPE_DX10 \ - (SVGA3D_SHADERTYPE_DX10_MAX - SVGA3D_SHADERTYPE_MIN) +#define SVGA3D_NUM_SHADERTYPE_DX10 \ + (SVGA3D_SHADERTYPE_DX10_MAX - SVGA3D_SHADERTYPE_MIN) -#define SVGA3D_NUM_SHADERTYPE \ - (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN) +#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN) typedef enum { - SVGA3D_CONST_TYPE_MIN = 0, - SVGA3D_CONST_TYPE_FLOAT = 0, - SVGA3D_CONST_TYPE_INT = 1, - SVGA3D_CONST_TYPE_BOOL = 2, - SVGA3D_CONST_TYPE_MAX = 3, + SVGA3D_CONST_TYPE_MIN = 0, + SVGA3D_CONST_TYPE_FLOAT = 0, + SVGA3D_CONST_TYPE_INT = 1, + SVGA3D_CONST_TYPE_BOOL = 2, + SVGA3D_CONST_TYPE_MAX = 3, } SVGA3dShaderConstType; -/* - * Register limits for shader consts. - */ -#define SVGA3D_CONSTREG_MAX 256 -#define SVGA3D_CONSTINTREG_MAX 16 -#define SVGA3D_CONSTBOOLREG_MAX 16 +#define SVGA3D_CONSTREG_MAX 256 +#define SVGA3D_CONSTINTREG_MAX 16 +#define SVGA3D_CONSTBOOLREG_MAX 16 typedef enum { - SVGA3D_STRETCH_BLT_POINT = 0, - SVGA3D_STRETCH_BLT_LINEAR = 1, - SVGA3D_STRETCH_BLT_MAX + SVGA3D_STRETCH_BLT_POINT = 0, + SVGA3D_STRETCH_BLT_LINEAR = 1, + SVGA3D_STRETCH_BLT_MAX } SVGA3dStretchBltMode; typedef enum { - SVGA3D_QUERYTYPE_INVALID = ((uint8)-1), - SVGA3D_QUERYTYPE_MIN = 0, - SVGA3D_QUERYTYPE_OCCLUSION = 0, - SVGA3D_QUERYTYPE_TIMESTAMP = 1, - SVGA3D_QUERYTYPE_TIMESTAMPDISJOINT = 2, - SVGA3D_QUERYTYPE_PIPELINESTATS = 3, - SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE = 4, - SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS = 5, - SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE = 6, - SVGA3D_QUERYTYPE_OCCLUSION64 = 7, - SVGA3D_QUERYTYPE_DX10_MAX = 8, - SVGA3D_QUERYTYPE_SOSTATS_STREAM0 = 8, - SVGA3D_QUERYTYPE_SOSTATS_STREAM1 = 9, - SVGA3D_QUERYTYPE_SOSTATS_STREAM2 = 10, - SVGA3D_QUERYTYPE_SOSTATS_STREAM3 = 11, - SVGA3D_QUERYTYPE_SOP_STREAM0 = 12, - SVGA3D_QUERYTYPE_SOP_STREAM1 = 13, - SVGA3D_QUERYTYPE_SOP_STREAM2 = 14, - SVGA3D_QUERYTYPE_SOP_STREAM3 = 15, - SVGA3D_QUERYTYPE_MAX + SVGA3D_QUERYTYPE_INVALID = ((uint8)-1), + SVGA3D_QUERYTYPE_MIN = 0, + SVGA3D_QUERYTYPE_OCCLUSION = 0, + SVGA3D_QUERYTYPE_TIMESTAMP = 1, + SVGA3D_QUERYTYPE_TIMESTAMPDISJOINT = 2, + SVGA3D_QUERYTYPE_PIPELINESTATS = 3, + SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE = 4, + SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS = 5, + SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE = 6, + SVGA3D_QUERYTYPE_OCCLUSION64 = 7, + SVGA3D_QUERYTYPE_DX10_MAX = 8, + SVGA3D_QUERYTYPE_SOSTATS_STREAM0 = 8, + SVGA3D_QUERYTYPE_SOSTATS_STREAM1 = 9, + SVGA3D_QUERYTYPE_SOSTATS_STREAM2 = 10, + SVGA3D_QUERYTYPE_SOSTATS_STREAM3 = 11, + SVGA3D_QUERYTYPE_SOP_STREAM0 = 12, + SVGA3D_QUERYTYPE_SOP_STREAM1 = 13, + SVGA3D_QUERYTYPE_SOP_STREAM2 = 14, + SVGA3D_QUERYTYPE_SOP_STREAM3 = 15, + SVGA3D_QUERYTYPE_MAX } 
SVGA3dQueryType; typedef uint8 SVGA3dQueryTypeUint8; -#define SVGA3D_NUM_QUERYTYPE (SVGA3D_QUERYTYPE_MAX - SVGA3D_QUERYTYPE_MIN) +#define SVGA3D_NUM_QUERYTYPE (SVGA3D_QUERYTYPE_MAX - SVGA3D_QUERYTYPE_MIN) -/* - * This is the maximum number of queries per context that can be active - * simultaneously between a beginQuery and endQuery. - */ #define SVGA3D_MAX_QUERY 64 -/* - * Query result buffer formats - */ -typedef -#include "vmware_pack_begin.h" -struct { - uint32 samplesRendered; -} -#include "vmware_pack_end.h" -SVGADXOcclusionQueryResult; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 passed; -} -#include "vmware_pack_end.h" -SVGADXEventQueryResult; - -typedef -#include "vmware_pack_begin.h" -struct { - uint64 timestamp; -} -#include "vmware_pack_end.h" -SVGADXTimestampQueryResult; - -typedef -#include "vmware_pack_begin.h" -struct { - uint64 realFrequency; - uint32 disjoint; -} -#include "vmware_pack_end.h" -SVGADXTimestampDisjointQueryResult; - -typedef -#include "vmware_pack_begin.h" -struct { - uint64 inputAssemblyVertices; - uint64 inputAssemblyPrimitives; - uint64 vertexShaderInvocations; - uint64 geometryShaderInvocations; - uint64 geometryShaderPrimitives; - uint64 clipperInvocations; - uint64 clipperPrimitives; - uint64 pixelShaderInvocations; - uint64 hullShaderInvocations; - uint64 domainShaderInvocations; - uint64 computeShaderInvocations; -} -#include "vmware_pack_end.h" -SVGADXPipelineStatisticsQueryResult; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 anySamplesRendered; -} -#include "vmware_pack_end.h" -SVGADXOcclusionPredicateQueryResult; - -typedef -#include "vmware_pack_begin.h" -struct { - uint64 numPrimitivesWritten; - uint64 numPrimitivesRequired; -} -#include "vmware_pack_end.h" -SVGADXStreamOutStatisticsQueryResult; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 overflowed; -} -#include "vmware_pack_end.h" -SVGADXStreamOutPredicateQueryResult; - -typedef -#include "vmware_pack_begin.h" -struct { - uint64 samplesRendered; -} -#include "vmware_pack_end.h" -SVGADXOcclusion64QueryResult; - -/* - * SVGADXQueryResultUnion is not intended for use in the protocol, but is - * very helpful when working with queries generically. 
- */ -typedef -#include "vmware_pack_begin.h" -union SVGADXQueryResultUnion { - SVGADXOcclusionQueryResult occ; - SVGADXEventQueryResult event; - SVGADXTimestampQueryResult ts; - SVGADXTimestampDisjointQueryResult tsDisjoint; - SVGADXPipelineStatisticsQueryResult pipelineStats; - SVGADXOcclusionPredicateQueryResult occPred; - SVGADXStreamOutStatisticsQueryResult soStats; - SVGADXStreamOutPredicateQueryResult soPred; - SVGADXOcclusion64QueryResult occ64; -} -#include "vmware_pack_end.h" -SVGADXQueryResultUnion; +#pragma pack(push, 1) +typedef struct { + uint32 samplesRendered; +} SVGADXOcclusionQueryResult; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 passed; +} SVGADXEventQueryResult; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint64 timestamp; +} SVGADXTimestampQueryResult; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint64 realFrequency; + uint32 disjoint; +} SVGADXTimestampDisjointQueryResult; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint64 inputAssemblyVertices; + uint64 inputAssemblyPrimitives; + uint64 vertexShaderInvocations; + uint64 geometryShaderInvocations; + uint64 geometryShaderPrimitives; + uint64 clipperInvocations; + uint64 clipperPrimitives; + uint64 pixelShaderInvocations; + uint64 hullShaderInvocations; + uint64 domainShaderInvocations; + uint64 computeShaderInvocations; +} SVGADXPipelineStatisticsQueryResult; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 anySamplesRendered; +} SVGADXOcclusionPredicateQueryResult; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint64 numPrimitivesWritten; + uint64 numPrimitivesRequired; +} SVGADXStreamOutStatisticsQueryResult; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 overflowed; +} SVGADXStreamOutPredicateQueryResult; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint64 samplesRendered; +} SVGADXOcclusion64QueryResult; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef union SVGADXQueryResultUnion { + SVGADXOcclusionQueryResult occ; + SVGADXEventQueryResult event; + SVGADXTimestampQueryResult ts; + SVGADXTimestampDisjointQueryResult tsDisjoint; + SVGADXPipelineStatisticsQueryResult pipelineStats; + SVGADXOcclusionPredicateQueryResult occPred; + SVGADXStreamOutStatisticsQueryResult soStats; + SVGADXStreamOutPredicateQueryResult soPred; + SVGADXOcclusion64QueryResult occ64; +} SVGADXQueryResultUnion; +#pragma pack(pop) typedef enum { - SVGA3D_QUERYSTATE_PENDING = 0, /* Query is not finished yet */ - SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully */ - SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully */ - SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (guest only) */ + SVGA3D_QUERYSTATE_PENDING = 0, + SVGA3D_QUERYSTATE_SUCCEEDED = 1, + SVGA3D_QUERYSTATE_FAILED = 2, + SVGA3D_QUERYSTATE_NEW = 3, } SVGA3dQueryState; typedef enum { - SVGA3D_WRITE_HOST_VRAM = 1, - SVGA3D_READ_HOST_VRAM = 2, + SVGA3D_WRITE_HOST_VRAM = 1, + SVGA3D_READ_HOST_VRAM = 2, } SVGA3dTransferType; -#define SVGA3D_LOGICOP_INVALID 0 -#define SVGA3D_LOGICOP_MIN 1 -#define SVGA3D_LOGICOP_COPY 1 -#define SVGA3D_LOGICOP_NOT 2 -#define SVGA3D_LOGICOP_AND 3 -#define SVGA3D_LOGICOP_OR 4 -#define SVGA3D_LOGICOP_XOR 5 -#define SVGA3D_LOGICOP_NXOR 6 -#define SVGA3D_LOGICOP_ROP3 7 -#define SVGA3D_LOGICOP_MAX 8 +#define SVGA3D_LOGICOP_INVALID 0 +#define SVGA3D_LOGICOP_COPY 1 + +#define SVGA3D_LOGICOP_MIN 1 +#define SVGA3D_LOGICOP_NOT 2 +#define SVGA3D_LOGICOP_AND 3 +#define 
SVGA3D_LOGICOP_OR 4 +#define SVGA3D_LOGICOP_XOR 5 +#define SVGA3D_LOGICOP_NXOR 6 +#define SVGA3D_LOGICOP_ROP3 7 + +#define SVGA3D_LOGICOP_MAX 8 typedef uint16 SVGA3dLogicOp; -#define SVGA3D_LOGICOP_ROP3_INVALID ((uint16) -1) -#define SVGA3D_LOGICOP_ROP3_MIN 0 -#define SVGA3D_LOGICOP_ROP3_MAX 256 +#define SVGA3D_LOGICOP_ROP3_INVALID ((uint16)-1) +#define SVGA3D_LOGICOP_ROP3_MIN 0 +#define SVGA3D_LOGICOP_ROP3_MAX 256 typedef uint16 SVGA3dLogicOpRop3; -typedef -#include "vmware_pack_begin.h" -struct { - union { - struct { - uint16 function; // SVGA3dFogFunction - uint8 type; // SVGA3dFogType - uint8 base; // SVGA3dFogBase - }; - uint32 uintValue; - }; -} -#include "vmware_pack_end.h" -SVGA3dFogMode; - -/* - * Uniquely identify one image (a 1D/2D/3D array) from a surface. This - * is a surface ID as well as face/mipmap indices. - */ -typedef -#include "vmware_pack_begin.h" -struct SVGA3dSurfaceImageId { - uint32 sid; - uint32 face; - uint32 mipmap; -} -#include "vmware_pack_end.h" -SVGA3dSurfaceImageId; - -typedef -#include "vmware_pack_begin.h" -struct SVGA3dSubSurfaceId { - uint32 sid; - uint32 subResourceId; -} -#include "vmware_pack_end.h" -SVGA3dSubSurfaceId; - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 width; - uint32 height; - uint32 depth; -} -#include "vmware_pack_end.h" -SVGA3dSize; +#pragma pack(push, 1) +typedef struct { + union { + struct { + uint16 function; + uint8 type; + uint8 base; + }; + uint32 uintValue; + }; +} SVGA3dFogMode; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dSurfaceImageId { + uint32 sid; + uint32 face; + uint32 mipmap; +} SVGA3dSurfaceImageId; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGA3dSubSurfaceId { + uint32 sid; + uint32 subResourceId; +} SVGA3dSubSurfaceId; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 width; + uint32 height; + uint32 depth; +} SVGA3dSize; +#pragma pack(pop) -/* - * Guest-backed objects definitions. - */ typedef enum { - SVGA_OTABLE_MOB = 0, - SVGA_OTABLE_MIN = 0, - SVGA_OTABLE_SURFACE = 1, - SVGA_OTABLE_CONTEXT = 2, - SVGA_OTABLE_SHADER = 3, - SVGA_OTABLE_SCREENTARGET = 4, - - SVGA_OTABLE_DX9_MAX = 5, - - SVGA_OTABLE_DXCONTEXT = 5, - SVGA_OTABLE_DX_MAX = 6, - - SVGA_OTABLE_RESERVED1 = 6, - SVGA_OTABLE_RESERVED2 = 7, - - /* - * Additions to this table need to be tied to HW-version features and - * checkpointed accordingly. 
- */ - SVGA_OTABLE_DEVEL_MAX = 8, - SVGA_OTABLE_MAX = 8 + SVGA_OTABLE_MOB = 0, + SVGA_OTABLE_MIN = 0, + SVGA_OTABLE_SURFACE = 1, + SVGA_OTABLE_CONTEXT = 2, + SVGA_OTABLE_SHADER = 3, + SVGA_OTABLE_SCREENTARGET = 4, + + SVGA_OTABLE_DX9_MAX = 5, + + SVGA_OTABLE_DXCONTEXT = 5, + SVGA_OTABLE_DX_MAX = 6, + + SVGA_OTABLE_DEVEL_MAX = 6, + SVGA_OTABLE_MAX = 6, + + SVGA_OTABLE_RESERVED1 = 6, + SVGA_OTABLE_RESERVED2 = 7, + SVGA_OTABLE_BUG_1952836_MAX = 8, } SVGAOTableType; typedef enum { - SVGA_COTABLE_MIN = 0, - SVGA_COTABLE_RTVIEW = 0, - SVGA_COTABLE_DSVIEW = 1, - SVGA_COTABLE_SRVIEW = 2, - SVGA_COTABLE_ELEMENTLAYOUT = 3, - SVGA_COTABLE_BLENDSTATE = 4, - SVGA_COTABLE_DEPTHSTENCIL = 5, - SVGA_COTABLE_RASTERIZERSTATE = 6, - SVGA_COTABLE_SAMPLER = 7, - SVGA_COTABLE_STREAMOUTPUT = 8, - SVGA_COTABLE_DXQUERY = 9, - SVGA_COTABLE_DXSHADER = 10, - SVGA_COTABLE_DX10_MAX = 11, - SVGA_COTABLE_UAVIEW = 11, - SVGA_COTABLE_MAX = 12, + SVGA_COTABLE_MIN = 0, + SVGA_COTABLE_RTVIEW = 0, + SVGA_COTABLE_DSVIEW = 1, + SVGA_COTABLE_SRVIEW = 2, + SVGA_COTABLE_ELEMENTLAYOUT = 3, + SVGA_COTABLE_BLENDSTATE = 4, + SVGA_COTABLE_DEPTHSTENCIL = 5, + SVGA_COTABLE_RASTERIZERSTATE = 6, + SVGA_COTABLE_SAMPLER = 7, + SVGA_COTABLE_STREAMOUTPUT = 8, + SVGA_COTABLE_DXQUERY = 9, + SVGA_COTABLE_DXSHADER = 10, + SVGA_COTABLE_DX10_MAX = 11, + SVGA_COTABLE_UAVIEW = 11, + SVGA_COTABLE_MAX = 12, } SVGACOTableType; -/* - * The largest size (number of entries) allowed in a COTable. - */ #define SVGA_COTABLE_MAX_IDS (MAX_UINT16 - 2) typedef enum SVGAMobFormat { - SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID, - SVGA3D_MOBFMT_PTDEPTH_0 = 0, - SVGA3D_MOBFMT_MIN = 0, - SVGA3D_MOBFMT_PTDEPTH_1 = 1, - SVGA3D_MOBFMT_PTDEPTH_2 = 2, - SVGA3D_MOBFMT_RANGE = 3, - SVGA3D_MOBFMT_PTDEPTH64_0 = 4, - SVGA3D_MOBFMT_PTDEPTH64_1 = 5, - SVGA3D_MOBFMT_PTDEPTH64_2 = 6, - SVGA3D_MOBFMT_PREDX_MAX = 7, - SVGA3D_MOBFMT_EMPTY = 7, - SVGA3D_MOBFMT_MAX, - - /* - * This isn't actually used by the guest, but is a mob-format used - * internally by the SVGA device (and is therefore not binary compatible). - */ - SVGA3D_MOBFMT_HB, + SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID, + SVGA3D_MOBFMT_PT_0 = 0, + SVGA3D_MOBFMT_MIN = 0, + SVGA3D_MOBFMT_PT_1 = 1, + SVGA3D_MOBFMT_PT_2 = 2, + SVGA3D_MOBFMT_RANGE = 3, + SVGA3D_MOBFMT_PT64_0 = 4, + SVGA3D_MOBFMT_PT64_1 = 5, + SVGA3D_MOBFMT_PT64_2 = 6, + SVGA3D_MOBFMT_PREDX_MAX = 7, + SVGA3D_MOBFMT_EMPTY = 7, + + SVGA3D_MOBFMT_MAX, + + SVGA3D_MOBFMT_HB, } SVGAMobFormat; #define SVGA3D_MOB_EMPTY_BASE 1 -/* - * Multisample pattern types. - */ - typedef enum SVGA3dMSPattern { - SVGA3D_MS_PATTERN_NONE = 0, - SVGA3D_MS_PATTERN_MIN = 0, - SVGA3D_MS_PATTERN_STANDARD = 1, - SVGA3D_MS_PATTERN_CENTER = 2, - SVGA3D_MS_PATTERN_MAX = 3, + SVGA3D_MS_PATTERN_NONE = 0, + SVGA3D_MS_PATTERN_MIN = 0, + SVGA3D_MS_PATTERN_STANDARD = 1, + SVGA3D_MS_PATTERN_CENTER = 2, + SVGA3D_MS_PATTERN_MAX = 3, } SVGA3dMSPattern; -/* - * Precision settings for each sample. 
- */ - typedef enum SVGA3dMSQualityLevel { - SVGA3D_MS_QUALITY_NONE = 0, - SVGA3D_MS_QUALITY_MIN = 0, - SVGA3D_MS_QUALITY_FULL = 1, - SVGA3D_MS_QUALITY_MAX = 2, + SVGA3D_MS_QUALITY_NONE = 0, + SVGA3D_MS_QUALITY_MIN = 0, + SVGA3D_MS_QUALITY_FULL = 1, + SVGA3D_MS_QUALITY_RESOLVED = 2, + SVGA3D_MS_QUALITY_MAX = 3, } SVGA3dMSQualityLevel; -/* - * Screen Target Update Flags - */ - typedef enum SVGA3dFrameUpdateType { - SVGA3D_FRAME_END = 0, - SVGA3D_FRAME_PARTIAL = 1, - SVGA3D_FRAME_UNKNOWN = 2, - SVGA3D_FRAME_MAX = 3, + SVGA3D_FRAME_END = 0, + SVGA3D_FRAME_MIN = 0, + SVGA3D_FRAME_PARTIAL = 1, + SVGA3D_FRAME_UNKNOWN = 2, + SVGA3D_FRAME_MAX = 3, } SVGA3dFrameUpdateType; -#endif /* _SVGA3D_TYPES_H_ */ +#endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h index acb41e28e46f..bf242c21f352 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h @@ -1,6 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /********************************************************** - * Copyright 2007-2015 VMware, Inc. + * Copyright 2007,2020 VMware, Inc. + * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -30,61 +30,27 @@ * Definitions for our own (vendor-specific) SVGA Escape commands. */ -#ifndef _SVGA_ESCAPE_H_ -#define _SVGA_ESCAPE_H_ -/* - * Namespace IDs for the escape command - */ +#ifndef _SVGA_ESCAPE_H_ +#define _SVGA_ESCAPE_H_ #define SVGA_ESCAPE_NSID_VMWARE 0x00000000 -#define SVGA_ESCAPE_NSID_DEVEL 0xFFFFFFFF - +#define SVGA_ESCAPE_NSID_DEVEL 0xFFFFFFFF -/* - * Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to - * the first DWORD of escape data (after the nsID and size). As a - * guideline we're using the high word and low word as a major and - * minor command number, respectively. - * - * Major command number allocation: - * - * 0000: Reserved - * 0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h) - * 0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h) - * 0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h) - */ - -#define SVGA_ESCAPE_VMWARE_MAJOR_MASK 0xFFFF0000 - - -/* - * SVGA Hint commands. - * - * These escapes let the SVGA driver provide optional information to - * he host about the state of the guest or guest applications. The - * host can use these hints to make user interface or performance - * decisions. - * - * Notes: - * - * - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests - * that use the SVGA Screen Object extension. Instead of sending - * this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your - * Screen Object. 
- */ +#define SVGA_ESCAPE_VMWARE_MAJOR_MASK 0xFFFF0000 -#define SVGA_ESCAPE_VMWARE_HINT 0x00030000 -#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 /* Deprecated */ +#define SVGA_ESCAPE_VMWARE_HINT 0x00030000 +#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 -typedef -struct { - uint32 command; - uint32 fullscreen; - struct { - int32 x, y; - } monitorPosition; +#pragma pack(push, 1) +typedef struct { + uint32 command; + uint32 fullscreen; + struct { + int32 x, y; + } monitorPosition; } SVGAEscapeHintFullscreen; +#pragma pack(pop) -#endif /* _SVGA_ESCAPE_H_ */ +#endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h index e5385146e7fc..aec17c3c6c29 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h @@ -1,6 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /********************************************************** - * Copyright 2007-2015 VMware, Inc. + * Copyright 2007-2021 VMware, Inc. + * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -30,171 +30,88 @@ * Definitions for video-overlay support. */ + + #ifndef _SVGA_OVERLAY_H_ #define _SVGA_OVERLAY_H_ #include "svga_reg.h" -/* - * Video formats we support - */ +#if defined __cplusplus +extern "C" { +#endif -#define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */ -#define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */ -#define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */ +#define VMWARE_FOURCC_YV12 0x32315659 +#define VMWARE_FOURCC_YUY2 0x32595559 +#define VMWARE_FOURCC_UYVY 0x59565955 typedef enum { - SVGA_OVERLAY_FORMAT_INVALID = 0, - SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12, - SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2, - SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY, + SVGA_OVERLAY_FORMAT_INVALID = 0, + SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12, + SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2, + SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY, } SVGAOverlayFormat; -#define SVGA_VIDEO_COLORKEY_MASK 0x00ffffff +#define SVGA_VIDEO_COLORKEY_MASK 0x00ffffff -#define SVGA_ESCAPE_VMWARE_VIDEO 0x00020000 +#define SVGA_ESCAPE_VMWARE_VIDEO 0x00020000 -#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS 0x00020001 - /* FIFO escape layout: - * Type, Stream Id, (Register Id, Value) pairs */ +#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS 0x00020001 -#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH 0x00020002 - /* FIFO escape layout: - * Type, Stream Id */ +#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH 0x00020002 -typedef -struct SVGAEscapeVideoSetRegs { - struct { - uint32 cmdType; - uint32 streamId; - } header; +typedef struct SVGAEscapeVideoSetRegs { + struct { + uint32 cmdType; + uint32 streamId; + } header; - /* May include zero or more items. */ - struct { - uint32 registerId; - uint32 value; - } items[1]; + struct { + uint32 registerId; + uint32 value; + } items[1]; } SVGAEscapeVideoSetRegs; -typedef -struct SVGAEscapeVideoFlush { - uint32 cmdType; - uint32 streamId; +typedef struct SVGAEscapeVideoFlush { + uint32 cmdType; + uint32 streamId; } SVGAEscapeVideoFlush; - -/* - * Struct definitions for the video overlay commands built on - * SVGAFifoCmdEscape. 
- */ -typedef -struct { - uint32 command; - uint32 overlay; +#pragma pack(push, 1) +typedef struct { + uint32 command; + uint32 overlay; } SVGAFifoEscapeCmdVideoBase; +#pragma pack(pop) -typedef -struct { - SVGAFifoEscapeCmdVideoBase videoCmd; +#pragma pack(push, 1) +typedef struct { + SVGAFifoEscapeCmdVideoBase videoCmd; } SVGAFifoEscapeCmdVideoFlush; - -typedef -struct { - SVGAFifoEscapeCmdVideoBase videoCmd; - struct { - uint32 regId; - uint32 value; - } items[1]; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGAFifoEscapeCmdVideoBase videoCmd; + struct { + uint32 regId; + uint32 value; + } items[1]; } SVGAFifoEscapeCmdVideoSetRegs; - -typedef -struct { - SVGAFifoEscapeCmdVideoBase videoCmd; - struct { - uint32 regId; - uint32 value; - } items[SVGA_VIDEO_NUM_REGS]; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGAFifoEscapeCmdVideoBase videoCmd; + struct { + uint32 regId; + uint32 value; + } items[SVGA_VIDEO_NUM_REGS]; } SVGAFifoEscapeCmdVideoSetAllRegs; +#pragma pack(pop) - -/* - *---------------------------------------------------------------------- - * - * VMwareVideoGetAttributes -- - * - * Computes the size, pitches and offsets for YUV frames. - * - * Results: - * TRUE on success; otherwise FALSE on failure. - * - * Side effects: - * Pitches and offsets for the given YUV frame are put in 'pitches' - * and 'offsets' respectively. They are both optional though. - * - *---------------------------------------------------------------------- - */ - -static inline bool -VMwareVideoGetAttributes(const SVGAOverlayFormat format, /* IN */ - uint32 *width, /* IN / OUT */ - uint32 *height, /* IN / OUT */ - uint32 *size, /* OUT */ - uint32 *pitches, /* OUT (optional) */ - uint32 *offsets) /* OUT (optional) */ -{ - int tmp; - - *width = (*width + 1) & ~1; - - if (offsets) { - offsets[0] = 0; - } - - switch (format) { - case VMWARE_FOURCC_YV12: - *height = (*height + 1) & ~1; - *size = (*width) * (*height); - - if (pitches) { - pitches[0] = *width; - } - - if (offsets) { - offsets[1] = *size; - } - - tmp = *width >> 1; - - if (pitches) { - pitches[1] = pitches[2] = tmp; - } - - tmp *= (*height >> 1); - *size += tmp; - - if (offsets) { - offsets[2] = *size; - } - - *size += tmp; - break; - - case VMWARE_FOURCC_YUY2: - case VMWARE_FOURCC_UYVY: - *size = *width * 2; - - if (pitches) { - pitches[0] = *size; - } - - *size *= *height; - break; - - default: - return false; - } - - return true; +#if defined __cplusplus } +#endif -#endif /* _SVGA_OVERLAY_H_ */ +#endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h index 193a57f6aae5..b3602557de2e 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h @@ -1,6 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /********************************************************** * Copyright 1998-2021 VMware, Inc. + * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -30,2277 +30,870 @@ * Virtual hardware definitions for the VMware SVGA II device. */ -#ifndef _SVGA_REG_H_ -#define _SVGA_REG_H_ -#include <linux/pci_ids.h> -#define INCLUDE_ALLOW_MODULE -#define INCLUDE_ALLOW_USERLEVEL -#define INCLUDE_ALLOW_VMCORE -#include "includeCheck.h" +#ifndef _SVGA_REG_H_ +#define _SVGA_REG_H_ -#include "svga_types.h" +#include "vm_basic_types.h" -/* - * SVGA_REG_ENABLE bit definitions. 
- */ typedef enum { - SVGA_REG_ENABLE_DISABLE = 0, - SVGA_REG_ENABLE_ENABLE = (1 << 0), - SVGA_REG_ENABLE_HIDE = (1 << 1), + SVGA_REG_ENABLE_DISABLE = 0, + SVGA_REG_ENABLE_ENABLE = (1 << 0), + SVGA_REG_ENABLE_HIDE = (1 << 1), } SvgaRegEnable; typedef uint32 SVGAMobId; -/* - * Arbitrary and meaningless limits. Please ignore these when writing - * new drivers. - */ -#define SVGA_MAX_WIDTH 2560 -#define SVGA_MAX_HEIGHT 1600 - +#define SVGA_MAX_WIDTH 2560 +#define SVGA_MAX_HEIGHT 1600 -#define SVGA_MAX_BITS_PER_PIXEL 32 -#define SVGA_MAX_DEPTH 24 -#define SVGA_MAX_DISPLAYS 10 -#define SVGA_MAX_SCREEN_SIZE 8192 +#define SVGA_MAX_BITS_PER_PIXEL 32 +#define SVGA_MAX_DEPTH 24 +#define SVGA_MAX_DISPLAYS 10 +#define SVGA_MAX_SCREEN_SIZE 8192 #define SVGA_SCREEN_ROOT_LIMIT (SVGA_MAX_SCREEN_SIZE * SVGA_MAX_DISPLAYS) +#define SVGA_CURSOR_ON_HIDE 0x0 +#define SVGA_CURSOR_ON_SHOW 0x1 -/* - * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned - * cursor bypass mode. - */ -#define SVGA_CURSOR_ON_HIDE 0x0 -#define SVGA_CURSOR_ON_SHOW 0x1 - -/* - * Remove the cursor from the framebuffer - * because we need to see what's under it - */ -#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2 +#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2 -/* Put the cursor back in the framebuffer so the user can see it */ -#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 +#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 -/* - * The maximum framebuffer size that can traced for guests unless the - * SVGA_CAP_GBOBJECTS is set in SVGA_REG_CAPABILITIES. In that case - * the full framebuffer can be traced independent of this limit. - */ -#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000 +#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000 -#define SVGA_MAX_PSEUDOCOLOR_DEPTH 8 -#define SVGA_MAX_PSEUDOCOLORS (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH) -#define SVGA_NUM_PALETTE_REGS (3 * SVGA_MAX_PSEUDOCOLORS) +#define SVGA_MAX_PSEUDOCOLOR_DEPTH 8 +#define SVGA_MAX_PSEUDOCOLORS (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH) +#define SVGA_NUM_PALETTE_REGS (3 * SVGA_MAX_PSEUDOCOLORS) -#define SVGA_MAGIC 0x900000UL -#define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver)) +#define SVGA_MAGIC 0x900000UL +#define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver)) -/* Version 3 has the control bar instead of the FIFO */ -#define SVGA_VERSION_3 3 -#define SVGA_ID_3 SVGA_MAKE_ID(SVGA_VERSION_3) +#define SVGA_VERSION_3 3 +#define SVGA_ID_3 SVGA_MAKE_ID(SVGA_VERSION_3) -/* Version 2 let the address of the frame buffer be unsigned on Win32 */ -#define SVGA_VERSION_2 2 -#define SVGA_ID_2 SVGA_MAKE_ID(SVGA_VERSION_2) +#define SVGA_VERSION_2 2 +#define SVGA_ID_2 SVGA_MAKE_ID(SVGA_VERSION_2) -/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so - PALETTE_BASE has moved */ -#define SVGA_VERSION_1 1 -#define SVGA_ID_1 SVGA_MAKE_ID(SVGA_VERSION_1) +#define SVGA_VERSION_1 1 +#define SVGA_ID_1 SVGA_MAKE_ID(SVGA_VERSION_1) -/* Version 0 is the initial version */ -#define SVGA_VERSION_0 0 -#define SVGA_ID_0 SVGA_MAKE_ID(SVGA_VERSION_0) +#define SVGA_VERSION_0 0 +#define SVGA_ID_0 SVGA_MAKE_ID(SVGA_VERSION_0) -/* - * "Invalid" value for all SVGA IDs. - * (Version ID, screen object ID, surface ID...) 
- */ -#define SVGA_ID_INVALID 0xFFFFFFFF +#define SVGA_ID_INVALID 0xFFFFFFFF -/* Port offsets, relative to BAR0 */ -#define SVGA_INDEX_PORT 0x0 -#define SVGA_VALUE_PORT 0x1 -#define SVGA_BIOS_PORT 0x2 -#define SVGA_IRQSTATUS_PORT 0x8 +#define SVGA_INDEX_PORT 0x0 +#define SVGA_VALUE_PORT 0x1 +#define SVGA_BIOS_PORT 0x2 +#define SVGA_IRQSTATUS_PORT 0x8 -/* - * Interrupt source flags for IRQSTATUS_PORT and IRQMASK. - * - * Interrupts are only supported when the - * SVGA_CAP_IRQMASK capability is present. - */ -#define SVGA_IRQFLAG_ANY_FENCE (1 << 0) /* Any fence was passed */ -#define SVGA_IRQFLAG_FIFO_PROGRESS (1 << 1) /* Made forward progress in the FIFO */ -#define SVGA_IRQFLAG_FENCE_GOAL (1 << 2) /* SVGA_FIFO_FENCE_GOAL reached */ -#define SVGA_IRQFLAG_COMMAND_BUFFER (1 << 3) /* Command buffer completed */ -#define SVGA_IRQFLAG_ERROR (1 << 4) /* Error while processing commands */ -#define SVGA_IRQFLAG_MAX (1 << 5) +#define SVGA_IRQFLAG_ANY_FENCE (1 << 0) +#define SVGA_IRQFLAG_FIFO_PROGRESS (1 << 1) +#define SVGA_IRQFLAG_FENCE_GOAL (1 << 2) +#define SVGA_IRQFLAG_COMMAND_BUFFER (1 << 3) +#define SVGA_IRQFLAG_ERROR (1 << 4) +#define SVGA_IRQFLAG_REG_FENCE_GOAL (1 << 5) +#define SVGA_IRQFLAG_MAX (1 << 6) -/* - * The byte-size is the size of the actual cursor data, - * possibly after expanding it to the current bit depth. - * - * 40K is sufficient memory for two 32-bit planes for a 64 x 64 cursor. - * - * The dimension limit is a bound on the maximum width or height. - */ -#define SVGA_MAX_CURSOR_CMD_BYTES (40 * 1024) +#define SVGA_MAX_CURSOR_CMD_BYTES (40 * 1024) #define SVGA_MAX_CURSOR_CMD_DIMENSION 1024 -/* - * Registers - */ - enum { - SVGA_REG_ID = 0, - SVGA_REG_ENABLE = 1, - SVGA_REG_WIDTH = 2, - SVGA_REG_HEIGHT = 3, - SVGA_REG_MAX_WIDTH = 4, - SVGA_REG_MAX_HEIGHT = 5, - SVGA_REG_DEPTH = 6, - SVGA_REG_BITS_PER_PIXEL = 7, /* Current bpp in the guest */ - SVGA_REG_PSEUDOCOLOR = 8, - SVGA_REG_RED_MASK = 9, - SVGA_REG_GREEN_MASK = 10, - SVGA_REG_BLUE_MASK = 11, - SVGA_REG_BYTES_PER_LINE = 12, - SVGA_REG_FB_START = 13, /* (Deprecated) */ - SVGA_REG_FB_OFFSET = 14, - SVGA_REG_VRAM_SIZE = 15, - SVGA_REG_FB_SIZE = 16, - - /* ID 0 implementation only had the above registers, then the palette */ - SVGA_REG_ID_0_TOP = 17, - - SVGA_REG_CAPABILITIES = 17, - SVGA_REG_MEM_START = 18, /* (Deprecated) */ - SVGA_REG_MEM_SIZE = 19, - SVGA_REG_CONFIG_DONE = 20, /* Set when memory area configured */ - SVGA_REG_SYNC = 21, /* See "FIFO Synchronization Registers" */ - SVGA_REG_BUSY = 22, /* See "FIFO Synchronization Registers" */ - SVGA_REG_GUEST_ID = 23, /* (Deprecated) */ - SVGA_REG_DEAD = 24, /* Drivers should never write this. 
*/ - SVGA_REG_CURSOR_X = 25, /* (Deprecated) */ - SVGA_REG_CURSOR_Y = 26, /* (Deprecated) */ - SVGA_REG_CURSOR_ON = 27, /* (Deprecated) */ - SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */ - SVGA_REG_SCRATCH_SIZE = 29, /* Number of scratch registers */ - SVGA_REG_MEM_REGS = 30, /* Number of FIFO registers */ - SVGA_REG_NUM_DISPLAYS = 31, /* (Deprecated) */ - SVGA_REG_PITCHLOCK = 32, /* Fixed pitch for all modes */ - SVGA_REG_IRQMASK = 33, /* Interrupt mask */ - - /* Legacy multi-monitor support */ - SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */ - SVGA_REG_DISPLAY_ID = 35, /* Display ID for the following display attributes */ - SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */ - SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */ - SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */ - SVGA_REG_DISPLAY_WIDTH = 39, /* The display's width */ - SVGA_REG_DISPLAY_HEIGHT = 40, /* The display's height */ - - /* See "Guest memory regions" below. */ - SVGA_REG_GMR_ID = 41, - SVGA_REG_GMR_DESCRIPTOR = 42, - SVGA_REG_GMR_MAX_IDS = 43, - SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44, - - SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ - SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ - SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ - SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */ - SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */ - - /* - * Max primary memory. - * See SVGA_CAP_NO_BB_RESTRICTION. - */ - SVGA_REG_MAX_PRIMARY_MEM = 50, - SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, - - /* - * Legacy version of SVGA_REG_GBOBJECT_MEM_SIZE_KB for drivers that - * don't know how to convert to a 64-bit byte value without overflowing. - * (See SVGA_REG_GBOBJECT_MEM_SIZE_KB). - */ - SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, - - SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ - SVGA_REG_CMD_PREPEND_LOW = 53, - SVGA_REG_CMD_PREPEND_HIGH = 54, - SVGA_REG_SCREENTARGET_MAX_WIDTH = 55, - SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56, - SVGA_REG_MOB_MAX_SIZE = 57, - SVGA_REG_BLANK_SCREEN_TARGETS = 58, - SVGA_REG_CAP2 = 59, - SVGA_REG_DEVEL_CAP = 60, - - /* - * Allow the guest to hint to the device which driver is running. - * - * This should not generally change device behavior, but might be - * convenient to work-around specific bugs in guest drivers. - * - * Drivers should first write their id value into SVGA_REG_GUEST_DRIVER_ID, - * and then fill out all of the version registers that they have defined. - * - * After the driver has written all of the registers, they should - * then write the value SVGA_REG_GUEST_DRIVER_ID_SUBMIT to the - * SVGA_REG_GUEST_DRIVER_ID register, to signal that they have finished. - * - * The SVGA_REG_GUEST_DRIVER_ID values are defined below by the - * SVGARegGuestDriverId enum. - * - * The SVGA_REG_GUEST_DRIVER_VERSION fields are driver-specific, - * but ideally should encode a monotonically increasing number that allows - * the device to perform inequality checks against ranges of driver versions. 
- */ - SVGA_REG_GUEST_DRIVER_ID = 61, - SVGA_REG_GUEST_DRIVER_VERSION1 = 62, - SVGA_REG_GUEST_DRIVER_VERSION2 = 63, - SVGA_REG_GUEST_DRIVER_VERSION3 = 64, - SVGA_REG_CURSOR_MOBID = 65, - SVGA_REG_CURSOR_MAX_BYTE_SIZE = 66, - SVGA_REG_CURSOR_MAX_DIMENSION = 67, - - SVGA_REG_FIFO_CAPS = 68, - SVGA_REG_FENCE = 69, - - SVGA_REG_RESERVED1 = 70, - SVGA_REG_RESERVED2 = 71, - SVGA_REG_RESERVED3 = 72, - SVGA_REG_RESERVED4 = 73, - SVGA_REG_RESERVED5 = 74, - SVGA_REG_SCREENDMA = 75, - - /* - * The maximum amount of guest-backed objects that the device can have - * resident at a time. Guest-drivers should keep their working set size - * below this limit for best performance. - * - * Note that this value is in kilobytes, and not bytes, because the actual - * number of bytes might be larger than can fit in a 32-bit register. - * - * PLEASE USE A 64-BIT VALUE WHEN CONVERTING THIS INTO BYTES. - * (See SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB). - */ - SVGA_REG_GBOBJECT_MEM_SIZE_KB = 76, - - /* - + * These registers are for the addresses of the memory BARs for SVGA3 - */ - SVGA_REG_REGS_START_HIGH32 = 77, - SVGA_REG_REGS_START_LOW32 = 78, - SVGA_REG_FB_START_HIGH32 = 79, - SVGA_REG_FB_START_LOW32 = 80, - - /* - * A hint register that recommends which quality level the guest should - * currently use to define multisample surfaces. - * - * If the register is SVGA_REG_MSHINT_DISABLED, - * the guest is only allowed to use SVGA3D_MS_QUALITY_FULL. - * - * Otherwise, this is a live value that can change while the VM is - * powered on with the hint suggestion for which quality level the guest - * should be using. Guests are free to ignore the hint and use either - * RESOLVE or FULL quality. - */ - SVGA_REG_MSHINT = 81, - - SVGA_REG_IRQ_STATUS = 82, - SVGA_REG_DIRTY_TRACKING = 83, - - SVGA_REG_TOP = 84, /* Must be 1 more than the last register */ - - SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ - /* Next 768 (== 256*3) registers exist for colormap */ - SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS - /* Base of scratch registers */ - /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage: - First 4 are reserved for VESA BIOS Extension; any remaining are for - the use of the current SVGA driver. 
*/ -}; + SVGA_REG_ID = 0, + SVGA_REG_ENABLE = 1, + SVGA_REG_WIDTH = 2, + SVGA_REG_HEIGHT = 3, + SVGA_REG_MAX_WIDTH = 4, + SVGA_REG_MAX_HEIGHT = 5, + SVGA_REG_DEPTH = 6, + SVGA_REG_BITS_PER_PIXEL = 7, + SVGA_REG_PSEUDOCOLOR = 8, + SVGA_REG_RED_MASK = 9, + SVGA_REG_GREEN_MASK = 10, + SVGA_REG_BLUE_MASK = 11, + SVGA_REG_BYTES_PER_LINE = 12, + SVGA_REG_FB_START = 13, + SVGA_REG_FB_OFFSET = 14, + SVGA_REG_VRAM_SIZE = 15, + SVGA_REG_FB_SIZE = 16, + + SVGA_REG_ID_0_TOP = 17, + + SVGA_REG_CAPABILITIES = 17, + SVGA_REG_MEM_START = 18, + SVGA_REG_MEM_SIZE = 19, + SVGA_REG_CONFIG_DONE = 20, + SVGA_REG_SYNC = 21, + SVGA_REG_BUSY = 22, + SVGA_REG_GUEST_ID = 23, + SVGA_REG_DEAD = 24, + SVGA_REG_CURSOR_X = 25, + SVGA_REG_CURSOR_Y = 26, + SVGA_REG_CURSOR_ON = 27, + SVGA_REG_HOST_BITS_PER_PIXEL = 28, + SVGA_REG_SCRATCH_SIZE = 29, + SVGA_REG_MEM_REGS = 30, + SVGA_REG_NUM_DISPLAYS = 31, + SVGA_REG_PITCHLOCK = 32, + SVGA_REG_IRQMASK = 33, + + SVGA_REG_NUM_GUEST_DISPLAYS = 34, + SVGA_REG_DISPLAY_ID = 35, + SVGA_REG_DISPLAY_IS_PRIMARY = 36, + SVGA_REG_DISPLAY_POSITION_X = 37, + SVGA_REG_DISPLAY_POSITION_Y = 38, + SVGA_REG_DISPLAY_WIDTH = 39, + SVGA_REG_DISPLAY_HEIGHT = 40, + + SVGA_REG_GMR_ID = 41, + SVGA_REG_GMR_DESCRIPTOR = 42, + SVGA_REG_GMR_MAX_IDS = 43, + SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44, + + SVGA_REG_TRACES = 45, + SVGA_REG_GMRS_MAX_PAGES = 46, + SVGA_REG_MEMORY_SIZE = 47, + SVGA_REG_COMMAND_LOW = 48, + SVGA_REG_COMMAND_HIGH = 49, + + SVGA_REG_MAX_PRIMARY_MEM = 50, + + SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, + + SVGA_REG_DEV_CAP = 52, + SVGA_REG_CMD_PREPEND_LOW = 53, + SVGA_REG_CMD_PREPEND_HIGH = 54, + SVGA_REG_SCREENTARGET_MAX_WIDTH = 55, + SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56, + SVGA_REG_MOB_MAX_SIZE = 57, + SVGA_REG_BLANK_SCREEN_TARGETS = 58, + SVGA_REG_CAP2 = 59, + SVGA_REG_DEVEL_CAP = 60, + + SVGA_REG_GUEST_DRIVER_ID = 61, + SVGA_REG_GUEST_DRIVER_VERSION1 = 62, + SVGA_REG_GUEST_DRIVER_VERSION2 = 63, + SVGA_REG_GUEST_DRIVER_VERSION3 = 64, + + SVGA_REG_CURSOR_MOBID = 65, + SVGA_REG_CURSOR_MAX_BYTE_SIZE = 66, + SVGA_REG_CURSOR_MAX_DIMENSION = 67, + + SVGA_REG_FIFO_CAPS = 68, + SVGA_REG_FENCE = 69, + + SVGA_REG_CURSOR4_ON = 70, + SVGA_REG_CURSOR4_X = 71, + SVGA_REG_CURSOR4_Y = 72, + SVGA_REG_CURSOR4_SCREEN_ID = 73, + SVGA_REG_CURSOR4_SUBMIT = 74, + + SVGA_REG_SCREENDMA = 75, + + SVGA_REG_GBOBJECT_MEM_SIZE_KB = 76, + + SVGA_REG_REGS_START_HIGH32 = 77, + SVGA_REG_REGS_START_LOW32 = 78, + SVGA_REG_FB_START_HIGH32 = 79, + SVGA_REG_FB_START_LOW32 = 80, + + SVGA_REG_MSHINT = 81, + + SVGA_REG_IRQ_STATUS = 82, + + SVGA_REG_DIRTY_TRACKING = 83, + SVGA_REG_FENCE_GOAL = 84, + + SVGA_REG_TOP = 85, + + SVGA_PALETTE_BASE = 1024, + + SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS +}; -/* - * Values for SVGA_REG_GUEST_DRIVER_ID. 
- */ typedef enum SVGARegGuestDriverId { - SVGA_REG_GUEST_DRIVER_ID_UNKNOWN = 0, - SVGA_REG_GUEST_DRIVER_ID_WDDM = 1, - SVGA_REG_GUEST_DRIVER_ID_LINUX = 2, - SVGA_REG_GUEST_DRIVER_ID_MAX, + SVGA_REG_GUEST_DRIVER_ID_UNKNOWN = 0, + SVGA_REG_GUEST_DRIVER_ID_WDDM = 1, + SVGA_REG_GUEST_DRIVER_ID_LINUX = 2, + SVGA_REG_GUEST_DRIVER_ID_MAX, - SVGA_REG_GUEST_DRIVER_ID_SUBMIT = MAX_UINT32, + SVGA_REG_GUEST_DRIVER_ID_SUBMIT = MAX_UINT32, } SVGARegGuestDriverId; typedef enum SVGARegMSHint { - SVGA_REG_MSHINT_DISABLED = 0, - SVGA_REG_MSHINT_FULL = 1, - SVGA_REG_MSHINT_RESOLVED = 2, + SVGA_REG_MSHINT_DISABLED = 0, + SVGA_REG_MSHINT_FULL = 1, + SVGA_REG_MSHINT_RESOLVED = 2, } SVGARegMSHint; typedef enum SVGARegDirtyTracking { - SVGA_REG_DIRTY_TRACKING_PER_IMAGE = 0, - SVGA_REG_DIRTY_TRACKING_PER_SURFACE = 1, + SVGA_REG_DIRTY_TRACKING_PER_IMAGE = 0, + SVGA_REG_DIRTY_TRACKING_PER_SURFACE = 1, } SVGARegDirtyTracking; - -/* - * Guest memory regions (GMRs): - * - * This is a new memory mapping feature available in SVGA devices - * which have the SVGA_CAP_GMR bit set. Previously, there were two - * fixed memory regions available with which to share data between the - * device and the driver: the FIFO ('MEM') and the framebuffer. GMRs - * are our name for an extensible way of providing arbitrary DMA - * buffers for use between the driver and the SVGA device. They are a - * new alternative to framebuffer memory, usable for both 2D and 3D - * graphics operations. - * - * Since GMR mapping must be done synchronously with guest CPU - * execution, we use a new pair of SVGA registers: - * - * SVGA_REG_GMR_ID -- - * - * Read/write. - * This register holds the 32-bit ID (a small positive integer) - * of a GMR to create, delete, or redefine. Writing this register - * has no side-effects. - * - * SVGA_REG_GMR_DESCRIPTOR -- - * - * Write-only. - * Writing this register will create, delete, or redefine the GMR - * specified by the above ID register. If this register is zero, - * the GMR is deleted. Any pointers into this GMR (including those - * currently being processed by FIFO commands) will be - * synchronously invalidated. - * - * If this register is nonzero, it must be the physical page - * number (PPN) of a data structure which describes the physical - * layout of the memory region this GMR should describe. The - * descriptor structure will be read synchronously by the SVGA - * device when this register is written. The descriptor need not - * remain allocated for the lifetime of the GMR. - * - * The guest driver should write SVGA_REG_GMR_ID first, then - * SVGA_REG_GMR_DESCRIPTOR. - * - * SVGA_REG_GMR_MAX_IDS -- - * - * Read-only. - * The SVGA device may choose to support a maximum number of - * user-defined GMR IDs. This register holds the number of supported - * IDs. (The maximum supported ID plus 1) - * - * SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH -- - * - * Read-only. - * The SVGA device may choose to put a limit on the total number - * of SVGAGuestMemDescriptor structures it will read when defining - * a single GMR. - * - * The descriptor structure is an array of SVGAGuestMemDescriptor - * structures. Each structure may do one of three things: - * - * - Terminate the GMR descriptor list. - * (ppn==0, numPages==0) - * - * - Add a PPN or range of PPNs to the GMR's virtual address space. - * (ppn != 0, numPages != 0) - * - * - Provide the PPN of the next SVGAGuestMemDescriptor, in order to - * support multi-page GMR descriptor tables without forcing the - * driver to allocate physically contiguous memory. 
- * (ppn != 0, numPages == 0) - * - * Note that each physical page of SVGAGuestMemDescriptor structures - * can describe at least 2MB of guest memory. If the driver needs to - * use more than one page of descriptor structures, it must use one of - * its SVGAGuestMemDescriptors to point to an additional page. The - * device will never automatically cross a page boundary. - * - * Once the driver has described a GMR, it is immediately available - * for use via any FIFO command that uses an SVGAGuestPtr structure. - * These pointers include a GMR identifier plus an offset into that - * GMR. - * - * The driver must check the SVGA_CAP_GMR bit before using the GMR - * registers. - */ - -/* - * Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer - * memory as well. In the future, these IDs could even be used to - * allow legacy memory regions to be redefined by the guest as GMRs. - * - * Using the guest framebuffer (GFB) at BAR1 for general purpose DMA - * is being phased out. Please try to use user-defined GMRs whenever - * possible. - */ -#define SVGA_GMR_NULL ((uint32) -1) -#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) /* Guest Framebuffer (GFB) */ - -typedef -#include "vmware_pack_begin.h" -struct SVGAGuestMemDescriptor { - uint32 ppn; - uint32 numPages; -} -#include "vmware_pack_end.h" -SVGAGuestMemDescriptor; - -typedef -#include "vmware_pack_begin.h" -struct SVGAGuestPtr { - uint32 gmrId; - uint32 offset; -} -#include "vmware_pack_end.h" -SVGAGuestPtr; - -/* - * Register based command buffers -- - * - * Provide an SVGA device interface that allows the guest to submit - * command buffers to the SVGA device through an SVGA device register. - * The metadata for each command buffer is contained in the - * SVGACBHeader structure along with the return status codes. - * - * The SVGA device supports command buffers if - * SVGA_CAP_COMMAND_BUFFERS is set in the device caps register. The - * fifo must be enabled for command buffers to be submitted. - * - * Command buffers are submitted when the guest writing the 64 byte - * aligned physical address into the SVGA_REG_COMMAND_LOW and - * SVGA_REG_COMMAND_HIGH. SVGA_REG_COMMAND_HIGH contains the upper 32 - * bits of the physical address. SVGA_REG_COMMAND_LOW contains the - * lower 32 bits of the physical address, since the command buffer - * headers are required to be 64 byte aligned the lower 6 bits are - * used for the SVGACBContext value. Writing to SVGA_REG_COMMAND_LOW - * submits the command buffer to the device and queues it for - * execution. The SVGA device supports at least - * SVGA_CB_MAX_QUEUED_PER_CONTEXT command buffers that can be queued - * per context and if that limit is reached the device will write the - * status SVGA_CB_STATUS_QUEUE_FULL to the status value of the command - * buffer header synchronously and not raise any IRQs. - * - * It is invalid to submit a command buffer without a valid physical - * address and results are undefined. - * - * The device guarantees that command buffers of size SVGA_CB_MAX_SIZE - * will be supported. If a larger command buffer is submitted results - * are unspecified and the device will either complete the command - * buffer or return an error. - * - * The device guarantees that any individual command in a command - * buffer can be up to SVGA_CB_MAX_COMMAND_SIZE in size which is - * enough to fit a 64x64 color-cursor definition. If the command is - * too large the device is allowed to process the command or return an - * error. 
- * - * The device context is a special SVGACBContext that allows for - * synchronous register like accesses with the flexibility of - * commands. There is a different command set defined by - * SVGADeviceContextCmdId. The commands in each command buffer is not - * allowed to straddle physical pages. - * - * The offset field which is available starting with the - * SVGA_CAP_CMD_BUFFERS_2 cap bit can be set by the guest to bias the - * start of command processing into the buffer. If an error is - * encountered the errorOffset will still be relative to the specific - * PA, not biased by the offset. When the command buffer is finished - * the guest should not read the offset field as there is no guarantee - * what it will set to. - * - * When the SVGA_CAP_HP_CMD_QUEUE cap bit is set a new command queue - * SVGA_CB_CONTEXT_1 is available. Commands submitted to this queue - * will be executed as quickly as possible by the SVGA device - * potentially before already queued commands on SVGA_CB_CONTEXT_0. - * The SVGA device guarantees that any command buffers submitted to - * SVGA_CB_CONTEXT_0 will be executed after any _already_ submitted - * command buffers to SVGA_CB_CONTEXT_1. - */ - -#define SVGA_CB_MAX_SIZE (512 * 1024) /* 512 KB */ +#define SVGA_GMR_NULL ((uint32)-1) +#define SVGA_GMR_FRAMEBUFFER ((uint32)-2) + +#pragma pack(push, 1) +typedef struct SVGAGuestMemDescriptor { + uint32 ppn; + uint32 numPages; +} SVGAGuestMemDescriptor; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct SVGAGuestPtr { + uint32 gmrId; + uint32 offset; +} SVGAGuestPtr; +#pragma pack(pop) + +#define SVGA_CB_MAX_SIZE_DEFAULT (KBYTES_2_BYTES(512)) +#define SVGA_CB_MAX_SIZE_4MB (MBYTES_2_BYTES(4)) +#define SVGA_CB_MAX_SIZE SVGA_CB_MAX_SIZE_4MB #define SVGA_CB_MAX_QUEUED_PER_CONTEXT 32 -#define SVGA_CB_MAX_COMMAND_SIZE (32 * 1024) /* 32 KB */ +#define SVGA_CB_MAX_COMMAND_SIZE (32 * 1024) #define SVGA_CB_CONTEXT_MASK 0x3f typedef enum { - SVGA_CB_CONTEXT_DEVICE = 0x3f, - SVGA_CB_CONTEXT_0 = 0x0, - SVGA_CB_CONTEXT_1 = 0x1, /* Supported with SVGA_CAP_HP_CMD_QUEUE */ - SVGA_CB_CONTEXT_MAX = 0x2, + SVGA_CB_CONTEXT_DEVICE = 0x3f, + SVGA_CB_CONTEXT_0 = 0x0, + SVGA_CB_CONTEXT_1 = 0x1, + SVGA_CB_CONTEXT_MAX = 0x2, } SVGACBContext; - typedef enum { - /* - * The guest is supposed to write SVGA_CB_STATUS_NONE to the status - * field before submitting the command buffer header, the host will - * change the value when it is done with the command buffer. - */ - SVGA_CB_STATUS_NONE = 0, - - /* - * Written by the host when a command buffer completes successfully. - * The device raises an IRQ with SVGA_IRQFLAG_COMMAND_BUFFER unless - * the SVGA_CB_FLAG_NO_IRQ flag is set. - */ - SVGA_CB_STATUS_COMPLETED = 1, - - /* - * Written by the host synchronously with the command buffer - * submission to indicate the command buffer was not submitted. No - * IRQ is raised. - */ - SVGA_CB_STATUS_QUEUE_FULL = 2, - - /* - * Written by the host when an error was detected parsing a command - * in the command buffer, errorOffset is written to contain the - * offset to the first byte of the failing command. The device - * raises the IRQ with both SVGA_IRQFLAG_ERROR and - * SVGA_IRQFLAG_COMMAND_BUFFER. Some of the commands may have been - * processed. - */ - SVGA_CB_STATUS_COMMAND_ERROR = 3, - - /* - * Written by the host if there is an error parsing the command - * buffer header. The device raises the IRQ with both - * SVGA_IRQFLAG_ERROR and SVGA_IRQFLAG_COMMAND_BUFFER. The device - * did not processes any of the command buffer. 
- */ - SVGA_CB_STATUS_CB_HEADER_ERROR = 4, - - /* - * Written by the host if the guest requested the host to preempt - * the command buffer. The device will not raise any IRQs and the - * command buffer was not processed. - */ - SVGA_CB_STATUS_PREEMPTED = 5, - - /* - * Written by the host synchronously with the command buffer - * submission to indicate the the command buffer was not submitted - * due to an error. No IRQ is raised. - */ - SVGA_CB_STATUS_SUBMISSION_ERROR = 6, - - /* - * Written by the host when the host finished a - * SVGA_DC_CMD_ASYNC_STOP_QUEUE request for this command buffer - * queue. The offset of the first byte not processed is stored in - * the errorOffset field of the command buffer header. All guest - * visible side effects of commands till that point are guaranteed - * to be finished before this is written. The - * SVGA_IRQFLAG_COMMAND_BUFFER IRQ is raised as long as the - * SVGA_CB_FLAG_NO_IRQ is not set. - */ - SVGA_CB_STATUS_PARTIAL_COMPLETE = 7, + + SVGA_CB_STATUS_NONE = 0, + + SVGA_CB_STATUS_COMPLETED = 1, + + SVGA_CB_STATUS_QUEUE_FULL = 2, + + SVGA_CB_STATUS_COMMAND_ERROR = 3, + + SVGA_CB_STATUS_CB_HEADER_ERROR = 4, + + SVGA_CB_STATUS_PREEMPTED = 5, + + SVGA_CB_STATUS_SUBMISSION_ERROR = 6, + + SVGA_CB_STATUS_PARTIAL_COMPLETE = 7, } SVGACBStatus; typedef enum { - SVGA_CB_FLAG_NONE = 0, - SVGA_CB_FLAG_NO_IRQ = 1 << 0, - SVGA_CB_FLAG_DX_CONTEXT = 1 << 1, - SVGA_CB_FLAG_MOB = 1 << 2, + SVGA_CB_FLAG_NONE = 0, + SVGA_CB_FLAG_NO_IRQ = 1 << 0, + SVGA_CB_FLAG_DX_CONTEXT = 1 << 1, + SVGA_CB_FLAG_MOB = 1 << 2, } SVGACBFlags; -typedef -#include "vmware_pack_begin.h" -struct { - volatile SVGACBStatus status; /* Modified by device. */ - volatile uint32 errorOffset; /* Modified by device. */ - uint64 id; - SVGACBFlags flags; - uint32 length; - union { - PA pa; - struct { - SVGAMobId mobid; - uint32 mobOffset; - } mob; - } ptr; - uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise, - * modified by device. - */ - uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */ - uint32 mustBeZero[6]; -} -#include "vmware_pack_end.h" -SVGACBHeader; +#pragma pack(push, 1) +typedef struct { + volatile SVGACBStatus status; + volatile uint32 errorOffset; + uint64 id; + SVGACBFlags flags; + uint32 length; + union { + PA pa; + struct { + SVGAMobId mobid; + uint32 mobOffset; + } mob; + } ptr; + uint32 offset; + uint32 dxContext; + uint32 mustBeZero[6]; +} SVGACBHeader; +#pragma pack(pop) typedef enum { - SVGA_DC_CMD_NOP = 0, - SVGA_DC_CMD_START_STOP_CONTEXT = 1, - SVGA_DC_CMD_PREEMPT = 2, - SVGA_DC_CMD_START_QUEUE = 3, /* Requires SVGA_CAP_HP_CMD_QUEUE */ - SVGA_DC_CMD_ASYNC_STOP_QUEUE = 4, /* Requires SVGA_CAP_HP_CMD_QUEUE */ - SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE = 5, /* Requires SVGA_CAP_HP_CMD_QUEUE */ - SVGA_DC_CMD_MAX = 6, + SVGA_DC_CMD_NOP = 0, + SVGA_DC_CMD_START_STOP_CONTEXT = 1, + SVGA_DC_CMD_PREEMPT = 2, + SVGA_DC_CMD_START_QUEUE = 3, + SVGA_DC_CMD_ASYNC_STOP_QUEUE = 4, + SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE = 5, + SVGA_DC_CMD_MAX = 6 } SVGADeviceContextCmdId; -/* - * Starts or stops both SVGA_CB_CONTEXT_0 and SVGA_CB_CONTEXT_1. - */ - typedef struct SVGADCCmdStartStop { - uint32 enable; - SVGACBContext context; /* Must be zero */ + uint32 enable; + SVGACBContext context; } SVGADCCmdStartStop; -/* - * SVGADCCmdPreempt -- - * - * This command allows the guest to request that all command buffers - * on SVGA_CB_CONTEXT_0 be preempted that can be. 
After execution - * of this command all command buffers that were preempted will - * already have SVGA_CB_STATUS_PREEMPTED written into the status - * field. The device might still be processing a command buffer, - * assuming execution of it started before the preemption request was - * received. Specifying the ignoreIDZero flag to TRUE will cause the - * device to not preempt command buffers with the id field in the - * command buffer header set to zero. - */ - typedef struct SVGADCCmdPreempt { - SVGACBContext context; /* Must be zero */ - uint32 ignoreIDZero; + SVGACBContext context; + uint32 ignoreIDZero; } SVGADCCmdPreempt; -/* - * Starts the requested command buffer processing queue. Valid only - * if the SVGA_CAP_HP_CMD_QUEUE cap is set. - * - * For a command queue to be considered runnable it must be enabled - * and any corresponding higher priority queues must also be enabled. - * For example in order for command buffers to be processed on - * SVGA_CB_CONTEXT_0 both SVGA_CB_CONTEXT_0 and SVGA_CB_CONTEXT_1 must - * be enabled. But for commands to be runnable on SVGA_CB_CONTEXT_1 - * only that queue must be enabled. - */ - typedef struct SVGADCCmdStartQueue { - SVGACBContext context; + SVGACBContext context; } SVGADCCmdStartQueue; -/* - * Requests the SVGA device to stop processing the requested command - * buffer queue as soon as possible. The guest knows the stop has - * completed when one of the following happens. - * - * 1) A command buffer status of SVGA_CB_STATUS_PARTIAL_COMPLETE is returned - * 2) A command buffer error is encountered with would stop the queue - * regardless of the async stop request. - * 3) All command buffers that have been submitted complete successfully. - * 4) The stop completes synchronously if no command buffers are - * active on the queue when it is issued. - * - * If the command queue is not in a runnable state there is no - * guarentee this async stop will finish. For instance if the high - * priority queue is not enabled and a stop is requested on the low - * priority queue, the high priority queue must be reenabled to - * guarantee that the async stop will finish. - * - * This command along with SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE can be used - * to implement mid command buffer preemption. - * - * Valid only if the SVGA_CAP_HP_CMD_QUEUE cap is set. - */ - typedef struct SVGADCCmdAsyncStopQueue { - SVGACBContext context; + SVGACBContext context; } SVGADCCmdAsyncStopQueue; -/* - * Requests the SVGA device to throw away any full command buffers on - * the requested command queue that have not been started. For a - * driver to know which command buffers were thrown away a driver - * should only issue this command when the queue is stopped, for - * whatever reason. - */ - typedef struct SVGADCCmdEmptyQueue { - SVGACBContext context; + SVGACBContext context; } SVGADCCmdEmptyQueue; - -/* - * SVGAGMRImageFormat -- - * - * This is a packed representation of the source 2D image format - * for a GMR-to-screen blit. Currently it is defined as an encoding - * of the screen's color depth and bits-per-pixel, however, 16 bits - * are reserved for future use to identify other encodings (such as - * RGBA or higher-precision images). 
- * - * Currently supported formats: - * - * bpp depth Format Name - * --- ----- ----------- - * 32 24 32-bit BGRX - * 24 24 24-bit BGR - * 16 16 RGB 5-6-5 - * 16 15 RGB 5-5-5 - * - */ - typedef struct SVGAGMRImageFormat { - union { - struct { - uint32 bitsPerPixel : 8; - uint32 colorDepth : 8; - uint32 reserved : 16; /* Must be zero */ - }; - - uint32 value; - }; + union { + struct { + uint32 bitsPerPixel : 8; + uint32 colorDepth : 8; + uint32 reserved : 16; + }; + + uint32 value; + }; } SVGAGMRImageFormat; -typedef -#include "vmware_pack_begin.h" -struct SVGAGuestImage { - SVGAGuestPtr ptr; - - /* - * A note on interpretation of pitch: This value of pitch is the - * number of bytes between vertically adjacent image - * blocks. Normally this is the number of bytes between the first - * pixel of two adjacent scanlines. With compressed textures, - * however, this may represent the number of bytes between - * compression blocks rather than between rows of pixels. - * - * XXX: Compressed textures currently must be tightly packed in guest memory. - * - * If the image is 1-dimensional, pitch is ignored. - * - * If 'pitch' is zero, the SVGA3D device calculates a pitch value - * assuming each row of blocks is tightly packed. - */ - uint32 pitch; -} -#include "vmware_pack_end.h" -SVGAGuestImage; +#pragma pack(push, 1) +typedef struct SVGAGuestImage { + SVGAGuestPtr ptr; -/* - * SVGAColorBGRX -- - * - * A 24-bit color format (BGRX), which does not depend on the - * format of the legacy guest framebuffer (GFB) or the current - * GMRFB state. - */ + uint32 pitch; +} SVGAGuestImage; +#pragma pack(pop) typedef struct SVGAColorBGRX { - union { - struct { - uint32 b : 8; - uint32 g : 8; - uint32 r : 8; - uint32 x : 8; /* Unused */ - }; - - uint32 value; - }; + union { + struct { + uint32 b : 8; + uint32 g : 8; + uint32 r : 8; + uint32 x : 8; + }; + + uint32 value; + }; } SVGAColorBGRX; - -/* - * SVGASignedRect -- - * SVGASignedPoint -- - * - * Signed rectangle and point primitives. These are used by the new - * 2D primitives for drawing to Screen Objects, which can occupy a - * signed virtual coordinate space. - * - * SVGASignedRect specifies a half-open interval: the (left, top) - * pixel is part of the rectangle, but the (right, bottom) pixel is - * not. - */ - -typedef -#include "vmware_pack_begin.h" -struct { - int32 left; - int32 top; - int32 right; - int32 bottom; -} -#include "vmware_pack_end.h" -SVGASignedRect; - -typedef -#include "vmware_pack_begin.h" -struct { - int32 x; - int32 y; -} -#include "vmware_pack_end.h" -SVGASignedPoint; - - -/* - * SVGA Device Capabilities - * - * Note the holes in the bitfield. Missing bits have been deprecated, - * and must not be reused. Those capabilities will never be reported - * by new versions of the SVGA device. - * - * SVGA_CAP_IRQMASK -- - * Provides device interrupts. Adds device register SVGA_REG_IRQMASK - * to set interrupt mask and direct I/O port SVGA_IRQSTATUS_PORT to - * set/clear pending interrupts. - * - * SVGA_CAP_GMR -- - * Provides synchronous mapping of guest memory regions (GMR). - * Adds device registers SVGA_REG_GMR_ID, SVGA_REG_GMR_DESCRIPTOR, - * SVGA_REG_GMR_MAX_IDS, and SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH. - * - * SVGA_CAP_TRACES -- - * Allows framebuffer trace-based updates even when FIFO is enabled. - * Adds device register SVGA_REG_TRACES. - * - * SVGA_CAP_GMR2 -- - * Provides asynchronous commands to define and remap guest memory - * regions. Adds device registers SVGA_REG_GMRS_MAX_PAGES and - * SVGA_REG_MEMORY_SIZE. 
- * - * SVGA_CAP_SCREEN_OBJECT_2 -- - * Allow screen object support, and require backing stores from the - * guest for each screen object. - * - * SVGA_CAP_COMMAND_BUFFERS -- - * Enable register based command buffer submission. - * - * SVGA_CAP_DEAD1 -- - * This cap was incorrectly used by old drivers and should not be - * reused. - * - * SVGA_CAP_CMD_BUFFERS_2 -- - * Enable support for the prepend command buffer submision - * registers. SVGA_REG_CMD_PREPEND_LOW and - * SVGA_REG_CMD_PREPEND_HIGH. - * - * SVGA_CAP_GBOBJECTS -- - * Enable guest-backed objects and surfaces. - * - * SVGA_CAP_DX -- - * Enable support for DX commands, and command buffers in a mob. - * - * SVGA_CAP_HP_CMD_QUEUE -- - * Enable support for the high priority command queue, and the - * ScreenCopy command. - * - * SVGA_CAP_NO_BB_RESTRICTION -- - * Allow ScreenTargets to be defined without regard to the 32-bpp - * bounding-box memory restrictions. ie: - * - * The summed memory usage of all screens (assuming they were defined as - * 32-bpp) must always be less than the value of the - * SVGA_REG_MAX_PRIMARY_MEM register. - * - * If this cap is not present, the 32-bpp bounding box around all screens - * must additionally be under the value of the SVGA_REG_MAX_PRIMARY_MEM - * register. - * - * If the cap is present, the bounding box restriction is lifted (and only - * the screen-sum limit applies). - * - * (Note that this is a slight lie... there is still a sanity limit on any - * dimension of the topology to be less than SVGA_SCREEN_ROOT_LIMIT, even - * when SVGA_CAP_NO_BB_RESTRICTION is present, but that should be - * large enough to express any possible topology without holes between - * monitors.) - * - * SVGA_CAP_CAP2_REGISTER -- - * If this cap is present, the SVGA_REG_CAP2 register is supported. - */ - -#define SVGA_CAP_NONE 0x00000000 -#define SVGA_CAP_RECT_COPY 0x00000002 -#define SVGA_CAP_CURSOR 0x00000020 -#define SVGA_CAP_CURSOR_BYPASS 0x00000040 -#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 -#define SVGA_CAP_8BIT_EMULATION 0x00000100 -#define SVGA_CAP_ALPHA_CURSOR 0x00000200 -#define SVGA_CAP_3D 0x00004000 -#define SVGA_CAP_EXTENDED_FIFO 0x00008000 -#define SVGA_CAP_MULTIMON 0x00010000 -#define SVGA_CAP_PITCHLOCK 0x00020000 -#define SVGA_CAP_IRQMASK 0x00040000 -#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 -#define SVGA_CAP_GMR 0x00100000 -#define SVGA_CAP_TRACES 0x00200000 -#define SVGA_CAP_GMR2 0x00400000 -#define SVGA_CAP_SCREEN_OBJECT_2 0x00800000 -#define SVGA_CAP_COMMAND_BUFFERS 0x01000000 -#define SVGA_CAP_DEAD1 0x02000000 -#define SVGA_CAP_CMD_BUFFERS_2 0x04000000 -#define SVGA_CAP_GBOBJECTS 0x08000000 -#define SVGA_CAP_DX 0x10000000 -#define SVGA_CAP_HP_CMD_QUEUE 0x20000000 -#define SVGA_CAP_NO_BB_RESTRICTION 0x40000000 -#define SVGA_CAP_CAP2_REGISTER 0x80000000 - -/* - * The SVGA_REG_CAP2 register is an additional set of SVGA capability bits. - * - * SVGA_CAP2_GROW_OTABLE -- - * Allow the GrowOTable/DXGrowCOTable commands. - * - * SVGA_CAP2_INTRA_SURFACE_COPY -- - * Allow the IntraSurfaceCopy command. - * - * SVGA_CAP2_DX2 -- - * Allow the DefineGBSurface_v3, WholeSurfaceCopy, WriteZeroSurface, and - * HintZeroSurface commands, and the SVGA_REG_GUEST_DRIVER_ID register. - * - * SVGA_CAP2_GB_MEMSIZE_2 -- - * Allow the SVGA_REG_GBOBJECT_MEM_SIZE_KB register. - * - * SVGA_CAP2_SCREENDMA_REG -- - * Allow the SVGA_REG_SCREENDMA register. - * - * SVGA_CAP2_OTABLE_PTDEPTH_2 -- - * Allow 2 level page tables for OTable commands. 
- * - * SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT -- - * Allow a stretch blt from a non-multisampled surface to a multisampled - * surface. - * - * SVGA_CAP2_CURSOR_MOB -- - * Allow the SVGA_REG_CURSOR_MOBID register. - * - * SVGA_CAP2_MSHINT -- - * Allow the SVGA_REG_MSHINT register. - * - * SVGA_CAP2_DX3 -- - * Allows the DefineGBSurface_v4 command. - * Allows the DXDefineDepthStencilView_v2, DXDefineStreamOutputWithMob, - * and DXBindStreamOutput commands if 3D is also available. - * Allows the DXPredStagingCopy and DXStagingCopy commands if SM41 - * is also available. - * - * SVGA_CAP2_RESERVED -- - * Reserve the last bit for extending the SVGA capabilities to some - * future mechanisms. - */ -#define SVGA_CAP2_NONE 0x00000000 -#define SVGA_CAP2_GROW_OTABLE 0x00000001 -#define SVGA_CAP2_INTRA_SURFACE_COPY 0x00000002 -#define SVGA_CAP2_DX2 0x00000004 -#define SVGA_CAP2_GB_MEMSIZE_2 0x00000008 -#define SVGA_CAP2_SCREENDMA_REG 0x00000010 -#define SVGA_CAP2_OTABLE_PTDEPTH_2 0x00000020 +#pragma pack(push, 1) +typedef struct { + int32 left; + int32 top; + int32 right; + int32 bottom; +} SVGASignedRect; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + int32 x; + int32 y; +} SVGASignedPoint; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 x; + uint32 y; +} SVGAUnsignedPoint; +#pragma pack(pop) + +#define SVGA_CAP_NONE 0x00000000 +#define SVGA_CAP_RECT_COPY 0x00000002 +#define SVGA_CAP_CURSOR 0x00000020 +#define SVGA_CAP_CURSOR_BYPASS 0x00000040 +#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 +#define SVGA_CAP_8BIT_EMULATION 0x00000100 +#define SVGA_CAP_ALPHA_CURSOR 0x00000200 +#define SVGA_CAP_3D 0x00004000 +#define SVGA_CAP_EXTENDED_FIFO 0x00008000 +#define SVGA_CAP_MULTIMON 0x00010000 +#define SVGA_CAP_PITCHLOCK 0x00020000 +#define SVGA_CAP_IRQMASK 0x00040000 +#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 +#define SVGA_CAP_GMR 0x00100000 +#define SVGA_CAP_TRACES 0x00200000 +#define SVGA_CAP_GMR2 0x00400000 +#define SVGA_CAP_SCREEN_OBJECT_2 0x00800000 +#define SVGA_CAP_COMMAND_BUFFERS 0x01000000 +#define SVGA_CAP_DEAD1 0x02000000 +#define SVGA_CAP_CMD_BUFFERS_2 0x04000000 +#define SVGA_CAP_GBOBJECTS 0x08000000 +#define SVGA_CAP_DX 0x10000000 +#define SVGA_CAP_HP_CMD_QUEUE 0x20000000 +#define SVGA_CAP_NO_BB_RESTRICTION 0x40000000 +#define SVGA_CAP_CAP2_REGISTER 0x80000000 + +#define SVGA_CAP2_NONE 0x00000000 +#define SVGA_CAP2_GROW_OTABLE 0x00000001 +#define SVGA_CAP2_INTRA_SURFACE_COPY 0x00000002 +#define SVGA_CAP2_DX2 0x00000004 +#define SVGA_CAP2_GB_MEMSIZE_2 0x00000008 +#define SVGA_CAP2_SCREENDMA_REG 0x00000010 +#define SVGA_CAP2_OTABLE_PTDEPTH_2 0x00000020 #define SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT 0x00000040 -#define SVGA_CAP2_CURSOR_MOB 0x00000080 -#define SVGA_CAP2_MSHINT 0x00000100 -#define SVGA_CAP2_DX3 0x00000400 -#define SVGA_CAP2_RESERVED 0x80000000 - - -/* - * The Guest can optionally read some SVGA device capabilities through - * the backdoor with command BDOOR_CMD_GET_SVGA_CAPABILITIES before - * the SVGA device is initialized. The type of capability the guest - * is requesting from the SVGABackdoorCapType enum should be placed in - * the upper 16 bits of the backdoor command id (ECX). On success the - * the value of EBX will be set to BDOOR_MAGIC and EAX will be set to - * the requested capability. If the command is not supported then EBX - * will be left unchanged and EAX will be set to -1. 
Because it is - * possible that -1 is the value of the requested cap the correct way - * to check if the command was successful is to check if EBX was changed - * to BDOOR_MAGIC making sure to initialize the register to something - * else first. - */ +#define SVGA_CAP2_CURSOR_MOB 0x00000080 +#define SVGA_CAP2_MSHINT 0x00000100 +#define SVGA_CAP2_CB_MAX_SIZE_4MB 0x00000200 +#define SVGA_CAP2_DX3 0x00000400 +#define SVGA_CAP2_FRAME_TYPE 0x00000800 +#define SVGA_CAP2_COTABLE_COPY 0x00001000 +#define SVGA_CAP2_TRACE_FULL_FB 0x00002000 +#define SVGA_CAP2_EXTRA_REGS 0x00004000 +#define SVGA_CAP2_LO_STAGING 0x00008000 +#define SVGA_CAP2_RESERVED 0x80000000 typedef enum { - SVGABackdoorCapDeviceCaps = 0, - SVGABackdoorCapFifoCaps = 1, - SVGABackdoorCap3dHWVersion = 2, - SVGABackdoorCapDeviceCaps2 = 3, - SVGABackdoorCapDevelCaps = 4, - SVGABackdoorDevelRenderer = 5, - SVGABackdoorCapMax = 6, + SVGABackdoorCapDeviceCaps = 0, + SVGABackdoorCapFifoCaps = 1, + SVGABackdoorCap3dHWVersion = 2, + SVGABackdoorCapDeviceCaps2 = 3, + SVGABackdoorCapDevelCaps = 4, + SVGABackdoorDevelRenderer = 5, + SVGABackdoorDevelUsingISB = 6, + SVGABackdoorCapMax = 7, } SVGABackdoorCapType; - -/* - * FIFO register indices. - * - * The FIFO is a chunk of device memory mapped into guest physmem. It - * is always treated as 32-bit words. - * - * The guest driver gets to decide how to partition it between - * - FIFO registers (there are always at least 4, specifying where the - * following data area is and how much data it contains; there may be - * more registers following these, depending on the FIFO protocol - * version in use) - * - FIFO data, written by the guest and slurped out by the VMX. - * These indices are 32-bit word offsets into the FIFO. - */ - enum { - /* - * Block 1 (basic registers): The originally defined FIFO registers. - * These exist and are valid for all versions of the FIFO protocol. - */ - - SVGA_FIFO_MIN = 0, - SVGA_FIFO_MAX, /* The distance from MIN to MAX must be at least 10K */ - SVGA_FIFO_NEXT_CMD, - SVGA_FIFO_STOP, - - /* - * Block 2 (extended registers): Mandatory registers for the extended - * FIFO. These exist if the SVGA caps register includes - * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their - * associated capability bit is enabled. - * - * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied - * support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE. - * This means that the guest has to test individually (in most cases - * using FIFO caps) for the presence of registers after this; the VMX - * can define "extended FIFO" to mean whatever it wants, and currently - * won't enable it unless there's room for that set and much more. - */ - - SVGA_FIFO_CAPABILITIES = 4, - SVGA_FIFO_FLAGS, - /* Valid with SVGA_FIFO_CAP_FENCE: */ - SVGA_FIFO_FENCE, - - /* - * Block 3a (optional extended registers): Additional registers for the - * extended FIFO, whose presence isn't actually implied by - * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to - * leave room for them. - * - * These in block 3a, the VMX currently considers mandatory for the - * extended FIFO. - */ - - /* Valid if exists (i.e. 
if extended FIFO enabled): */ - SVGA_FIFO_3D_HWVERSION, /* See SVGA3dHardwareVersion in svga3d_reg.h */ - /* Valid with SVGA_FIFO_CAP_PITCHLOCK: */ - SVGA_FIFO_PITCHLOCK, - - /* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */ - SVGA_FIFO_CURSOR_ON, /* Cursor bypass 3 show/hide register */ - SVGA_FIFO_CURSOR_X, /* Cursor bypass 3 x register */ - SVGA_FIFO_CURSOR_Y, /* Cursor bypass 3 y register */ - SVGA_FIFO_CURSOR_COUNT, /* Incremented when any of the other 3 change */ - SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */ - - /* Valid with SVGA_FIFO_CAP_RESERVE: */ - SVGA_FIFO_RESERVED, /* Bytes past NEXT_CMD with real contents */ - - /* - * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2: - * - * By default this is SVGA_ID_INVALID, to indicate that the cursor - * coordinates are specified relative to the virtual root. If this - * is set to a specific screen ID, cursor position is reinterpreted - * as a signed offset relative to that screen's origin. - */ - SVGA_FIFO_CURSOR_SCREEN_ID, - - /* - * Valid with SVGA_FIFO_CAP_DEAD - * - * An arbitrary value written by the host, drivers should not use it. - */ - SVGA_FIFO_DEAD, - - /* - * Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED: - * - * Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h) - * on platforms that can enforce graphics resource limits. - */ - SVGA_FIFO_3D_HWVERSION_REVISED, - - /* - * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new - * registers, but this must be done carefully and with judicious use of - * capability bits, since comparisons based on SVGA_FIFO_MIN aren't - * enough to tell you whether the register exists: we've shipped drivers - * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of - * the earlier ones. The actual order of introduction was: - * - PITCHLOCK - * - 3D_CAPS - * - CURSOR_* (cursor bypass 3) - * - RESERVED - * So, code that wants to know whether it can use any of the - * aforementioned registers, or anything else added after PITCHLOCK and - * before 3D_CAPS, needs to reason about something other than - * SVGA_FIFO_MIN. - */ - - /* - * 3D caps block space; valid with 3D hardware version >= - * SVGA3D_HWVERSION_WS6_B1. - */ - SVGA_FIFO_3D_CAPS = 32, - SVGA_FIFO_3D_CAPS_LAST = 32 + 255, - - /* - * End of VMX's current definition of "extended-FIFO registers". - * Registers before here are always enabled/disabled as a block; either - * the extended FIFO is enabled and includes all preceding registers, or - * it's disabled entirely. - * - * Block 3b (truly optional extended registers): Additional registers for - * the extended FIFO, which the VMX already knows how to enable and - * disable with correct granularity. - * - * Registers after here exist if and only if the guest SVGA driver - * sets SVGA_FIFO_MIN high enough to leave room for them. - */ - - /* Valid if register exists: */ - SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */ - SVGA_FIFO_FENCE_GOAL, /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */ - SVGA_FIFO_BUSY, /* See "FIFO Synchronization Registers" */ - - /* - * Always keep this last. This defines the maximum number of - * registers we know about. At power-on, this value is placed in - * the SVGA_REG_MEM_REGS register, and we expect the guest driver - * to allocate this much space in FIFO memory for registers. - */ - SVGA_FIFO_NUM_REGS -}; + SVGA_FIFO_MIN = 0, + SVGA_FIFO_MAX, + SVGA_FIFO_NEXT_CMD, + SVGA_FIFO_STOP, -/* - * Definition of registers included in extended FIFO support. 
- * - * The guest SVGA driver gets to allocate the FIFO between registers - * and data. It must always allocate at least 4 registers, but old - * drivers stopped there. - * - * The VMX will enable extended FIFO support if and only if the guest - * left enough room for all registers defined as part of the mandatory - * set for the extended FIFO. - * - * Note that the guest drivers typically allocate the FIFO only at - * initialization time, not at mode switches, so it's likely that the - * number of FIFO registers won't change without a reboot. - * - * All registers less than this value are guaranteed to be present if - * svgaUser->fifo.extended is set. Any later registers must be tested - * individually for compatibility at each use (in the VMX). - * - * This value is used only by the VMX, so it can change without - * affecting driver compatibility; keep it that way? - */ -#define SVGA_FIFO_EXTENDED_MANDATORY_REGS (SVGA_FIFO_3D_CAPS_LAST + 1) + SVGA_FIFO_CAPABILITIES = 4, + SVGA_FIFO_FLAGS, + SVGA_FIFO_FENCE, -/* - * FIFO Synchronization Registers - * - * SVGA_REG_SYNC -- - * - * The SYNC register can be used by the guest driver to signal to the - * device that the guest driver is waiting for previously submitted - * commands to complete. - * - * When the guest driver writes to the SYNC register, the device sets - * the BUSY register to TRUE, and starts processing the submitted commands - * (if it was not already doing so). When all previously submitted - * commands are finished and the device is idle again, it sets the BUSY - * register back to FALSE. (If the guest driver submits new commands - * after writing the SYNC register, the new commands are not guaranteed - * to have been procesesd.) - * - * When guest drivers are submitting commands using the FIFO, the device - * periodically polls to check for new FIFO commands when idle, which may - * introduce a delay in command processing. If the guest-driver wants - * the commands to be processed quickly (which it typically does), it - * should write SYNC after each batch of commands is committed to the - * FIFO to immediately wake up the device. For even better performance, - * the guest can use the SVGA_FIFO_BUSY register to avoid these extra - * SYNC writes if the device is already active, using the technique known - * as "Ringing the Doorbell" (described below). (Note that command - * buffer submission implicitly wakes up the device, and so doesn't - * suffer from this problem.) - * - * The SYNC register can also be used in combination with BUSY to - * synchronously ensure that all SVGA commands are processed (with both - * the FIFO and command-buffers). To do this, the guest driver should - * write to SYNC, and then loop reading BUSY until BUSY returns FALSE. - * This technique is known as a "Legacy Sync". - * - * SVGA_REG_BUSY -- - * - * This register is set to TRUE when SVGA_REG_SYNC is written, - * and is set back to FALSE when the device has finished processing - * all commands and is idle again. - * - * Every read from the BUSY reigster will block for an undefined - * amount of time (normally until the device finishes some interesting - * work unit), or the device is idle. - * - * Guest drivers can also do a partial Legacy Sync to check for some - * particular condition, for instance by stopping early when a fence - * passes before BUSY has been set back to FALSE. This is particularly - * useful if the guest-driver knows that it is blocked waiting on the - * device, because it will yield CPU time back to the host. 
- * - * SVGA_FIFO_BUSY -- - * - * The SVGA_FIFO_BUSY register is a fast way for the guest driver to check - * whether the device is actively processing FIFO commands before writing - * the more expensive SYNC register. - * - * If this register reads as TRUE, the device is actively processing - * FIFO commands. - * - * If this register reads as FALSE, the device may not be actively - * processing commands, and the guest driver should try - * "Ringing the Doorbell". - * - * To Ring the Doorbell, the guest should: - * - * 1. Have already written their batch of commands into the FIFO. - * 2. Check if the SVGA_FIFO_BUSY register is available by reading - * SVGA_FIFO_MIN. - * 3. Read SVGA_FIFO_BUSY. If it reads as TRUE, the device is actively - * processing FIFO commands, and no further action is necessary. - * 4. If SVGA_FIFO_BUSY was FALSE, write TRUE to SVGA_REG_SYNC. - * - * For maximum performance, this procedure should be followed after - * every meaningful batch of commands has been written into the FIFO. - * (Normally when the underlying application signals it's finished a - * meaningful work unit by calling Flush.) - */ + SVGA_FIFO_3D_HWVERSION, + SVGA_FIFO_PITCHLOCK, -/* - * FIFO Capabilities - * - * Fence -- Fence register and command are supported - * Accel Front -- Front buffer only commands are supported - * Pitch Lock -- Pitch lock register is supported - * Video -- SVGA Video overlay units are supported - * Escape -- Escape command is supported - * - * SVGA_FIFO_CAP_SCREEN_OBJECT -- - * - * Provides dynamic multi-screen rendering, for improved Unity and - * multi-monitor modes. With Screen Object, the guest can - * dynamically create and destroy 'screens', which can represent - * Unity windows or virtual monitors. Screen Object also provides - * strong guarantees that DMA operations happen only when - * guest-initiated. Screen Object deprecates the BAR1 guest - * framebuffer (GFB) and all commands that work only with the GFB. - * - * New registers: - * FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID - * - * New 2D commands: - * DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN, - * BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY - * - * New 3D commands: - * BLIT_SURFACE_TO_SCREEN - * - * New guarantees: - * - * - The host will not read or write guest memory, including the GFB, - * except when explicitly initiated by a DMA command. - * - * - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK, - * is guaranteed to complete before any subsequent FENCEs. - * - * - All legacy commands which affect a Screen (UPDATE, PRESENT, - * PRESENT_READBACK) as well as new Screen blit commands will - * all behave consistently as blits, and memory will be read - * or written in FIFO order. - * - * For example, if you PRESENT from one SVGA3D surface to multiple - * places on the screen, the data copied will always be from the - * SVGA3D surface at the time the PRESENT was issued in the FIFO. - * This was not necessarily true on devices without Screen Object. - * - * This means that on devices that support Screen Object, the - * PRESENT_READBACK command should not be necessary unless you - * actually want to read back the results of 3D rendering into - * system memory. (And for that, the BLIT_SCREEN_TO_GMRFB - * command provides a strict superset of functionality.) - * - * - When a screen is resized, either using Screen Object commands or - * legacy multimon registers, its contents are preserved. 
- * - * SVGA_FIFO_CAP_GMR2 -- - * - * Provides new commands to define and remap guest memory regions (GMR). - * - * New 2D commands: - * DEFINE_GMR2, REMAP_GMR2. - * - * SVGA_FIFO_CAP_3D_HWVERSION_REVISED -- - * - * Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists. - * This register may replace SVGA_FIFO_3D_HWVERSION on platforms - * that enforce graphics resource limits. This allows the platform - * to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest - * drivers that do not limit their resources. - * - * Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators - * are codependent (and thus we use a single capability bit). - * - * SVGA_FIFO_CAP_SCREEN_OBJECT_2 -- - * - * Modifies the DEFINE_SCREEN command to include a guest provided - * backing store in GMR memory and the bytesPerLine for the backing - * store. This capability requires the use of a backing store when - * creating screen objects. However if SVGA_FIFO_CAP_SCREEN_OBJECT - * is present then backing stores are optional. - * - * SVGA_FIFO_CAP_DEAD -- - * - * Drivers should not use this cap bit. This cap bit can not be - * reused since some hosts already expose it. - */ + SVGA_FIFO_CURSOR_ON, + SVGA_FIFO_CURSOR_X, + SVGA_FIFO_CURSOR_Y, + SVGA_FIFO_CURSOR_COUNT, + SVGA_FIFO_CURSOR_LAST_UPDATED, -#define SVGA_FIFO_CAP_NONE 0 -#define SVGA_FIFO_CAP_FENCE (1<<0) -#define SVGA_FIFO_CAP_ACCELFRONT (1<<1) -#define SVGA_FIFO_CAP_PITCHLOCK (1<<2) -#define SVGA_FIFO_CAP_VIDEO (1<<3) -#define SVGA_FIFO_CAP_CURSOR_BYPASS_3 (1<<4) -#define SVGA_FIFO_CAP_ESCAPE (1<<5) -#define SVGA_FIFO_CAP_RESERVE (1<<6) -#define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7) -#define SVGA_FIFO_CAP_GMR2 (1<<8) -#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED SVGA_FIFO_CAP_GMR2 -#define SVGA_FIFO_CAP_SCREEN_OBJECT_2 (1<<9) -#define SVGA_FIFO_CAP_DEAD (1<<10) + SVGA_FIFO_RESERVED, + SVGA_FIFO_CURSOR_SCREEN_ID, -/* - * FIFO Flags - * - * Accel Front -- Driver should use front buffer only commands - */ + SVGA_FIFO_DEAD, -#define SVGA_FIFO_FLAG_NONE 0 -#define SVGA_FIFO_FLAG_ACCELFRONT (1<<0) -#define SVGA_FIFO_FLAG_RESERVED (1<<31) /* Internal use only */ + SVGA_FIFO_3D_HWVERSION_REVISED, -/* - * FIFO reservation sentinel value - */ + SVGA_FIFO_3D_CAPS = 32, + SVGA_FIFO_3D_CAPS_LAST = 32 + 255, -#define SVGA_FIFO_RESERVED_UNKNOWN 0xffffffff + SVGA_FIFO_GUEST_3D_HWVERSION, + SVGA_FIFO_FENCE_GOAL, + SVGA_FIFO_BUSY, + SVGA_FIFO_NUM_REGS +}; -/* - * ScreenDMA Register Values - */ +#define SVGA_FIFO_3D_CAPS_SIZE (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) -#define SVGA_SCREENDMA_REG_UNDEFINED 0 -#define SVGA_SCREENDMA_REG_NOT_PRESENT 1 -#define SVGA_SCREENDMA_REG_PRESENT 2 -#define SVGA_SCREENDMA_REG_MAX 3 +#define SVGA3D_FIFO_CAPS_RECORD_DEVCAPS 0x100 +typedef uint32 SVGA3dFifoCapsRecordType; -/* - * Video overlay support - */ +typedef uint32 SVGA3dFifoCapPair[2]; -#define SVGA_NUM_OVERLAY_UNITS 32 +#pragma pack(push, 1) +typedef struct SVGA3dFifoCapsRecordHeader { + uint32 length; + SVGA3dFifoCapsRecordType type; +} SVGA3dFifoCapsRecordHeader; +#pragma pack(pop) -/* - * Video capabilities that the guest is currently using - */ +#define SVGA_FIFO_EXTENDED_MANDATORY_REGS (SVGA_FIFO_3D_CAPS_LAST + 1) -#define SVGA_VIDEO_FLAG_COLORKEY 0x0001 +#define SVGA_FIFO_CAP_NONE 0 +#define SVGA_FIFO_CAP_FENCE (1 << 0) +#define SVGA_FIFO_CAP_ACCELFRONT (1 << 1) +#define SVGA_FIFO_CAP_PITCHLOCK (1 << 2) +#define SVGA_FIFO_CAP_VIDEO (1 << 3) +#define SVGA_FIFO_CAP_CURSOR_BYPASS_3 (1 << 4) +#define SVGA_FIFO_CAP_ESCAPE (1 << 5) +#define SVGA_FIFO_CAP_RESERVE (1 
<< 6) +#define SVGA_FIFO_CAP_SCREEN_OBJECT (1 << 7) +#define SVGA_FIFO_CAP_GMR2 (1 << 8) +#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED SVGA_FIFO_CAP_GMR2 +#define SVGA_FIFO_CAP_SCREEN_OBJECT_2 (1 << 9) +#define SVGA_FIFO_CAP_DEAD (1 << 10) +#define SVGA_FIFO_FLAG_NONE 0 +#define SVGA_FIFO_FLAG_ACCELFRONT (1 << 0) +#define SVGA_FIFO_FLAG_RESERVED (1 << 31) -/* - * Offsets for the video overlay registers - */ +#define SVGA_FIFO_RESERVED_UNKNOWN 0xffffffff -enum { - SVGA_VIDEO_ENABLED = 0, - SVGA_VIDEO_FLAGS, - SVGA_VIDEO_DATA_OFFSET, - SVGA_VIDEO_FORMAT, - SVGA_VIDEO_COLORKEY, - SVGA_VIDEO_SIZE, /* Deprecated */ - SVGA_VIDEO_WIDTH, - SVGA_VIDEO_HEIGHT, - SVGA_VIDEO_SRC_X, - SVGA_VIDEO_SRC_Y, - SVGA_VIDEO_SRC_WIDTH, - SVGA_VIDEO_SRC_HEIGHT, - SVGA_VIDEO_DST_X, /* Signed int32 */ - SVGA_VIDEO_DST_Y, /* Signed int32 */ - SVGA_VIDEO_DST_WIDTH, - SVGA_VIDEO_DST_HEIGHT, - SVGA_VIDEO_PITCH_1, - SVGA_VIDEO_PITCH_2, - SVGA_VIDEO_PITCH_3, - SVGA_VIDEO_DATA_GMRID, /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */ - SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords */ - /* (SVGA_ID_INVALID) */ - SVGA_VIDEO_NUM_REGS -}; +#define SVGA_SCREENDMA_REG_UNDEFINED 0 +#define SVGA_SCREENDMA_REG_NOT_PRESENT 1 +#define SVGA_SCREENDMA_REG_PRESENT 2 +#define SVGA_SCREENDMA_REG_MAX 3 +#define SVGA_NUM_OVERLAY_UNITS 32 -/* - * SVGA Overlay Units - * - * width and height relate to the entire source video frame. - * srcX, srcY, srcWidth and srcHeight represent subset of the source - * video frame to be displayed. - */ +#define SVGA_VIDEO_FLAG_COLORKEY 0x0001 -typedef -#include "vmware_pack_begin.h" -struct SVGAOverlayUnit { - uint32 enabled; - uint32 flags; - uint32 dataOffset; - uint32 format; - uint32 colorKey; - uint32 size; - uint32 width; - uint32 height; - uint32 srcX; - uint32 srcY; - uint32 srcWidth; - uint32 srcHeight; - int32 dstX; - int32 dstY; - uint32 dstWidth; - uint32 dstHeight; - uint32 pitches[3]; - uint32 dataGMRId; - uint32 dstScreenId; -} -#include "vmware_pack_end.h" -SVGAOverlayUnit; +enum { + SVGA_VIDEO_ENABLED = 0, + SVGA_VIDEO_FLAGS, + SVGA_VIDEO_DATA_OFFSET, + SVGA_VIDEO_FORMAT, + SVGA_VIDEO_COLORKEY, + SVGA_VIDEO_SIZE, + SVGA_VIDEO_WIDTH, + SVGA_VIDEO_HEIGHT, + SVGA_VIDEO_SRC_X, + SVGA_VIDEO_SRC_Y, + SVGA_VIDEO_SRC_WIDTH, + SVGA_VIDEO_SRC_HEIGHT, + SVGA_VIDEO_DST_X, + SVGA_VIDEO_DST_Y, + SVGA_VIDEO_DST_WIDTH, + SVGA_VIDEO_DST_HEIGHT, + SVGA_VIDEO_PITCH_1, + SVGA_VIDEO_PITCH_2, + SVGA_VIDEO_PITCH_3, + SVGA_VIDEO_DATA_GMRID, + SVGA_VIDEO_DST_SCREEN_ID, + SVGA_VIDEO_NUM_REGS +}; +#pragma pack(push, 1) +typedef struct SVGAOverlayUnit { + uint32 enabled; + uint32 flags; + uint32 dataOffset; + uint32 format; + uint32 colorKey; + uint32 size; + uint32 width; + uint32 height; + uint32 srcX; + uint32 srcY; + uint32 srcWidth; + uint32 srcHeight; + int32 dstX; + int32 dstY; + uint32 dstWidth; + uint32 dstHeight; + uint32 pitches[3]; + uint32 dataGMRId; + uint32 dstScreenId; +} SVGAOverlayUnit; +#pragma pack(pop) -/* - * Guest display topology - * - * XXX: This structure is not part of the SVGA device's interface, and - * doesn't really belong here. 
- */ #define SVGA_INVALID_DISPLAY_ID ((uint32)-1) typedef struct SVGADisplayTopology { - uint16 displayId; - uint16 isPrimary; - uint32 width; - uint32 height; - uint32 positionX; - uint32 positionY; + uint16 displayId; + uint16 isPrimary; + uint32 width; + uint32 height; + uint32 positionX; + uint32 positionY; } SVGADisplayTopology; - -/* - * SVGAScreenObject -- - * - * This is a new way to represent a guest's multi-monitor screen or - * Unity window. Screen objects are only supported if the - * SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set. - * - * If Screen Objects are supported, they can be used to fully - * replace the functionality provided by the framebuffer registers - * (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY. - * - * The screen object is a struct with guaranteed binary - * compatibility. New flags can be added, and the struct may grow, - * but existing fields must retain their meaning. - * - * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2 are required fields of - * a SVGAGuestPtr that is used to back the screen contents. This - * memory must come from the GFB. The guest is not allowed to - * access the memory and doing so will have undefined results. The - * backing store is required to be page aligned and the size is - * padded to the next page boundry. The number of pages is: - * (bytesPerLine * size.width * 4 + PAGE_SIZE - 1) / PAGE_SIZE - * - * The pitch in the backingStore is required to be at least large - * enough to hold a 32bbp scanline. It is recommended that the - * driver pad bytesPerLine for a potential performance win. - * - * The cloneCount field is treated as a hint from the guest that - * the user wants this display to be cloned, countCount times. A - * value of zero means no cloning should happen. - */ - -#define SVGA_SCREEN_MUST_BE_SET (1 << 0) -#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */ -#define SVGA_SCREEN_IS_PRIMARY (1 << 1) +#define SVGA_SCREEN_MUST_BE_SET (1 << 0) +#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET +#define SVGA_SCREEN_IS_PRIMARY (1 << 1) #define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) -/* - * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When the screen is - * deactivated the base layer is defined to lose all contents and - * become black. When a screen is deactivated the backing store is - * optional. When set backingPtr and bytesPerLine will be ignored. - */ -#define SVGA_SCREEN_DEACTIVATE (1 << 3) +#define SVGA_SCREEN_DEACTIVATE (1 << 3) -/* - * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When this flag is set - * the screen contents will be outputted as all black to the user - * though the base layer contents is preserved. The screen base layer - * can still be read and written to like normal though the no visible - * effect will be seen by the user. When the flag is changed the - * screen will be blanked or redrawn to the current contents as needed - * without any extra commands from the driver. This flag only has an - * effect when the screen is not deactivated. - */ #define SVGA_SCREEN_BLANKING (1 << 4) -typedef -#include "vmware_pack_begin.h" -struct { - uint32 structSize; /* sizeof(SVGAScreenObject) */ - uint32 id; - uint32 flags; - struct { - uint32 width; - uint32 height; - } size; - struct { - int32 x; - int32 y; - } root; - - /* - * Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional - * with SVGA_FIFO_CAP_SCREEN_OBJECT. 
- */ - SVGAGuestImage backingStore; - - /* - * The cloneCount field is treated as a hint from the guest that - * the user wants this display to be cloned, cloneCount times. - * - * A value of zero means no cloning should happen. - */ - uint32 cloneCount; -} -#include "vmware_pack_end.h" -SVGAScreenObject; - - -/* - * Commands in the command FIFO: - * - * Command IDs defined below are used for the traditional 2D FIFO - * communication (not all commands are available for all versions of the - * SVGA FIFO protocol). - * - * Note the holes in the command ID numbers: These commands have been - * deprecated, and the old IDs must not be reused. - * - * Command IDs from 1000 to 2999 are reserved for use by the SVGA3D - * protocol. - * - * Each command's parameters are described by the comments and - * structs below. - */ +#pragma pack(push, 1) +typedef struct { + uint32 structSize; + uint32 id; + uint32 flags; + struct { + uint32 width; + uint32 height; + } size; + struct { + int32 x; + int32 y; + } root; + + SVGAGuestImage backingStore; + + uint32 cloneCount; +} SVGAScreenObject; +#pragma pack(pop) typedef enum { - SVGA_CMD_INVALID_CMD = 0, - SVGA_CMD_UPDATE = 1, - SVGA_CMD_RECT_COPY = 3, - SVGA_CMD_RECT_ROP_COPY = 14, - SVGA_CMD_DEFINE_CURSOR = 19, - SVGA_CMD_DEFINE_ALPHA_CURSOR = 22, - SVGA_CMD_UPDATE_VERBOSE = 25, - SVGA_CMD_FRONT_ROP_FILL = 29, - SVGA_CMD_FENCE = 30, - SVGA_CMD_ESCAPE = 33, - SVGA_CMD_DEFINE_SCREEN = 34, - SVGA_CMD_DESTROY_SCREEN = 35, - SVGA_CMD_DEFINE_GMRFB = 36, - SVGA_CMD_BLIT_GMRFB_TO_SCREEN = 37, - SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38, - SVGA_CMD_ANNOTATION_FILL = 39, - SVGA_CMD_ANNOTATION_COPY = 40, - SVGA_CMD_DEFINE_GMR2 = 41, - SVGA_CMD_REMAP_GMR2 = 42, - SVGA_CMD_DEAD = 43, - SVGA_CMD_DEAD_2 = 44, - SVGA_CMD_NOP = 45, - SVGA_CMD_NOP_ERROR = 46, - SVGA_CMD_MAX + SVGA_CMD_INVALID_CMD = 0, + SVGA_CMD_UPDATE = 1, + SVGA_CMD_RECT_COPY = 3, + SVGA_CMD_RECT_ROP_COPY = 14, + SVGA_CMD_DEFINE_CURSOR = 19, + SVGA_CMD_DEFINE_ALPHA_CURSOR = 22, + SVGA_CMD_UPDATE_VERBOSE = 25, + SVGA_CMD_FRONT_ROP_FILL = 29, + SVGA_CMD_FENCE = 30, + SVGA_CMD_ESCAPE = 33, + SVGA_CMD_DEFINE_SCREEN = 34, + SVGA_CMD_DESTROY_SCREEN = 35, + SVGA_CMD_DEFINE_GMRFB = 36, + SVGA_CMD_BLIT_GMRFB_TO_SCREEN = 37, + SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38, + SVGA_CMD_ANNOTATION_FILL = 39, + SVGA_CMD_ANNOTATION_COPY = 40, + SVGA_CMD_DEFINE_GMR2 = 41, + SVGA_CMD_REMAP_GMR2 = 42, + SVGA_CMD_DEAD = 43, + SVGA_CMD_DEAD_2 = 44, + SVGA_CMD_NOP = 45, + SVGA_CMD_NOP_ERROR = 46, + SVGA_CMD_MAX } SVGAFifoCmdId; -#define SVGA_CMD_MAX_DATASIZE (256 * 1024) -#define SVGA_CMD_MAX_ARGS 64 - - -/* - * SVGA_CMD_UPDATE -- - * - * This is a DMA transfer which copies from the Guest Framebuffer - * (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which - * intersect with the provided virtual rectangle. - * - * This command does not support using arbitrary guest memory as a - * data source- it only works with the pre-defined GFB memory. - * This command also does not support signed virtual coordinates. - * If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with - * negative root x/y coordinates, the negative portion of those - * screens will not be reachable by this command. - * - * This command is not necessary when using framebuffer - * traces. Traces are automatically enabled if the SVGA FIFO is - * disabled, and you may explicitly enable/disable traces using - * SVGA_REG_TRACES. With traces enabled, any write to the GFB will - * automatically act as if a subsequent SVGA_CMD_UPDATE was issued. 
- * - * Traces and SVGA_CMD_UPDATE are the only supported ways to render - * pseudocolor screen updates. The newer Screen Object commands - * only support true color formats. - * - * Availability: - * Always available. - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 x; - uint32 y; - uint32 width; - uint32 height; -} -#include "vmware_pack_end.h" -SVGAFifoCmdUpdate; - - -/* - * SVGA_CMD_RECT_COPY -- - * - * Perform a rectangular DMA transfer from one area of the GFB to - * another, and copy the result to any screens which intersect it. - * - * Availability: - * SVGA_CAP_RECT_COPY - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 srcX; - uint32 srcY; - uint32 destX; - uint32 destY; - uint32 width; - uint32 height; -} -#include "vmware_pack_end.h" -SVGAFifoCmdRectCopy; - - -/* - * SVGA_CMD_RECT_ROP_COPY -- - * - * Perform a rectangular DMA transfer from one area of the GFB to - * another, and copy the result to any screens which intersect it. - * The value of ROP may only be SVGA_ROP_COPY, and this command is - * only supported for backwards compatibility reasons. - * - * Availability: - * SVGA_CAP_RECT_COPY - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 srcX; - uint32 srcY; - uint32 destX; - uint32 destY; - uint32 width; - uint32 height; - uint32 rop; -} -#include "vmware_pack_end.h" -SVGAFifoCmdRectRopCopy; - - -/* - * SVGA_CMD_DEFINE_CURSOR -- - * - * Provide a new cursor image, as an AND/XOR mask. - * - * The recommended way to position the cursor overlay is by using - * the SVGA_FIFO_CURSOR_* registers, supported by the - * SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability. - * - * Availability: - * SVGA_CAP_CURSOR - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 id; /* Reserved, must be zero. */ - uint32 hotspotX; - uint32 hotspotY; - uint32 width; - uint32 height; - uint32 andMaskDepth; /* Value must be 1 or equal to BITS_PER_PIXEL */ - uint32 xorMaskDepth; /* Value must be 1 or equal to BITS_PER_PIXEL */ - /* - * Followed by scanline data for AND mask, then XOR mask. - * Each scanline is padded to a 32-bit boundary. - */ -} -#include "vmware_pack_end.h" -SVGAFifoCmdDefineCursor; - - -/* - * SVGA_CMD_DEFINE_ALPHA_CURSOR -- - * - * Provide a new cursor image, in 32-bit BGRA format. - * - * The recommended way to position the cursor overlay is by using - * the SVGA_FIFO_CURSOR_* registers, supported by the - * SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability. - * - * Availability: - * SVGA_CAP_ALPHA_CURSOR - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 id; /* Reserved, must be zero. */ - uint32 hotspotX; - uint32 hotspotY; - uint32 width; - uint32 height; - /* Followed by scanline data */ -} -#include "vmware_pack_end.h" -SVGAFifoCmdDefineAlphaCursor; - - -/* - * Provide a new large cursor image, as an AND/XOR mask. - * - * Should only be used for CursorMob functionality - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 hotspotX; - uint32 hotspotY; - uint32 width; - uint32 height; - uint32 andMaskDepth; - uint32 xorMaskDepth; - /* - * Followed by scanline data for AND mask, then XOR mask. - * Each scanline is padded to a 32-bit boundary. - */ -} -#include "vmware_pack_end.h" -SVGAGBColorCursorHeader; - - -/* - * Provide a new large cursor image, in 32-bit BGRA format. 
- * - * Should only be used for CursorMob functionality - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 hotspotX; - uint32 hotspotY; - uint32 width; - uint32 height; - /* Followed by scanline data */ -} -#include "vmware_pack_end.h" -SVGAGBAlphaCursorHeader; - - /* - * Define the SVGA guest backed cursor types - */ +#define SVGA_CMD_MAX_DATASIZE (256 * 1024) +#define SVGA_CMD_MAX_ARGS 64 + +#pragma pack(push, 1) +typedef struct { + uint32 x; + uint32 y; + uint32 width; + uint32 height; +} SVGAFifoCmdUpdate; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 srcX; + uint32 srcY; + uint32 destX; + uint32 destY; + uint32 width; + uint32 height; +} SVGAFifoCmdRectCopy; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 srcX; + uint32 srcY; + uint32 destX; + uint32 destY; + uint32 width; + uint32 height; + uint32 rop; +} SVGAFifoCmdRectRopCopy; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 id; + uint32 hotspotX; + uint32 hotspotY; + uint32 width; + uint32 height; + uint32 andMaskDepth; + uint32 xorMaskDepth; + +} SVGAFifoCmdDefineCursor; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 id; + uint32 hotspotX; + uint32 hotspotY; + uint32 width; + uint32 height; + +} SVGAFifoCmdDefineAlphaCursor; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 hotspotX; + uint32 hotspotY; + uint32 width; + uint32 height; + uint32 andMaskDepth; + uint32 xorMaskDepth; + +} SVGAGBColorCursorHeader; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 hotspotX; + uint32 hotspotY; + uint32 width; + uint32 height; + +} SVGAGBAlphaCursorHeader; +#pragma pack(pop) typedef enum { - SVGA_COLOR_CURSOR = 0, - SVGA_ALPHA_CURSOR = 1, + SVGA_COLOR_CURSOR = 0, + SVGA_ALPHA_CURSOR = 1, } SVGAGBCursorType; -/* - * Provide a new large cursor image. - * - * Should only be used for CursorMob functionality - */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGAGBCursorType type; - union { - SVGAGBColorCursorHeader colorHeader; - SVGAGBAlphaCursorHeader alphaHeader; - } header; - uint32 sizeInBytes; - /* - * Followed by the cursor data - */ -} -#include "vmware_pack_end.h" -SVGAGBCursorHeader; - - -/* - * SVGA_CMD_UPDATE_VERBOSE -- - * - * Just like SVGA_CMD_UPDATE, but also provide a per-rectangle - * 'reason' value, an opaque cookie which is used by internal - * debugging tools. Third party drivers should not use this - * command. - * - * Availability: - * SVGA_CAP_EXTENDED_FIFO - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 x; - uint32 y; - uint32 width; - uint32 height; - uint32 reason; -} -#include "vmware_pack_end.h" -SVGAFifoCmdUpdateVerbose; - - -/* - * SVGA_CMD_FRONT_ROP_FILL -- - * - * This is a hint which tells the SVGA device that the driver has - * just filled a rectangular region of the GFB with a solid - * color. Instead of reading these pixels from the GFB, the device - * can assume that they all equal 'color'. This is primarily used - * for remote desktop protocols. - * - * Availability: - * SVGA_FIFO_CAP_ACCELFRONT - */ - -#define SVGA_ROP_COPY 0x03 - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 color; /* In the same format as the GFB */ - uint32 x; - uint32 y; - uint32 width; - uint32 height; - uint32 rop; /* Must be SVGA_ROP_COPY */ -} -#include "vmware_pack_end.h" -SVGAFifoCmdFrontRopFill; - - -/* - * SVGA_CMD_FENCE -- - * - * Insert a synchronization fence. 
When the SVGA device reaches - * this command, it will copy the 'fence' value into the - * SVGA_FIFO_FENCE register. It will also compare the fence against - * SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the - * SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will - * raise this interrupt. - * - * Availability: - * SVGA_FIFO_FENCE for this command, - * SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL. - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 fence; -} -#include "vmware_pack_end.h" -SVGAFifoCmdFence; - - -/* - * SVGA_CMD_ESCAPE -- - * - * Send an extended or vendor-specific variable length command. - * This is used for video overlay, third party plugins, and - * internal debugging tools. See svga_escape.h - * - * Availability: - * SVGA_FIFO_CAP_ESCAPE - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 nsid; - uint32 size; - /* followed by 'size' bytes of data */ -} -#include "vmware_pack_end.h" -SVGAFifoCmdEscape; - - -/* - * SVGA_CMD_DEFINE_SCREEN -- - * - * Define or redefine an SVGAScreenObject. See the description of - * SVGAScreenObject above. The video driver is responsible for - * generating new screen IDs. They should be small positive - * integers. The virtual device will have an implementation - * specific upper limit on the number of screen IDs - * supported. Drivers are responsible for recycling IDs. The first - * valid ID is zero. - * - * - Interaction with other registers: - * - * For backwards compatibility, when the GFB mode registers (WIDTH, - * HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device - * deletes all screens other than screen #0, and redefines screen - * #0 according to the specified mode. Drivers that use - * SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0. - * - * If you use screen objects, do not use the legacy multi-mon - * registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*). - * - * Availability: - * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 - */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGAScreenObject screen; /* Variable-length according to version */ -} -#include "vmware_pack_end.h" -SVGAFifoCmdDefineScreen; - - -/* - * SVGA_CMD_DESTROY_SCREEN -- - * - * Destroy an SVGAScreenObject. Its ID is immediately available for - * re-use. - * - * Availability: - * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 screenId; -} -#include "vmware_pack_end.h" -SVGAFifoCmdDestroyScreen; - - -/* - * SVGA_CMD_DEFINE_GMRFB -- - * - * This command sets a piece of SVGA device state called the - * Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a - * piece of light-weight state which identifies the location and - * format of an image in guest memory or in BAR1. The GMRFB has - * an arbitrary size, and it doesn't need to match the geometry - * of the GFB or any screen object. - * - * The GMRFB can be redefined as often as you like. You could - * always use the same GMRFB, you could redefine it before - * rendering from a different guest screen, or you could even - * redefine it before every blit. - * - * There are multiple ways to use this command. The simplest way is - * to use it to move the framebuffer either to elsewhere in the GFB - * (BAR1) memory region, or to a user-defined GMR. This lets a - * driver use a framebuffer allocated entirely out of normal system - * memory, which we encourage. 
- * - * Another way to use this command is to set up a ring buffer of - * updates in GFB memory. If a driver wants to ensure that no - * frames are skipped by the SVGA device, it is important that the - * driver not modify the source data for a blit until the device is - * done processing the command. One efficient way to accomplish - * this is to use a ring of small DMA buffers. Each buffer is used - * for one blit, then we move on to the next buffer in the - * ring. The FENCE mechanism is used to protect each buffer from - * re-use until the device is finished with that buffer's - * corresponding blit. - * - * This command does not affect the meaning of SVGA_CMD_UPDATE. - * UPDATEs always occur from the legacy GFB memory area. This - * command has no support for pseudocolor GMRFBs. Currently only - * true-color 15, 16, and 24-bit depths are supported. Future - * devices may expose capabilities for additional framebuffer - * formats. - * - * The default GMRFB value is undefined. Drivers must always send - * this command at least once before performing any blit from the - * GMRFB. - * - * Availability: - * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 - */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGAGuestPtr ptr; - uint32 bytesPerLine; - SVGAGMRImageFormat format; -} -#include "vmware_pack_end.h" -SVGAFifoCmdDefineGMRFB; - - -/* - * SVGA_CMD_BLIT_GMRFB_TO_SCREEN -- - * - * This is a guest-to-host blit. It performs a DMA operation to - * copy a rectangular region of pixels from the current GMRFB to - * a ScreenObject. - * - * The destination coordinate may be specified relative to a - * screen's origin. The provided screen ID must be valid. - * - * The SVGA device is guaranteed to finish reading from the GMRFB - * by the time any subsequent FENCE commands are reached. - * - * This command consumes an annotation. See the - * SVGA_CMD_ANNOTATION_* commands for details. - * - * Availability: - * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 - */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGASignedPoint srcOrigin; - SVGASignedRect destRect; - uint32 destScreenId; -} -#include "vmware_pack_end.h" -SVGAFifoCmdBlitGMRFBToScreen; - - -/* - * SVGA_CMD_BLIT_SCREEN_TO_GMRFB -- - * - * This is a host-to-guest blit. It performs a DMA operation to - * copy a rectangular region of pixels from a single ScreenObject - * back to the current GMRFB. - * - * The source coordinate is specified relative to a screen's - * origin. The provided screen ID must be valid. If any parameters - * are invalid, the resulting pixel values are undefined. - * - * The SVGA device is guaranteed to finish writing to the GMRFB by - * the time any subsequent FENCE commands are reached. - * - * Availability: - * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 - */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGASignedPoint destOrigin; - SVGASignedRect srcRect; - uint32 srcScreenId; -} -#include "vmware_pack_end.h" -SVGAFifoCmdBlitScreenToGMRFB; - - -/* - * SVGA_CMD_ANNOTATION_FILL -- - * - * The annotation commands have been deprecated, should not be used - * by new drivers. They used to provide performance hints to the SVGA - * device about the content of screen updates, but newer SVGA devices - * ignore these. 
- * - * Availability: - * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 - */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGAColorBGRX color; -} -#include "vmware_pack_end.h" -SVGAFifoCmdAnnotationFill; - - -/* - * SVGA_CMD_ANNOTATION_COPY -- - * - * The annotation commands have been deprecated, should not be used - * by new drivers. They used to provide performance hints to the SVGA - * device about the content of screen updates, but newer SVGA devices - * ignore these. - * - * Availability: - * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2 - */ - -typedef -#include "vmware_pack_begin.h" -struct { - SVGASignedPoint srcOrigin; - uint32 srcScreenId; -} -#include "vmware_pack_end.h" -SVGAFifoCmdAnnotationCopy; - - -/* - * SVGA_CMD_DEFINE_GMR2 -- - * - * Define guest memory region v2. See the description of GMRs above. - * - * Availability: - * SVGA_CAP_GMR2 - */ - -typedef -#include "vmware_pack_begin.h" -struct { - uint32 gmrId; - uint32 numPages; -} -#include "vmware_pack_end.h" -SVGAFifoCmdDefineGMR2; - - -/* - * SVGA_CMD_REMAP_GMR2 -- - * - * Remap guest memory region v2. See the description of GMRs above. - * - * This command allows guest to modify a portion of an existing GMR by - * invalidating it or reassigning it to different guest physical pages. - * The pages are identified by physical page number (PPN). The pages - * are assumed to be pinned and valid for DMA operations. - * - * Description of command flags: - * - * SVGA_REMAP_GMR2_VIA_GMR: If enabled, references a PPN list in a GMR. - * The PPN list must not overlap with the remap region (this can be - * handled trivially by referencing a separate GMR). If flag is - * disabled, PPN list is appended to SVGARemapGMR command. - * - * SVGA_REMAP_GMR2_PPN64: If set, PPN list is in PPN64 format, otherwise - * it is in PPN32 format. - * - * SVGA_REMAP_GMR2_SINGLE_PPN: If set, PPN list contains a single entry. - * A single PPN can be used to invalidate a portion of a GMR or - * map it to to a single guest scratch page. 
- * - * Availability: - * SVGA_CAP_GMR2 - */ +#pragma pack(push, 1) +typedef struct { + SVGAGBCursorType type; + union { + SVGAGBColorCursorHeader colorHeader; + SVGAGBAlphaCursorHeader alphaHeader; + } header; + uint32 sizeInBytes; + +} SVGAGBCursorHeader; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 x; + uint32 y; + uint32 width; + uint32 height; + uint32 reason; +} SVGAFifoCmdUpdateVerbose; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 color; + uint32 x; + uint32 y; + uint32 width; + uint32 height; + uint32 rop; +} SVGAFifoCmdFrontRopFill; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 fence; +} SVGAFifoCmdFence; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 nsid; + uint32 size; + +} SVGAFifoCmdEscape; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGAScreenObject screen; +} SVGAFifoCmdDefineScreen; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 screenId; +} SVGAFifoCmdDestroyScreen; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGAGuestPtr ptr; + uint32 bytesPerLine; + SVGAGMRImageFormat format; +} SVGAFifoCmdDefineGMRFB; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGASignedPoint srcOrigin; + SVGASignedRect destRect; + uint32 destScreenId; +} SVGAFifoCmdBlitGMRFBToScreen; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGASignedPoint destOrigin; + SVGASignedRect srcRect; + uint32 srcScreenId; +} SVGAFifoCmdBlitScreenToGMRFB; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGAColorBGRX color; +} SVGAFifoCmdAnnotationFill; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + SVGASignedPoint srcOrigin; + uint32 srcScreenId; +} SVGAFifoCmdAnnotationCopy; +#pragma pack(pop) + +#pragma pack(push, 1) +typedef struct { + uint32 gmrId; + uint32 numPages; +} SVGAFifoCmdDefineGMR2; +#pragma pack(pop) typedef enum { - SVGA_REMAP_GMR2_PPN32 = 0, - SVGA_REMAP_GMR2_VIA_GMR = (1 << 0), - SVGA_REMAP_GMR2_PPN64 = (1 << 1), - SVGA_REMAP_GMR2_SINGLE_PPN = (1 << 2), + SVGA_REMAP_GMR2_PPN32 = 0, + SVGA_REMAP_GMR2_VIA_GMR = (1 << 0), + SVGA_REMAP_GMR2_PPN64 = (1 << 1), + SVGA_REMAP_GMR2_SINGLE_PPN = (1 << 2), } SVGARemapGMR2Flags; -typedef -#include "vmware_pack_begin.h" -struct { - uint32 gmrId; - SVGARemapGMR2Flags flags; - uint32 offsetPages; /* offset in pages to begin remap */ - uint32 numPages; /* number of pages to remap */ - /* - * Followed by additional data depending on SVGARemapGMR2Flags. - * - * If flag SVGA_REMAP_GMR2_VIA_GMR is set, single SVGAGuestPtr follows. - * Otherwise an array of page descriptors in PPN32 or PPN64 format - * (according to flag SVGA_REMAP_GMR2_PPN64) follows. If flag - * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry. - */ -} -#include "vmware_pack_end.h" -SVGAFifoCmdRemapGMR2; - - -/* - * Size of SVGA device memory such as frame buffer and FIFO. 
- */ -#define SVGA_VRAM_MIN_SIZE (4 * 640 * 480) /* bytes */ -#define SVGA_VRAM_MIN_SIZE_3D (16 * 1024 * 1024) -#define SVGA_VRAM_MAX_SIZE (128 * 1024 * 1024) -#define SVGA_MEMORY_SIZE_MAX (1024 * 1024 * 1024) -#define SVGA_FIFO_SIZE_MAX (2 * 1024 * 1024) -#define SVGA_GRAPHICS_MEMORY_KB_MIN (32 * 1024) +#pragma pack(push, 1) +typedef struct { + uint32 gmrId; + SVGARemapGMR2Flags flags; + uint32 offsetPages; + uint32 numPages; + +} SVGAFifoCmdRemapGMR2; +#pragma pack(pop) + +#define SVGA_VRAM_MIN_SIZE (4 * 640 * 480) +#define SVGA_VRAM_MIN_SIZE_3D (16 * 1024 * 1024) +#define SVGA_VRAM_MAX_SIZE (128 * 1024 * 1024) +#define SVGA_MEMORY_SIZE_MAX (1024 * 1024 * 1024) +#define SVGA_FIFO_SIZE_MAX (2 * 1024 * 1024) +#define SVGA_GRAPHICS_MEMORY_KB_MIN (32 * 1024) #define SVGA_GRAPHICS_MEMORY_KB_MAX_2GB (2 * 1024 * 1024) #define SVGA_GRAPHICS_MEMORY_KB_MAX_3GB (3 * 1024 * 1024) #define SVGA_GRAPHICS_MEMORY_KB_MAX_4GB (4 * 1024 * 1024) #define SVGA_GRAPHICS_MEMORY_KB_MAX_8GB (8 * 1024 * 1024) #define SVGA_GRAPHICS_MEMORY_KB_DEFAULT (256 * 1024) -#define SVGA_VRAM_SIZE_W2K (64 * 1024 * 1024) /* 64 MB */ +#define SVGA_VRAM_SIZE_W2K (64 * 1024 * 1024) #if defined(VMX86_SERVER) -#define SVGA_VRAM_SIZE (4 * 1024 * 1024) -#define SVGA_VRAM_SIZE_3D (64 * 1024 * 1024) -#define SVGA_FIFO_SIZE (256 * 1024) -#define SVGA_FIFO_SIZE_3D (516 * 1024) -#define SVGA_MEMORY_SIZE_DEFAULT (160 * 1024 * 1024) -#define SVGA_AUTODETECT_DEFAULT FALSE +#define SVGA_VRAM_SIZE (4 * 1024 * 1024) +#define SVGA_VRAM_SIZE_3D (64 * 1024 * 1024) +#define SVGA_FIFO_SIZE (256 * 1024) +#define SVGA_FIFO_SIZE_3D (516 * 1024) +#define SVGA_MEMORY_SIZE_DEFAULT (160 * 1024 * 1024) +#define SVGA_AUTODETECT_DEFAULT FALSE #else -#define SVGA_VRAM_SIZE (16 * 1024 * 1024) -#define SVGA_VRAM_SIZE_3D SVGA_VRAM_MAX_SIZE -#define SVGA_FIFO_SIZE (2 * 1024 * 1024) -#define SVGA_FIFO_SIZE_3D SVGA_FIFO_SIZE -#define SVGA_MEMORY_SIZE_DEFAULT (768 * 1024 * 1024) -#define SVGA_AUTODETECT_DEFAULT TRUE +#define SVGA_VRAM_SIZE (16 * 1024 * 1024) +#define SVGA_VRAM_SIZE_3D SVGA_VRAM_MAX_SIZE +#define SVGA_FIFO_SIZE (2 * 1024 * 1024) +#define SVGA_FIFO_SIZE_3D SVGA_FIFO_SIZE +#define SVGA_MEMORY_SIZE_DEFAULT (768 * 1024 * 1024) +#define SVGA_AUTODETECT_DEFAULT TRUE #endif -#define SVGA_FIFO_SIZE_GBOBJECTS (256 * 1024) -#define SVGA_VRAM_SIZE_GBOBJECTS (4 * 1024 * 1024) - -#define SVGA_PCI_REGS_PAGES (1) +#define SVGA_FIFO_SIZE_GBOBJECTS (256 * 1024) +#define SVGA_VRAM_SIZE_GBOBJECTS (4 * 1024 * 1024) #endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h deleted file mode 100644 index beddccee40f6..000000000000 --- a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h +++ /dev/null @@ -1,51 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -/********************************************************** - * Copyright 2015 VMware, Inc. - * - * Permission is hereby granted, free of charge, to any person - * obtaining a copy of this software and associated documentation - * files (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, - * modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - **********************************************************/ -#ifndef _VM_BASIC_TYPES_H_ -#define _VM_BASIC_TYPES_H_ -#include <linux/kernel.h> - -typedef u32 uint32; -typedef s32 int32; -typedef u64 uint64; -typedef u16 uint16; -typedef s16 int16; -typedef u8 uint8; -typedef s8 int8; - -typedef uint64 PA; -typedef uint32 PPN; -typedef uint32 PPN32; -typedef uint64 PPN64; - -typedef bool Bool; - -#define MAX_UINT64 U64_MAX -#define MAX_UINT32 U32_MAX -#define MAX_UINT16 U16_MAX - -#define CONST64U(x) x##ULL - -#endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h index 3a195e8106b3..35bd2852189f 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h +++ b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h @@ -1,7 +1,34 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _VM_BASIC_TYPES_H_ -#define _VM_BASIC_TYPES_H_ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/********************************************************** + * Copyright 2015-2021 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + **********************************************************/ +#ifndef VM_BASIC_TYPES_H +#define VM_BASIC_TYPES_H + #include <linux/kernel.h> +#include <linux/mm.h> +#include <asm/page.h> typedef u32 uint32; typedef s32 int32; @@ -13,10 +40,108 @@ typedef s8 int8; typedef uint64 PA; typedef uint32 PPN; +typedef uint32 PPN32; typedef uint64 PPN64; typedef bool Bool; +#define MAX_UINT64 U64_MAX #define MAX_UINT32 U32_MAX +#define MAX_UINT16 U16_MAX + +#define CONST64U(x) x##ULL + +#ifndef MBYTES_SHIFT +#define MBYTES_SHIFT 20 +#endif +#ifndef MBYTES_2_BYTES +#define MBYTES_2_BYTES(_nbytes) ((uint64)(_nbytes) << MBYTES_SHIFT) +#endif + +/* + * MKS Guest Stats types + */ + +typedef struct MKSGuestStatCounter { + atomic64_t count; +} MKSGuestStatCounter; + +typedef struct MKSGuestStatCounterTime { + MKSGuestStatCounter counter; + atomic64_t selfCycles; + atomic64_t totalCycles; +} MKSGuestStatCounterTime; + +/* + * Flags for MKSGuestStatInfoEntry::flags below + */ + +#define MKS_GUEST_STAT_FLAG_NONE 0 +#define MKS_GUEST_STAT_FLAG_TIME (1U << 0) + +typedef __attribute__((aligned(32))) struct MKSGuestStatInfoEntry { + union { + const char *s; + uint64 u; + } name; + union { + const char *s; + uint64 u; + } description; + uint64 flags; + union { + MKSGuestStatCounter *counter; + MKSGuestStatCounterTime *counterTime; + uint64 u; + } stat; +} MKSGuestStatInfoEntry; + +#define INVALID_PPN64 ((PPN64)0x000fffffffffffffULL) +#define vmw_num_pages(size) (PAGE_ALIGN(size) >> PAGE_SHIFT) + +#define MKS_GUEST_STAT_INSTANCE_DESC_LENGTH 1024 +#define MKS_GUEST_STAT_INSTANCE_MAX_STATS 4096 +#define MKS_GUEST_STAT_INSTANCE_MAX_STAT_PPNS \ + (vmw_num_pages(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \ + sizeof(MKSGuestStatCounterTime))) +#define MKS_GUEST_STAT_INSTANCE_MAX_INFO_PPNS \ + (vmw_num_pages(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \ + sizeof(MKSGuestStatInfoEntry))) +#define MKS_GUEST_STAT_AVERAGE_NAME_LENGTH 40 +#define MKS_GUEST_STAT_INSTANCE_MAX_STRS_PPNS \ + (vmw_num_pages(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \ + MKS_GUEST_STAT_AVERAGE_NAME_LENGTH)) + +/* + * The MKSGuestStatInstanceDescriptor is used as main interface to + * communicate guest stats back to the host code. The guest must + * allocate an instance of this structure at the start of a page and + * provide the physical address to the host. From there the host code + * can walk this structure to find other (pinned) pages containing the + * stats data. + * + * Since the MKSGuestStatInfoEntry structures contain userlevel + * pointers, the InstanceDescriptor also contains pointers to the + * begining of these sections allowing the host side code to correctly + * interpret the pointers. + * + * Because the host side code never acknowledges anything back to the + * guest there is no strict requirement to maintain compatability + * across releases. If the interface changes the host might not be + * able to log stats, but the guest will continue to run normally. + */ + +typedef struct MKSGuestStatInstanceDescriptor { + uint64 reservedMBZ; /* must be zero for now. */ + uint64 statStartVA; /* VA of the start of the stats section. */ + uint64 strsStartVA; /* VA of the start of the strings section. */ + uint64 statLength; /* length of the stats section in bytes. */ + uint64 infoLength; /* length of the info entry section in bytes. */ + uint64 strsLength; /* length of the strings section in bytes. 
*/ + PPN64 statPPNs[MKS_GUEST_STAT_INSTANCE_MAX_STAT_PPNS]; /* stat counters */ + PPN64 infoPPNs[MKS_GUEST_STAT_INSTANCE_MAX_INFO_PPNS]; /* stat info */ + PPN64 strsPPNs[MKS_GUEST_STAT_INSTANCE_MAX_STRS_PPNS]; /* strings */ + char description[MKS_GUEST_STAT_INSTANCE_DESC_LENGTH]; +} MKSGuestStatInstanceDescriptor; #endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h deleted file mode 100644 index 75308bd0d970..000000000000 --- a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h +++ /dev/null @@ -1,2 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include <linux/compiler.h> diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h deleted file mode 100644 index e93d6f28b68c..000000000000 --- a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h +++ /dev/null @@ -1,2 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -__packed diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c index aeb0a22a2c34..edd17c30d5a5 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_memory.c +++ b/drivers/gpu/drm/vmwgfx/ttm_memory.c @@ -435,8 +435,10 @@ int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev) si_meminfo(&si); + spin_lock(&glob->lock); /* set it as 0 by default to keep original behavior of OOM */ glob->lower_mem_limit = 0; + spin_unlock(&glob->lock); ret = ttm_mem_init_kernel_zone(glob, &si); if (unlikely(ret != 0)) diff --git a/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h b/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h new file mode 100644 index 000000000000..b0d87c5f58d8 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h @@ -0,0 +1,539 @@ +/********************************************************** + * Copyright 2021 VMware, Inc. + * SPDX-License-Identifier: GPL-2.0 OR MIT + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + **********************************************************/ + +#ifndef VMW_SURFACE_CACHE_H +#define VMW_SURFACE_CACHE_H + +#include "device_include/svga3d_surfacedefs.h" + +#include <drm/vmwgfx_drm.h> + +static inline u32 clamped_umul32(u32 a, u32 b) +{ + uint64_t tmp = (uint64_t) a*b; + return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; +} + +/** + * vmw_surface_get_desc - Look up the appropriate SVGA3dSurfaceDesc for the + * given format. 
+ */ +static inline const SVGA3dSurfaceDesc * +vmw_surface_get_desc(SVGA3dSurfaceFormat format) +{ + if (format < ARRAY_SIZE(g_SVGA3dSurfaceDescs)) + return &g_SVGA3dSurfaceDescs[format]; + + return &g_SVGA3dSurfaceDescs[SVGA3D_FORMAT_INVALID]; +} + +/** + * vmw_surface_get_mip_size - Given a base level size and the mip level, + * compute the size of the mip level. + */ +static inline struct drm_vmw_size +vmw_surface_get_mip_size(struct drm_vmw_size base_level, u32 mip_level) +{ + struct drm_vmw_size size = { + .width = max_t(u32, base_level.width >> mip_level, 1), + .height = max_t(u32, base_level.height >> mip_level, 1), + .depth = max_t(u32, base_level.depth >> mip_level, 1) + }; + + return size; +} + +static inline void +vmw_surface_get_size_in_blocks(const SVGA3dSurfaceDesc *desc, + const struct drm_vmw_size *pixel_size, + SVGA3dSize *block_size) +{ + block_size->width = __KERNEL_DIV_ROUND_UP(pixel_size->width, + desc->blockSize.width); + block_size->height = __KERNEL_DIV_ROUND_UP(pixel_size->height, + desc->blockSize.height); + block_size->depth = __KERNEL_DIV_ROUND_UP(pixel_size->depth, + desc->blockSize.depth); +} + +static inline bool +vmw_surface_is_planar_surface(const SVGA3dSurfaceDesc *desc) +{ + return (desc->blockDesc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0; +} + +static inline u32 +vmw_surface_calculate_pitch(const SVGA3dSurfaceDesc *desc, + const struct drm_vmw_size *size) +{ + u32 pitch; + SVGA3dSize blocks; + + vmw_surface_get_size_in_blocks(desc, size, &blocks); + + pitch = blocks.width * desc->pitchBytesPerBlock; + + return pitch; +} + +/** + * vmw_surface_get_image_buffer_size - Calculates image buffer size. + * + * Return the number of bytes of buffer space required to store one image of a + * surface, optionally using the specified pitch. + * + * If pitch is zero, it is assumed that rows are tightly packed. + * + * This function is overflow-safe. If the result would have overflowed, instead + * we return MAX_UINT32. + */ +static inline u32 +vmw_surface_get_image_buffer_size(const SVGA3dSurfaceDesc *desc, + const struct drm_vmw_size *size, + u32 pitch) +{ + SVGA3dSize image_blocks; + u32 slice_size, total_size; + + vmw_surface_get_size_in_blocks(desc, size, &image_blocks); + + if (vmw_surface_is_planar_surface(desc)) { + total_size = clamped_umul32(image_blocks.width, + image_blocks.height); + total_size = clamped_umul32(total_size, image_blocks.depth); + total_size = clamped_umul32(total_size, desc->bytesPerBlock); + return total_size; + } + + if (pitch == 0) + pitch = vmw_surface_calculate_pitch(desc, size); + + slice_size = clamped_umul32(image_blocks.height, pitch); + total_size = clamped_umul32(slice_size, image_blocks.depth); + + return total_size; +} + +/** + * vmw_surface_get_serialized_size - Get the serialized size for the image. + */ +static inline u32 +vmw_surface_get_serialized_size(SVGA3dSurfaceFormat format, + struct drm_vmw_size base_level_size, + u32 num_mip_levels, + u32 num_layers) +{ + const SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format); + u32 total_size = 0; + u32 mip; + + for (mip = 0; mip < num_mip_levels; mip++) { + struct drm_vmw_size size = + vmw_surface_get_mip_size(base_level_size, mip); + total_size += vmw_surface_get_image_buffer_size(desc, + &size, 0); + } + + return total_size * num_layers; +} + +/** + * vmw_surface_get_serialized_size_extended - Returns the number of bytes + * required for a surface with given parameters. Support for sample count. 
+ */ +static inline u32 +vmw_surface_get_serialized_size_extended(SVGA3dSurfaceFormat format, + struct drm_vmw_size base_level_size, + u32 num_mip_levels, + u32 num_layers, + u32 num_samples) +{ + uint64_t total_size = + vmw_surface_get_serialized_size(format, + base_level_size, + num_mip_levels, + num_layers); + total_size *= max_t(u32, 1, num_samples); + + return min_t(uint64_t, total_size, (uint64_t)U32_MAX); +} + +/** + * vmw_surface_get_pixel_offset - Compute the offset (in bytes) to a pixel + * in an image (or volume). + * + * @width: The image width in pixels. + * @height: The image height in pixels + */ +static inline u32 +vmw_surface_get_pixel_offset(SVGA3dSurfaceFormat format, + u32 width, u32 height, + u32 x, u32 y, u32 z) +{ + const SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format); + const u32 bw = desc->blockSize.width, bh = desc->blockSize.height; + const u32 bd = desc->blockSize.depth; + const u32 rowstride = __KERNEL_DIV_ROUND_UP(width, bw) * + desc->bytesPerBlock; + const u32 imgstride = __KERNEL_DIV_ROUND_UP(height, bh) * rowstride; + const u32 offset = (z / bd * imgstride + + y / bh * rowstride + + x / bw * desc->bytesPerBlock); + return offset; +} + +static inline u32 +vmw_surface_get_image_offset(SVGA3dSurfaceFormat format, + struct drm_vmw_size baseLevelSize, + u32 numMipLevels, + u32 face, + u32 mip) + +{ + u32 offset; + u32 mipChainBytes; + u32 mipChainBytesToLevel; + u32 i; + const SVGA3dSurfaceDesc *desc; + struct drm_vmw_size mipSize; + u32 bytes; + + desc = vmw_surface_get_desc(format); + + mipChainBytes = 0; + mipChainBytesToLevel = 0; + for (i = 0; i < numMipLevels; i++) { + mipSize = vmw_surface_get_mip_size(baseLevelSize, i); + bytes = vmw_surface_get_image_buffer_size(desc, &mipSize, 0); + mipChainBytes += bytes; + if (i < mip) + mipChainBytesToLevel += bytes; + } + + offset = mipChainBytes * face + mipChainBytesToLevel; + + return offset; +} + + +/** + * vmw_surface_is_gb_screen_target_format - Is the specified format usable as + * a ScreenTarget? + * (with just the GBObjects cap-bit + * set) + * @format: format to queried + * + * RETURNS: + * true if queried format is valid for screen targets + */ +static inline bool +vmw_surface_is_gb_screen_target_format(SVGA3dSurfaceFormat format) +{ + return (format == SVGA3D_X8R8G8B8 || + format == SVGA3D_A8R8G8B8 || + format == SVGA3D_R5G6B5 || + format == SVGA3D_X1R5G5B5 || + format == SVGA3D_A1R5G5B5 || + format == SVGA3D_P8); +} + + +/** + * vmw_surface_is_dx_screen_target_format - Is the specified format usable as + * a ScreenTarget? + * (with DX10 enabled) + * + * @format: format to queried + * + * Results: + * true if queried format is valid for screen targets + */ +static inline bool +vmw_surface_is_dx_screen_target_format(SVGA3dSurfaceFormat format) +{ + return (format == SVGA3D_R8G8B8A8_UNORM || + format == SVGA3D_B8G8R8A8_UNORM || + format == SVGA3D_B8G8R8X8_UNORM); +} + + +/** + * vmw_surface_is_screen_target_format - Is the specified format usable as a + * ScreenTarget? + * (for some combination of caps) + * + * @format: format to queried + * + * Results: + * true if queried format is valid for screen targets + */ +static inline bool +vmw_surface_is_screen_target_format(SVGA3dSurfaceFormat format) +{ + if (vmw_surface_is_gb_screen_target_format(format)) { + return true; + } + return vmw_surface_is_dx_screen_target_format(format); +} + +/** + * struct vmw_surface_mip - Mimpmap level information + * @bytes: Bytes required in the backing store of this mipmap level. 
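vmw_surface_get_pixel_offset() above works in units of compressed blocks: x, y and z are first reduced to block coordinates, and the row stride is ceil(width / blockWidth) * bytesPerBlock. A worked stand-alone example assuming a DXT1-style layout of 4x4-pixel, 8-byte blocks (in the driver the block parameters come from the format descriptor):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Mirrors vmw_surface_get_pixel_offset() for a 2D image, assuming a
 * DXT1-like layout: 4x4-pixel blocks, 8 bytes per block. */
static uint32_t pixel_offset(uint32_t width, uint32_t x, uint32_t y)
{
	const uint32_t bw = 4, bh = 4, bytes_per_block = 8;
	const uint32_t rowstride = DIV_ROUND_UP(width, bw) * bytes_per_block;

	return y / bh * rowstride + x / bw * bytes_per_block;
}

int main(void)
{
	/* 64 px wide => 16 blocks * 8 B = 128 B per block row; pixel (20, 9)
	 * sits in block (5, 2), i.e. 2 * 128 + 5 * 8 = 296. */
	printf("%u\n", (unsigned)pixel_offset(64, 20, 9));
	return 0;
}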
+ * @img_stride: Byte stride per image. + * @row_stride: Byte stride per block row. + * @size: The size of the mipmap. + */ +struct vmw_surface_mip { + size_t bytes; + size_t img_stride; + size_t row_stride; + struct drm_vmw_size size; + +}; + +/** + * struct vmw_surface_cache - Cached surface information + * @desc: Pointer to the surface descriptor + * @mip: Array of mipmap level information. Valid size is @num_mip_levels. + * @mip_chain_bytes: Bytes required in the backing store for the whole chain + * of mip levels. + * @sheet_bytes: Bytes required in the backing store for a sheet + * representing a single sample. + * @num_mip_levels: Valid size of the @mip array. Number of mipmap levels in + * a chain. + * @num_layers: Number of slices in an array texture or number of faces in + * a cubemap texture. + */ +struct vmw_surface_cache { + const SVGA3dSurfaceDesc *desc; + struct vmw_surface_mip mip[DRM_VMW_MAX_MIP_LEVELS]; + size_t mip_chain_bytes; + size_t sheet_bytes; + u32 num_mip_levels; + u32 num_layers; +}; + +/** + * struct vmw_surface_loc - Surface location + * @sheet: The multisample sheet. + * @sub_resource: Surface subresource. Defined as layer * num_mip_levels + + * mip_level. + * @x: X coordinate. + * @y: Y coordinate. + * @z: Z coordinate. + */ +struct vmw_surface_loc { + u32 sheet; + u32 sub_resource; + u32 x, y, z; +}; + +/** + * vmw_surface_subres - Compute the subresource from layer and mipmap. + * @cache: Surface layout data. + * @mip_level: The mipmap level. + * @layer: The surface layer (face or array slice). + * + * Return: The subresource. + */ +static inline u32 vmw_surface_subres(const struct vmw_surface_cache *cache, + u32 mip_level, u32 layer) +{ + return cache->num_mip_levels * layer + mip_level; +} + +/** + * vmw_surface_setup_cache - Build a surface cache entry + * @size: The surface base level dimensions. + * @format: The surface format. + * @num_mip_levels: Number of mipmap levels. + * @num_layers: Number of layers. + * @cache: Pointer to a struct vmw_surface_cach object to be filled in. + * + * Return: Zero on success, -EINVAL on invalid surface layout. + */ +static inline int vmw_surface_setup_cache(const struct drm_vmw_size *size, + SVGA3dSurfaceFormat format, + u32 num_mip_levels, + u32 num_layers, + u32 num_samples, + struct vmw_surface_cache *cache) +{ + const SVGA3dSurfaceDesc *desc; + u32 i; + + memset(cache, 0, sizeof(*cache)); + cache->desc = desc = vmw_surface_get_desc(format); + cache->num_mip_levels = num_mip_levels; + cache->num_layers = num_layers; + for (i = 0; i < cache->num_mip_levels; i++) { + struct vmw_surface_mip *mip = &cache->mip[i]; + + mip->size = vmw_surface_get_mip_size(*size, i); + mip->bytes = vmw_surface_get_image_buffer_size + (desc, &mip->size, 0); + mip->row_stride = + __KERNEL_DIV_ROUND_UP(mip->size.width, + desc->blockSize.width) * + desc->bytesPerBlock * num_samples; + if (!mip->row_stride) + goto invalid_dim; + + mip->img_stride = + __KERNEL_DIV_ROUND_UP(mip->size.height, + desc->blockSize.height) * + mip->row_stride; + if (!mip->img_stride) + goto invalid_dim; + + cache->mip_chain_bytes += mip->bytes; + } + cache->sheet_bytes = cache->mip_chain_bytes * num_layers; + if (!cache->sheet_bytes) + goto invalid_dim; + + return 0; + +invalid_dim: + VMW_DEBUG_USER("Invalid surface layout for dirty tracking.\n"); + return -EINVAL; +} + +/** + * vmw_surface_get_loc - Get a surface location from an offset into the + * backing store + * @cache: Surface layout data. + * @loc: Pointer to a struct vmw_surface_loc to be filled in. 
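vmw_surface_subres() above numbers subresources the same way D3D-style APIs do: all mip levels of layer 0 first, then layer 1, and so on, so sub_resource = layer * num_mip_levels + mip and the inverse split is a division and a modulo. A small stand-alone illustration:

#include <stdint.h>
#include <stdio.h>

/* Subresource numbering used by vmw_surface_subres(): all mip levels of
 * layer 0 come first, then layer 1, and so on. */
static uint32_t subres(uint32_t num_mip_levels, uint32_t mip, uint32_t layer)
{
	return num_mip_levels * layer + mip;
}

int main(void)
{
	const uint32_t mips = 10;	/* e.g. a 512x512 cubemap face */
	uint32_t s = subres(mips, 2, 3);

	printf("face 3, mip 2 -> subresource %u\n", (unsigned)s);
	/* The inverse split; vmw_surface_inc_loc() relies on the modulo part. */
	printf("subresource %u -> layer %u, mip %u\n",
	       (unsigned)s, (unsigned)(s / mips), (unsigned)(s % mips));
	return 0;
}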
+ * @offset: Offset into the surface backing store. + */ +static inline void +vmw_surface_get_loc(const struct vmw_surface_cache *cache, + struct vmw_surface_loc *loc, + size_t offset) +{ + const struct vmw_surface_mip *mip = &cache->mip[0]; + const SVGA3dSurfaceDesc *desc = cache->desc; + u32 layer; + int i; + + loc->sheet = offset / cache->sheet_bytes; + offset -= loc->sheet * cache->sheet_bytes; + + layer = offset / cache->mip_chain_bytes; + offset -= layer * cache->mip_chain_bytes; + for (i = 0; i < cache->num_mip_levels; ++i, ++mip) { + if (mip->bytes > offset) + break; + offset -= mip->bytes; + } + + loc->sub_resource = vmw_surface_subres(cache, i, layer); + loc->z = offset / mip->img_stride; + offset -= loc->z * mip->img_stride; + loc->z *= desc->blockSize.depth; + loc->y = offset / mip->row_stride; + offset -= loc->y * mip->row_stride; + loc->y *= desc->blockSize.height; + loc->x = offset / desc->bytesPerBlock; + loc->x *= desc->blockSize.width; +} + +/** + * vmw_surface_inc_loc - Clamp increment a surface location with one block + * size + * in each dimension. + * @loc: Pointer to a struct vmw_surface_loc to be incremented. + * + * When computing the size of a range as size = end - start, the range does not + * include the end element. However a location representing the last byte + * of a touched region in the backing store *is* included in the range. + * This function modifies such a location to match the end definition + * given as start + size which is the one used in a SVGA3dBox. + */ +static inline void +vmw_surface_inc_loc(const struct vmw_surface_cache *cache, + struct vmw_surface_loc *loc) +{ + const SVGA3dSurfaceDesc *desc = cache->desc; + u32 mip = loc->sub_resource % cache->num_mip_levels; + const struct drm_vmw_size *size = &cache->mip[mip].size; + + loc->sub_resource++; + loc->x += desc->blockSize.width; + if (loc->x > size->width) + loc->x = size->width; + loc->y += desc->blockSize.height; + if (loc->y > size->height) + loc->y = size->height; + loc->z += desc->blockSize.depth; + if (loc->z > size->depth) + loc->z = size->depth; +} + +/** + * vmw_surface_min_loc - The start location in a subresource + * @cache: Surface layout data. + * @sub_resource: The subresource. + * @loc: Pointer to a struct vmw_surface_loc to be filled in. + */ +static inline void +vmw_surface_min_loc(const struct vmw_surface_cache *cache, + u32 sub_resource, + struct vmw_surface_loc *loc) +{ + loc->sheet = 0; + loc->sub_resource = sub_resource; + loc->x = loc->y = loc->z = 0; +} + +/** + * vmw_surface_min_loc - The end location in a subresource + * @cache: Surface layout data. + * @sub_resource: The subresource. + * @loc: Pointer to a struct vmw_surface_loc to be filled in. + * + * Following the end definition given in vmw_surface_inc_loc(), + * Compute the end location of a surface subresource. 
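vmw_surface_inc_loc() above exists because dirty tracking records the last byte actually touched, while an SVGA3dBox wants an exclusive start + size end; the fix is to bump the inclusive location by one block per axis and clamp to the mip size. A one-dimensional sketch of that convention (plain C, not driver code):

#include <stdint.h>
#include <stdio.h>

/* 1-D version of the inc_loc idea: convert an inclusive "last byte
 * touched" position into the exclusive end used by an SVGA3dBox by
 * rounding down to the block, adding one block and clamping to the
 * image extent. */
static uint32_t end_of_dirty(uint32_t last, uint32_t block, uint32_t extent)
{
	uint32_t end = last / block * block + block;

	return end > extent ? extent : end;
}

int main(void)
{
	const uint32_t block = 4, width = 64;
	uint32_t first = 20, last = 37;	/* inclusive touched range */
	uint32_t x_min = first / block * block;

	/* Box covers [20, 40): block-aligned and exclusive at the end. */
	printf("dirty box x: [%u, %u)\n", (unsigned)x_min,
	       (unsigned)end_of_dirty(last, block, width));
	return 0;
}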
+ */ +static inline void +vmw_surface_max_loc(const struct vmw_surface_cache *cache, + u32 sub_resource, + struct vmw_surface_loc *loc) +{ + const struct drm_vmw_size *size; + u32 mip; + + loc->sheet = 0; + loc->sub_resource = sub_resource + 1; + mip = sub_resource % cache->num_mip_levels; + size = &cache->mip[mip].size; + loc->x = size->width; + loc->y = size->height; + loc->z = size->depth; +} + + +#endif /* VMW_SURFACE_CACHE_H */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c index 05b324825900..6f27d69bad0e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c @@ -715,7 +715,7 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind) * without checking which bindings actually need to be emitted * * @cbs: Pointer to the context's struct vmw_ctx_binding_state - * @bi: Pointer to where the binding info array is stored in @cbs + * @biv: Pointer to where the binding info array is stored in @cbs * @max_num: Maximum number of entries in the @bi array. * * Scans the @bi array for bindings and builds a buffer of view id data. @@ -725,11 +725,9 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind) * contains the command data. */ static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs, - const struct vmw_ctx_bindinfo *bi, + const struct vmw_ctx_bindinfo_view *biv, u32 max_num) { - const struct vmw_ctx_bindinfo_view *biv = - container_of(bi, struct vmw_ctx_bindinfo_view, bi); unsigned long i; cbs->bind_cmd_count = 0; @@ -838,7 +836,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs, */ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs) { - const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi; + const struct vmw_ctx_bindinfo_view *loc = &cbs->render_targets[0]; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetRenderTargets body; @@ -846,7 +844,7 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs) size_t cmd_size, view_id_size; const struct vmw_resource *ctx = vmw_cbs_context(cbs); - vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS); + vmw_collect_view_ids(cbs, loc, SVGA3D_DX_MAX_RENDER_TARGETS); view_id_size = cbs->bind_cmd_count*sizeof(uint32); cmd_size = sizeof(*cmd) + view_id_size; cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); @@ -874,7 +872,7 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs) * without checking which bindings actually need to be emitted * * @cbs: Pointer to the context's struct vmw_ctx_binding_state - * @bi: Pointer to where the binding info array is stored in @cbs + * @biso: Pointer to where the binding info array is stored in @cbs * @max_num: Maximum number of entries in the @bi array. * * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data. @@ -884,11 +882,9 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs) * contains the command data. 
*/ static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs, - const struct vmw_ctx_bindinfo *bi, + const struct vmw_ctx_bindinfo_so_target *biso, u32 max_num) { - const struct vmw_ctx_bindinfo_so_target *biso = - container_of(bi, struct vmw_ctx_bindinfo_so_target, bi); unsigned long i; SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer; @@ -919,7 +915,7 @@ static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs, */ static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs) { - const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi; + const struct vmw_ctx_bindinfo_so_target *loc = &cbs->so_targets[0]; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetSOTargets body; @@ -1066,7 +1062,7 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs) static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs) { - const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[0].views[0].bi; + const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[0].views[0]; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetUAViews body; @@ -1096,7 +1092,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs) static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs) { - const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[1].views[0].bi; + const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[1].views[0]; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetCSUAViews body; @@ -1444,7 +1440,7 @@ u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type) static void vmw_binding_build_asserts(void) { BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3); - BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX); + BUILD_BUG_ON(SVGA3D_DX_MAX_RENDER_TARGETS > SVGA3D_RT_MAX); BUILD_BUG_ON(sizeof(uint32) != sizeof(u32)); /* diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c index 956b85e35cef..67db472d3493 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c @@ -30,6 +30,7 @@ #include <drm/ttm/ttm_placement.h> #include "vmwgfx_drv.h" +#include "vmwgfx_devcaps.h" bool vmw_supports_3d(struct vmw_private *dev_priv) { @@ -45,10 +46,7 @@ bool vmw_supports_3d(struct vmw_private *dev_priv) if (!dev_priv->has_mob) return false; - spin_lock(&dev_priv->cap_lock); - vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); - result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); - spin_unlock(&dev_priv->cap_lock); + result = vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_3D); return (result != 0); } @@ -142,7 +140,8 @@ struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv) min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES); - DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n", + drm_info(&dev_priv->drm, + "Fifo max 0x%08x min 0x%08x cap 0x%08x\n", (unsigned int) max, (unsigned int) min, (unsigned int) fifo->capabilities); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 6bb4961e64a5..81b84570da0a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -516,7 +516,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work) struct vmw_cmdbuf_man *man = container_of(work, struct vmw_cmdbuf_man, work); struct vmw_cmdbuf_header *entry, *next; - uint32_t dummy; + uint32_t dummy = 0; bool send_fence = false; struct list_head restart_head[SVGA_CB_CONTEXT_MAX]; int i; @@ -1272,7 +1272,8 @@ int vmw_cmdbuf_set_pool_size(struct 
vmw_cmdbuf_man *man, size_t size) * submissions to be able to free up space. */ man->default_size = VMW_CMDBUF_INLINE_SIZE; - DRM_INFO("Using command buffers with %s pool.\n", + drm_info(&dev_priv->drm, + "Using command buffers with %s pool.\n", (man->using_mob) ? "MOB" : "DMA"); return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index b262d61d839d..9487faff5229 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -159,6 +159,7 @@ void vmw_cmdbuf_res_commit(struct list_head *list) void vmw_cmdbuf_res_revert(struct list_head *list) { struct vmw_cmdbuf_res *entry, *next; + int ret; list_for_each_entry_safe(entry, next, list, head) { switch (entry->state) { @@ -166,7 +167,8 @@ void vmw_cmdbuf_res_revert(struct list_head *list) vmw_cmdbuf_res_free(entry->man, entry); break; case VMW_CMDBUF_RES_DEL: - drm_ht_insert_item(&entry->man->resources, &entry->hash); + ret = drm_ht_insert_item(&entry->man->resources, &entry->hash); + BUG_ON(ret); list_del(&entry->head); list_add_tail(&entry->head, &entry->man->list); entry->state = VMW_CMDBUF_RES_COMMITTED; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index dffe3804ad3e..4446758b6880 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -185,7 +185,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv, container_of(res, struct vmw_user_context, res); res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) : - SVGA3D_CONTEXT_DATA_SIZE); + sizeof(SVGAGBContextData)); ret = vmw_resource_init(dev_priv, res, true, res_free, dx ? &vmw_dx_context_func : @@ -259,7 +259,7 @@ static int vmw_context_init(struct vmw_private *dev_priv, goto out_early; } - if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) { + if (unlikely(res->id >= SVGA3D_HB_MAX_CONTEXT_IDS)) { DRM_ERROR("Out of hw context ids.\n"); vmw_resource_unreference(&res); return -ENOMEM; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c b/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c new file mode 100644 index 000000000000..829df395c2ed --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/************************************************************************** + * + * Copyright 2021 VMware, Inc., Palo Alto, CA., USA + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "vmwgfx_devcaps.h" + +#include "vmwgfx_drv.h" + + +struct svga_3d_compat_cap { + SVGA3dFifoCapsRecordHeader header; + SVGA3dFifoCapPair pairs[SVGA3D_DEVCAP_MAX]; +}; + + +static u32 vmw_mask_legacy_multisample(unsigned int cap, u32 fmt_value) +{ + /* + * A version of user-space exists which use MULTISAMPLE_MASKABLESAMPLES + * to check the sample count supported by virtual device. Since there + * never was support for multisample count for backing MOB return 0. + * + * MULTISAMPLE_MASKABLESAMPLES devcap is marked as deprecated by virtual + * device. + */ + if (cap == SVGA3D_DEVCAP_DEAD5) + return 0; + + return fmt_value; +} + +static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, + size_t size) +{ + struct svga_3d_compat_cap *compat_cap = + (struct svga_3d_compat_cap *) bounce; + unsigned int i; + size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs); + unsigned int max_size; + + if (size < pair_offset) + return -EINVAL; + + max_size = (size - pair_offset) / sizeof(SVGA3dFifoCapPair); + + if (max_size > SVGA3D_DEVCAP_MAX) + max_size = SVGA3D_DEVCAP_MAX; + + compat_cap->header.length = + (pair_offset + max_size * sizeof(SVGA3dFifoCapPair)) / sizeof(u32); + compat_cap->header.type = SVGA3D_FIFO_CAPS_RECORD_DEVCAPS; + + for (i = 0; i < max_size; ++i) { + compat_cap->pairs[i][0] = i; + compat_cap->pairs[i][1] = vmw_mask_legacy_multisample + (i, dev_priv->devcaps[i]); + } + + return 0; +} + +int vmw_devcaps_create(struct vmw_private *vmw) +{ + bool gb_objects = !!(vmw->capabilities & SVGA_CAP_GBOBJECTS); + uint32_t i; + + if (gb_objects) { + vmw->devcaps = vzalloc(sizeof(uint32_t) * SVGA3D_DEVCAP_MAX); + if (!vmw->devcaps) + return -ENOMEM; + for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) { + vmw_write(vmw, SVGA_REG_DEV_CAP, i); + vmw->devcaps[i] = vmw_read(vmw, SVGA_REG_DEV_CAP); + } + } + return 0; +} + +void vmw_devcaps_destroy(struct vmw_private *vmw) +{ + vfree(vmw->devcaps); + vmw->devcaps = NULL; +} + + +uint32 vmw_devcaps_size(const struct vmw_private *vmw, + bool gb_aware) +{ + bool gb_objects = !!(vmw->capabilities & SVGA_CAP_GBOBJECTS); + if (gb_objects && gb_aware) + return SVGA3D_DEVCAP_MAX * sizeof(uint32_t); + else if (gb_objects) + return sizeof(struct svga_3d_compat_cap) + + sizeof(uint32_t); + else if (vmw->fifo_mem != NULL) + return (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) * + sizeof(uint32_t); + else + return 0; +} + +int vmw_devcaps_copy(struct vmw_private *vmw, bool gb_aware, + void *dst, uint32_t dst_size) +{ + int ret; + bool gb_objects = !!(vmw->capabilities & SVGA_CAP_GBOBJECTS); + if (gb_objects && gb_aware) { + memcpy(dst, vmw->devcaps, dst_size); + } else if (gb_objects) { + ret = vmw_fill_compat_cap(vmw, dst, dst_size); + if (unlikely(ret != 0)) + return ret; + } else if (vmw->fifo_mem) { + u32 *fifo_mem = vmw->fifo_mem; + memcpy(dst, &fifo_mem[SVGA_FIFO_3D_CAPS], dst_size); + } else + return -EINVAL; + return 0; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.h b/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.h new file mode 100644 index 000000000000..f70e923ac3e6 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.h @@ -0,0 +1,50 @@ +/* 
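The new devcaps code above replaces per-query register round-trips: vmw_devcaps_create() reads every SVGA3D devcap once at load time into a vzalloc'd array, and the inline vmw_devcap_get() in the new vmwgfx_devcaps.h below is then just an array lookup, which is why the cap_lock spinlock can be dropped elsewhere in this series. A stand-alone sketch of that snapshot-then-lookup pattern; hw_read_devcap() and DEVCAP_MAX here are hypothetical stand-ins for the SVGA register interface and SVGA3D_DEVCAP_MAX:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEVCAP_MAX 256		/* hypothetical stand-in for SVGA3D_DEVCAP_MAX */

/* Hypothetical register read standing in for the vmw_write()/vmw_read()
 * pair on SVGA_REG_DEV_CAP that the real driver performs. */
static uint32_t hw_read_devcap(uint32_t index)
{
	return index * 2;	/* fake data for the sketch */
}

struct devcaps {
	uint32_t *vals;
};

/* Snapshot every cap once at load time, like vmw_devcaps_create(). */
static int devcaps_create(struct devcaps *c)
{
	uint32_t i;

	c->vals = calloc(DEVCAP_MAX, sizeof(*c->vals));
	if (!c->vals)
		return -1;
	for (i = 0; i < DEVCAP_MAX; i++)
		c->vals[i] = hw_read_devcap(i);
	return 0;
}

/* Later queries are plain array reads, like vmw_devcap_get(). */
static uint32_t devcap_get(const struct devcaps *c, uint32_t cap)
{
	return cap < DEVCAP_MAX ? c->vals[cap] : 0;
}

int main(void)
{
	struct devcaps caps;

	if (devcaps_create(&caps))
		return 1;
	printf("cap 17 = %u\n", (unsigned)devcap_get(&caps, 17));
	free(caps.vals);
	return 0;
}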
SPDX-License-Identifier: GPL-2.0 OR MIT */ +/************************************************************************** + * + * Copyright 2021 VMware, Inc., Palo Alto, CA., USA + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#ifndef _VMWGFX_DEVCAPS_H_ +#define _VMWGFX_DEVCAPS_H_ + +#include "vmwgfx_drv.h" + +#include "device_include/svga_reg.h" + +int vmw_devcaps_create(struct vmw_private *vmw); +void vmw_devcaps_destroy(struct vmw_private *vmw); +uint32_t vmw_devcaps_size(const struct vmw_private *vmw, bool gb_aware); +int vmw_devcaps_copy(struct vmw_private *vmw, bool gb_aware, + void *dst, uint32_t dst_size); + +static inline uint32_t vmw_devcap_get(struct vmw_private *vmw, + uint32_t devcap) +{ + bool gb_objects = !!(vmw->capabilities & SVGA_CAP_GBOBJECTS); + if (gb_objects) + return vmw->devcaps[devcap]; + return 0; +} + +#endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 45aeeca9b8f6..ab9a1750e1df 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -42,7 +42,9 @@ #include "ttm_object.h" #include "vmwgfx_binding.h" +#include "vmwgfx_devcaps.h" #include "vmwgfx_drv.h" +#include "vmwgfx_mksstat.h" #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" @@ -148,102 +150,111 @@ #define DRM_IOCTL_VMW_MSG \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \ struct drm_vmw_msg_arg) - -/* - * The core DRM version of this macro doesn't account for - * DRM_COMMAND_BASE. - */ - -#define VMW_IOCTL_DEF(ioctl, func, flags) \ - [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func} +#define DRM_IOCTL_VMW_MKSSTAT_RESET \ + DRM_IO(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_RESET) +#define DRM_IOCTL_VMW_MKSSTAT_ADD \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_ADD, \ + struct drm_vmw_mksstat_add_arg) +#define DRM_IOCTL_VMW_MKSSTAT_REMOVE \ + DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_REMOVE, \ + struct drm_vmw_mksstat_remove_arg) /* * Ioctl definitions. 
*/ static const struct drm_ioctl_desc vmw_ioctls[] = { - VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, - vmw_kms_cursor_bypass_ioctl, - DRM_MASTER), - - VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, - DRM_MASTER), - VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, - DRM_MASTER), - VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, - DRM_MASTER), - - VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, - vmw_fence_obj_signaled_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, - DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_CURSOR_BYPASS, + vmw_kms_cursor_bypass_ioctl, + DRM_MASTER), + + DRM_IOCTL_DEF_DRV(VMW_CONTROL_STREAM, vmw_overlay_ioctl, + DRM_MASTER), + DRM_IOCTL_DEF_DRV(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, + DRM_MASTER), + DRM_IOCTL_DEF_DRV(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, + DRM_MASTER), + + DRM_IOCTL_DEF_DRV(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_REF_SURFACE, vmw_surface_reference_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_EXECBUF, vmw_execbuf_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_FENCE_SIGNALED, + vmw_fence_obj_signaled_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_FENCE_EVENT, vmw_fence_event_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, + DRM_RENDER_ALLOW), /* these allow direct access to the framebuffers mark as master only */ - VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, - DRM_MASTER | DRM_AUTH), - VMW_IOCTL_DEF(VMW_PRESENT_READBACK, - vmw_present_readback_ioctl, - DRM_MASTER | DRM_AUTH), + DRM_IOCTL_DEF_DRV(VMW_PRESENT, vmw_present_ioctl, + DRM_MASTER | DRM_AUTH), + DRM_IOCTL_DEF_DRV(VMW_PRESENT_READBACK, + vmw_present_readback_ioctl, + DRM_MASTER | DRM_AUTH), /* * The permissions of the below ioctl are overridden in * vmw_generic_ioctl(). We require either * DRM_MASTER or capable(CAP_SYS_ADMIN). 
*/ - VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, - vmw_kms_update_layout_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_CREATE_SHADER, - vmw_shader_define_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_UNREF_SHADER, - vmw_shader_destroy_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, - vmw_gb_surface_define_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, - vmw_gb_surface_reference_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_SYNCCPU, - vmw_user_bo_synccpu_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT, - vmw_extended_context_define_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT, - vmw_gb_surface_define_ext_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT, - vmw_gb_surface_reference_ext_ioctl, - DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_MSG, - vmw_msg_ioctl, - DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_UPDATE_LAYOUT, + vmw_kms_update_layout_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_CREATE_SHADER, + vmw_shader_define_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_UNREF_SHADER, + vmw_shader_destroy_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE, + vmw_gb_surface_define_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF, + vmw_gb_surface_reference_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_SYNCCPU, + vmw_user_bo_synccpu_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_CREATE_EXTENDED_CONTEXT, + vmw_extended_context_define_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE_EXT, + vmw_gb_surface_define_ext_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF_EXT, + vmw_gb_surface_reference_ext_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_MSG, + vmw_msg_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_RESET, + vmw_mksstat_reset_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_ADD, + vmw_mksstat_add_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_REMOVE, + vmw_mksstat_remove_ioctl, + DRM_RENDER_ALLOW), }; static const struct pci_device_id vmw_pci_id_list[] = { @@ -254,7 +265,6 @@ static const struct pci_device_id vmw_pci_id_list[] = { MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); -static int vmw_force_iommu; static int vmw_restrict_iommu; static int vmw_force_coherent; static int vmw_restrict_dma_mask; @@ -266,8 +276,6 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); module_param_named(enable_fbdev, enable_fbdev, int, 0600); -MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); -module_param_named(force_dma_api, vmw_force_iommu, int, 0600); MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); @@ -278,62 +286,92 @@ MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); -static void vmw_print_capabilities2(uint32_t capabilities2) +struct bitmap_name { + uint32 value; + const char *name; +}; + +static const struct bitmap_name cap1_names[] = { + { SVGA_CAP_RECT_COPY, "rect copy" }, + { SVGA_CAP_CURSOR, "cursor" }, + { SVGA_CAP_CURSOR_BYPASS, "cursor bypass" }, + { SVGA_CAP_CURSOR_BYPASS_2, "cursor bypass 2" }, + { SVGA_CAP_8BIT_EMULATION, "8bit emulation" }, + { SVGA_CAP_ALPHA_CURSOR, 
"alpha cursor" }, + { SVGA_CAP_3D, "3D" }, + { SVGA_CAP_EXTENDED_FIFO, "extended fifo" }, + { SVGA_CAP_MULTIMON, "multimon" }, + { SVGA_CAP_PITCHLOCK, "pitchlock" }, + { SVGA_CAP_IRQMASK, "irq mask" }, + { SVGA_CAP_DISPLAY_TOPOLOGY, "display topology" }, + { SVGA_CAP_GMR, "gmr" }, + { SVGA_CAP_TRACES, "traces" }, + { SVGA_CAP_GMR2, "gmr2" }, + { SVGA_CAP_SCREEN_OBJECT_2, "screen object 2" }, + { SVGA_CAP_COMMAND_BUFFERS, "command buffers" }, + { SVGA_CAP_CMD_BUFFERS_2, "command buffers 2" }, + { SVGA_CAP_GBOBJECTS, "gbobject" }, + { SVGA_CAP_DX, "dx" }, + { SVGA_CAP_HP_CMD_QUEUE, "hp cmd queue" }, + { SVGA_CAP_NO_BB_RESTRICTION, "no bb restriction" }, + { SVGA_CAP_CAP2_REGISTER, "cap2 register" }, +}; + + +static const struct bitmap_name cap2_names[] = { + { SVGA_CAP2_GROW_OTABLE, "grow otable" }, + { SVGA_CAP2_INTRA_SURFACE_COPY, "intra surface copy" }, + { SVGA_CAP2_DX2, "dx2" }, + { SVGA_CAP2_GB_MEMSIZE_2, "gb memsize 2" }, + { SVGA_CAP2_SCREENDMA_REG, "screendma reg" }, + { SVGA_CAP2_OTABLE_PTDEPTH_2, "otable ptdepth2" }, + { SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT, "non ms to ms stretchblt" }, + { SVGA_CAP2_CURSOR_MOB, "cursor mob" }, + { SVGA_CAP2_MSHINT, "mshint" }, + { SVGA_CAP2_CB_MAX_SIZE_4MB, "cb max size 4mb" }, + { SVGA_CAP2_DX3, "dx3" }, + { SVGA_CAP2_FRAME_TYPE, "frame type" }, + { SVGA_CAP2_COTABLE_COPY, "cotable copy" }, + { SVGA_CAP2_TRACE_FULL_FB, "trace full fb" }, + { SVGA_CAP2_EXTRA_REGS, "extra regs" }, + { SVGA_CAP2_LO_STAGING, "lo staging" }, +}; + +static void vmw_print_bitmap(struct drm_device *drm, + const char *prefix, uint32_t bitmap, + const struct bitmap_name *bnames, + uint32_t num_names) { - DRM_INFO("Capabilities2:\n"); - if (capabilities2 & SVGA_CAP2_GROW_OTABLE) - DRM_INFO(" Grow oTable.\n"); - if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY) - DRM_INFO(" IntraSurface copy.\n"); - if (capabilities2 & SVGA_CAP2_DX3) - DRM_INFO(" DX3.\n"); + char buf[512]; + uint32_t i; + uint32_t offset = 0; + for (i = 0; i < num_names; ++i) { + if ((bitmap & bnames[i].value) != 0) { + offset += snprintf(buf + offset, + ARRAY_SIZE(buf) - offset, + "%s, ", bnames[i].name); + bitmap &= ~bnames[i].value; + } + } + + drm_info(drm, "%s: %s\n", prefix, buf); + if (bitmap != 0) + drm_dbg(drm, "%s: unknown enums: %x\n", prefix, bitmap); } -static void vmw_print_capabilities(uint32_t capabilities) + +static void vmw_print_sm_type(struct vmw_private *dev_priv) { - DRM_INFO("Capabilities:\n"); - if (capabilities & SVGA_CAP_RECT_COPY) - DRM_INFO(" Rect copy.\n"); - if (capabilities & SVGA_CAP_CURSOR) - DRM_INFO(" Cursor.\n"); - if (capabilities & SVGA_CAP_CURSOR_BYPASS) - DRM_INFO(" Cursor bypass.\n"); - if (capabilities & SVGA_CAP_CURSOR_BYPASS_2) - DRM_INFO(" Cursor bypass 2.\n"); - if (capabilities & SVGA_CAP_8BIT_EMULATION) - DRM_INFO(" 8bit emulation.\n"); - if (capabilities & SVGA_CAP_ALPHA_CURSOR) - DRM_INFO(" Alpha cursor.\n"); - if (capabilities & SVGA_CAP_3D) - DRM_INFO(" 3D.\n"); - if (capabilities & SVGA_CAP_EXTENDED_FIFO) - DRM_INFO(" Extended Fifo.\n"); - if (capabilities & SVGA_CAP_MULTIMON) - DRM_INFO(" Multimon.\n"); - if (capabilities & SVGA_CAP_PITCHLOCK) - DRM_INFO(" Pitchlock.\n"); - if (capabilities & SVGA_CAP_IRQMASK) - DRM_INFO(" Irq mask.\n"); - if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) - DRM_INFO(" Display Topology.\n"); - if (capabilities & SVGA_CAP_GMR) - DRM_INFO(" GMR.\n"); - if (capabilities & SVGA_CAP_TRACES) - DRM_INFO(" Traces.\n"); - if (capabilities & SVGA_CAP_GMR2) - DRM_INFO(" GMR2.\n"); - if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) - DRM_INFO(" 
Screen Object 2.\n"); - if (capabilities & SVGA_CAP_COMMAND_BUFFERS) - DRM_INFO(" Command Buffers.\n"); - if (capabilities & SVGA_CAP_CMD_BUFFERS_2) - DRM_INFO(" Command Buffers 2.\n"); - if (capabilities & SVGA_CAP_GBOBJECTS) - DRM_INFO(" Guest Backed Resources.\n"); - if (capabilities & SVGA_CAP_DX) - DRM_INFO(" DX Features.\n"); - if (capabilities & SVGA_CAP_HP_CMD_QUEUE) - DRM_INFO(" HP Command Queue.\n"); + static const char *names[] = { + [VMW_SM_LEGACY] = "Legacy", + [VMW_SM_4] = "SM4", + [VMW_SM_4_1] = "SM4_1", + [VMW_SM_5] = "SM_5", + [VMW_SM_MAX] = "Invalid" + }; + BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1)); + drm_info(&dev_priv->drm, "Available shader model: %s.\n", + names[dev_priv->sm_type]); } /** @@ -400,10 +438,6 @@ static int vmw_device_init(struct vmw_private *dev_priv) { bool uses_fb_traces = false; - DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH)); - DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); - DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); - dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); @@ -627,7 +661,6 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv) static int vmw_dma_select_mode(struct vmw_private *dev_priv) { static const char *names[vmw_dma_map_max] = { - [vmw_dma_phys] = "Using physical TTM page addresses.", [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", [vmw_dma_map_populate] = "Caching DMA mappings.", [vmw_dma_map_bind] = "Giving up DMA mappings early."}; @@ -643,7 +676,8 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) else dev_priv->map_mode = vmw_dma_map_populate; - DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); + drm_info(&dev_priv->drm, + "DMA map mode: %s\n", names[dev_priv->map_mode]); return 0; } @@ -661,9 +695,9 @@ static int vmw_dma_masks(struct vmw_private *dev_priv) int ret = 0; ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); - if (dev_priv->map_mode != vmw_dma_phys && - (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { - DRM_INFO("Restricting DMA addresses to 44 bits.\n"); + if (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask) { + drm_info(&dev_priv->drm, + "Restricting DMA addresses to 44 bits.\n"); return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); } @@ -693,7 +727,7 @@ static void vmw_vram_manager_fini(struct vmw_private *dev_priv) } static int vmw_setup_pci_resources(struct vmw_private *dev, - unsigned long pci_id) + u32 pci_id) { resource_size_t rmmio_start; resource_size_t rmmio_size; @@ -715,13 +749,15 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, dev->vram_start = pci_resource_start(pdev, 2); dev->vram_size = pci_resource_len(pdev, 2); - DRM_INFO("Register MMIO at 0x%pa size is %llu kiB\n", + drm_info(&dev->drm, + "Register MMIO at 0x%pa size is %llu kiB\n", &rmmio_start, (uint64_t)rmmio_size / 1024); dev->rmmio = devm_ioremap(dev->drm.dev, rmmio_start, rmmio_size); if (!dev->rmmio) { - DRM_ERROR("Failed mapping registers mmio memory.\n"); + drm_err(&dev->drm, + "Failed mapping registers mmio memory.\n"); pci_release_regions(pdev); return -ENOMEM; } @@ -732,7 +768,8 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, fifo_start = pci_resource_start(pdev, 2); fifo_size = pci_resource_len(pdev, 2); - DRM_INFO("FIFO at %pa size is %llu kiB\n", + drm_info(&dev->drm, + "FIFO at %pa size is %llu kiB\n", &fifo_start, 
(uint64_t)fifo_size / 1024); dev->fifo_mem = devm_memremap(dev->drm.dev, fifo_start, @@ -740,7 +777,8 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, MEMREMAP_WB); if (IS_ERR(dev->fifo_mem)) { - DRM_ERROR("Failed mapping FIFO memory.\n"); + drm_err(&dev->drm, + "Failed mapping FIFO memory.\n"); pci_release_regions(pdev); return PTR_ERR(dev->fifo_mem); } @@ -755,7 +793,8 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, * size will be equal to or bigger than the size reported by * SVGA_REG_VRAM_SIZE. */ - DRM_INFO("VRAM at %pa size is %llu kiB\n", + drm_info(&dev->drm, + "VRAM at %pa size is %llu kiB\n", &dev->vram_start, (uint64_t)dev->vram_size / 1024); return 0; @@ -769,12 +808,14 @@ static int vmw_detect_version(struct vmw_private *dev) SVGA_ID_3 : SVGA_ID_2); svga_id = vmw_read(dev, SVGA_REG_ID); if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) { - DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n", - svga_id, dev->vmw_chipset); + drm_err(&dev->drm, + "Unsupported SVGA ID 0x%x on chipset 0x%x\n", + svga_id, dev->pci_id); return -ENOSYS; } BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3)); - DRM_INFO("Running on SVGA version %d.\n", (svga_id & 0xff)); + drm_info(&dev->drm, + "Running on SVGA version %d.\n", (svga_id & 0xff)); return 0; } @@ -785,7 +826,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) bool refuse_dma = false; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); - dev_priv->vmw_chipset = pci_id; dev_priv->drm.dev_private = dev_priv; mutex_init(&dev_priv->cmdbuf_mutex); @@ -793,7 +833,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) spin_lock_init(&dev_priv->resource_lock); spin_lock_init(&dev_priv->hw_lock); spin_lock_init(&dev_priv->waiter_lock); - spin_lock_init(&dev_priv->cap_lock); spin_lock_init(&dev_priv->cursor_lock); ret = vmw_setup_pci_resources(dev_priv, pci_id); @@ -830,10 +869,12 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) ret = vmw_dma_select_mode(dev_priv); if (unlikely(ret != 0)) { - DRM_INFO("Restricting capabilities since DMA not available.\n"); + drm_info(&dev_priv->drm, + "Restricting capabilities since DMA not available.\n"); refuse_dma = true; if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) - DRM_INFO("Disabling 3D acceleration.\n"); + drm_info(&dev_priv->drm, + "Disabling 3D acceleration.\n"); } dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); @@ -879,9 +920,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) mem_size *= 3; dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; - dev_priv->prim_bb_mem = - vmw_read(dev_priv, - SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); + dev_priv->max_primary_mem = + vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM); dev_priv->max_mob_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); dev_priv->stdu_max_width = @@ -900,14 +940,25 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) } else { dev_priv->texture_max_width = 8192; dev_priv->texture_max_height = 8192; - dev_priv->prim_bb_mem = dev_priv->vram_size; + dev_priv->max_primary_mem = dev_priv->vram_size; } - - vmw_print_capabilities(dev_priv->capabilities); + drm_info(&dev_priv->drm, + "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n", + (u64)dev_priv->vram_size / 1024, + (u64)dev_priv->fifo_mem_size / 1024, + dev_priv->memory_size / 1024); + + drm_info(&dev_priv->drm, + "MOB limits: max mob size = %u kB, max mob pages = %u\n", + dev_priv->max_mob_size / 1024, 
dev_priv->max_mob_pages); + + vmw_print_bitmap(&dev_priv->drm, "Capabilities", + dev_priv->capabilities, + cap1_names, ARRAY_SIZE(cap1_names)); if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) - vmw_print_capabilities2(dev_priv->capabilities2); - DRM_INFO("Supports command queues = %d\n", - vmw_cmd_supported((dev_priv))); + vmw_print_bitmap(&dev_priv->drm, "Capabilities2", + dev_priv->capabilities2, + cap2_names, ARRAY_SIZE(cap2_names)); ret = vmw_dma_masks(dev_priv); if (unlikely(ret != 0)) @@ -916,15 +967,16 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX); if (dev_priv->capabilities & SVGA_CAP_GMR2) { - DRM_INFO("Max GMR ids is %u\n", + drm_info(&dev_priv->drm, + "Max GMR ids is %u\n", (unsigned)dev_priv->max_gmr_ids); - DRM_INFO("Max number of GMR pages is %u\n", + drm_info(&dev_priv->drm, + "Max number of GMR pages is %u\n", (unsigned)dev_priv->max_gmr_pages); - DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", - (unsigned)dev_priv->memory_size / 1024); } - DRM_INFO("Maximum display memory size is %llu kiB\n", - (uint64_t)dev_priv->prim_bb_mem / 1024); + drm_info(&dev_priv->drm, + "Maximum display memory size is %llu kiB\n", + (uint64_t)dev_priv->max_primary_mem / 1024); /* Need mmio memory to check for fifo pitchlock cap. */ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && @@ -939,7 +991,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) &vmw_prime_dmabuf_ops); if (unlikely(dev_priv->tdev == NULL)) { - DRM_ERROR("Unable to initialize TTM object management.\n"); + drm_err(&dev_priv->drm, + "Unable to initialize TTM object management.\n"); ret = -ENOMEM; goto out_err0; } @@ -947,7 +1000,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { ret = vmw_irq_install(&dev_priv->drm, pdev->irq); if (ret != 0) { - DRM_ERROR("Failed installing irq: %d\n", ret); + drm_err(&dev_priv->drm, + "Failed installing irq: %d\n", ret); goto out_no_irq; } } @@ -968,7 +1022,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->map_mode == vmw_dma_alloc_coherent, false); if (unlikely(ret != 0)) { - DRM_ERROR("Failed initializing TTM buffer object driver.\n"); + drm_err(&dev_priv->drm, + "Failed initializing TTM buffer object driver.\n"); goto out_no_bdev; } @@ -979,7 +1034,15 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) ret = vmw_vram_manager_init(dev_priv); if (unlikely(ret != 0)) { - DRM_ERROR("Failed initializing memory manager for VRAM.\n"); + drm_err(&dev_priv->drm, + "Failed initializing memory manager for VRAM.\n"); + goto out_no_vram; + } + + ret = vmw_devcaps_create(dev_priv); + if (unlikely(ret != 0)) { + drm_err(&dev_priv->drm, + "Failed initializing device caps.\n"); goto out_no_vram; } @@ -993,7 +1056,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || refuse_dma || vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) { - DRM_INFO("No GMR memory available. " + drm_info(&dev_priv->drm, + "No GMR memory available. " "Graphics memory resources are very limited.\n"); dev_priv->has_gmr = false; } @@ -1002,18 +1066,16 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->has_mob = true; if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) { - DRM_INFO("No MOB memory available. " + drm_info(&dev_priv->drm, + "No MOB memory available. 
" "3D will be disabled.\n"); dev_priv->has_mob = false; } } if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) { - spin_lock(&dev_priv->cap_lock); - vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT); - if (vmw_read(dev_priv, SVGA_REG_DEV_CAP)) + if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_DXCONTEXT)) dev_priv->sm_type = VMW_SM_4; - spin_unlock(&dev_priv->cap_lock); } vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN); @@ -1021,15 +1083,11 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */ if (has_sm4_context(dev_priv) && (dev_priv->capabilities2 & SVGA_CAP2_DX2)) { - vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41); - - if (vmw_read(dev_priv, SVGA_REG_DEV_CAP)) + if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM41)) dev_priv->sm_type = VMW_SM_4_1; - if (has_sm4_1_context(dev_priv) && - (dev_priv->capabilities2 & SVGA_CAP2_DX3)) { - vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5); - if (vmw_read(dev_priv, SVGA_REG_DEV_CAP)) + (dev_priv->capabilities2 & SVGA_CAP2_DX3)) { + if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) dev_priv->sm_type = VMW_SM_5; } } @@ -1043,14 +1101,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) if (ret) goto out_no_fifo; - if (dev_priv->sm_type == VMW_SM_5) - DRM_INFO("SM5 support available.\n"); - if (dev_priv->sm_type == VMW_SM_4_1) - DRM_INFO("SM4_1 support available.\n"); - if (dev_priv->sm_type == VMW_SM_4) - DRM_INFO("SM4 support available.\n"); - DRM_INFO("Running without reservation semaphore\n"); - + vmw_print_sm_type(dev_priv); vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)", VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR, VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE); @@ -1074,6 +1125,7 @@ out_no_kms: vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); if (dev_priv->has_gmr) vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); + vmw_devcaps_destroy(dev_priv); vmw_vram_manager_fini(dev_priv); out_no_vram: ttm_device_fini(&dev_priv->bdev); @@ -1122,6 +1174,7 @@ static void vmw_driver_unload(struct drm_device *dev) vmw_release_device_early(dev_priv); if (dev_priv->has_mob) vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); + vmw_devcaps_destroy(dev_priv); vmw_vram_manager_fini(dev_priv); ttm_device_fini(&dev_priv->bdev); drm_vma_offset_manager_destroy(&dev_priv->vma_manager); @@ -1137,6 +1190,8 @@ static void vmw_driver_unload(struct drm_device *dev) for (i = vmw_res_context; i < vmw_res_max; ++i) idr_destroy(&dev_priv->res_idr[i]); + vmw_mksstat_remove_all(dev_priv); + pci_release_regions(pdev); } @@ -1560,7 +1615,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct vmw_private *vmw; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); if (ret) return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index d1cef3b69e9d..63437270cbca 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * - * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009-2021 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -54,10 +54,10 @@ #define 
VMWGFX_DRIVER_NAME "vmwgfx" -#define VMWGFX_DRIVER_DATE "20210218" +#define VMWGFX_DRIVER_DATE "20210722" #define VMWGFX_DRIVER_MAJOR 2 -#define VMWGFX_DRIVER_MINOR 18 -#define VMWGFX_DRIVER_PATCHLEVEL 1 +#define VMWGFX_DRIVER_MINOR 19 +#define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_MAX_RELOCATIONS 2048 #define VMWGFX_MAX_VALIDATIONS 2048 @@ -91,6 +91,9 @@ #define VMW_RES_FENCE ttm_driver_type3 #define VMW_RES_SHADER ttm_driver_type4 +#define MKSSTAT_CAPACITY_LOG2 5U +#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2) + struct vmw_fpriv { struct ttm_object_file *tfile; bool gb_aware; /* user-space is guest-backed aware */ @@ -311,7 +314,6 @@ struct vmw_res_cache_entry { * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings. */ enum vmw_dma_map_mode { - vmw_dma_phys, /* Use physical page addresses */ vmw_dma_alloc_coherent, /* Use TTM coherent pages */ vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */ vmw_dma_map_bind, /* Unmap from DMA just before unbind */ @@ -356,7 +358,6 @@ struct vmw_piter { unsigned long num_pages; bool (*next)(struct vmw_piter *); dma_addr_t (*dma_address)(struct vmw_piter *); - struct page *(*page)(struct vmw_piter *); }; /* @@ -366,7 +367,8 @@ enum vmw_display_unit_type { vmw_du_invalid = 0, vmw_du_legacy, vmw_du_screen_object, - vmw_du_screen_target + vmw_du_screen_target, + vmw_du_max }; struct vmw_validation_context; @@ -486,13 +488,12 @@ struct vmw_private { struct ttm_device bdev; struct drm_vma_offset_manager vma_manager; - unsigned long pci_id; - u32 vmw_chipset; + u32 pci_id; resource_size_t io_start; resource_size_t vram_start; resource_size_t vram_size; - resource_size_t prim_bb_mem; - void __iomem *rmmio; + resource_size_t max_primary_mem; + u32 __iomem *rmmio; u32 *fifo_mem; resource_size_t fifo_mem_size; uint32_t fb_max_width; @@ -513,7 +514,6 @@ struct vmw_private { bool has_gmr; bool has_mob; spinlock_t hw_lock; - spinlock_t cap_lock; bool assume_16bpp; enum vmw_sm_type sm_type; @@ -629,6 +629,20 @@ struct vmw_private { /* Validation memory reservation */ struct vmw_validation_mem vvm; + + uint32 *devcaps; + + /* + * mksGuestStat instance-descriptor and pid arrays + */ + struct page *mksstat_user_pages[MKSSTAT_CAPACITY]; + atomic_t mksstat_user_pids[MKSSTAT_CAPACITY]; + +#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS) + struct page *mksstat_kern_pages[MKSSTAT_CAPACITY]; + u8 mksstat_kern_top_timer[MKSSTAT_CAPACITY]; + atomic_t mksstat_kern_pids[MKSSTAT_CAPACITY]; +#endif }; static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) @@ -1073,7 +1087,7 @@ static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter) */ static inline struct page *vmw_piter_page(struct vmw_piter *viter) { - return viter->page(viter); + return viter->pages[viter->i]; } /** @@ -1502,6 +1516,17 @@ __printf(1, 2) int vmw_host_printf(const char *fmt, ...); int vmw_msg_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +/* Host mksGuestStats -vmwgfx_msg.c: */ +int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv); + +int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int vmw_mksstat_remove_all(struct vmw_private *dev_priv); + /* VMW logging */ /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 
b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index a2b8464b3f56..5f2ffa9de5c8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -32,6 +32,7 @@ #include <drm/ttm/ttm_placement.h> #include "vmwgfx_so.h" #include "vmwgfx_binding.h" +#include "vmwgfx_mksstat.h" #define VMW_RES_HT_ORDER 12 @@ -2364,7 +2365,7 @@ static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv, sizeof(SVGA3dRenderTargetViewId); int ret; - if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) { + if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) { VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n"); return -EINVAL; } @@ -2546,6 +2547,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv, so_type = vmw_so_cmd_to_type(header->id); res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]); + if (IS_ERR(res)) + return PTR_ERR(res); cmd = container_of(header, typeof(*cmd), header); ret = vmw_cotable_notify(res, cmd->defined_id); @@ -4406,6 +4409,9 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, int ret; struct dma_fence *in_fence = NULL; + MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF); + MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF); + /* * Extend the ioctl argument while maintaining backwards compatibility: * We take different code paths depending on the value of arg->version. @@ -4415,7 +4421,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION || arg->version == 0)) { VMW_DEBUG_USER("Incorrect execbuf version.\n"); - return -EINVAL; + ret = -EINVAL; + goto mksstats_out; } switch (arg->version) { @@ -4435,7 +4442,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, if (!in_fence) { VMW_DEBUG_USER("Cannot get imported fence\n"); - return -EINVAL; + ret = -EINVAL; + goto mksstats_out; } ret = vmw_wait_dma_fence(dev_priv->fman, in_fence); @@ -4458,5 +4466,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, out: if (in_fence) dma_fence_put(in_fence); + +mksstats_out: + MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index 28ceb749a733..b2c4af331c9d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -71,8 +71,40 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, if (gman->max_gmr_pages > 0) { gman->used_gmr_pages += (*res)->num_pages; - if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) - goto nospace; + /* + * Because the graphics memory is a soft limit we can try to + * expand it instead of letting the userspace apps crash. + * We're just going to have a sane limit (half of RAM) + * on the number of MOB's that we create and will try to keep + * the system running until we reach that. + */ + if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) { + const unsigned long max_graphics_pages = totalram_pages() / 2; + uint32_t new_max_pages = 0; + + DRM_WARN("vmwgfx: mob memory overflow. Consider increasing guest RAM and graphicsMemory.\n"); + vmw_host_printf("vmwgfx, warning: mob memory overflow. 
Consider increasing guest RAM and graphicsMemory.\n"); + + if (gman->max_gmr_pages > (max_graphics_pages / 2)) { + DRM_WARN("vmwgfx: guest requires more than half of RAM for graphics.\n"); + new_max_pages = max_graphics_pages; + } else + new_max_pages = gman->max_gmr_pages * 2; + if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) { + DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n", + ((new_max_pages) << (PAGE_SHIFT - 10))); + + gman->max_gmr_pages = new_max_pages; + } else { + char buf[256]; + snprintf(buf, sizeof(buf), + "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n", + ((gman->max_gmr_pages) << (PAGE_SHIFT - 10))); + vmw_host_printf(buf); + DRM_WARN("%s", buf); + goto nospace; + } + } } (*res)->start = id; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 4fdacf9924e6..28af34ab6ed6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -26,14 +26,9 @@ **************************************************************************/ #include "vmwgfx_drv.h" +#include "vmwgfx_devcaps.h" #include <drm/vmwgfx_drm.h> #include "vmwgfx_kms.h" -#include "device_include/svga3d_caps.h" - -struct svga_3d_compat_cap { - SVGA3dCapsRecordHeader header; - SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX]; -}; int vmw_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -63,7 +58,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, param->value = vmw_fifo_caps(dev_priv); break; case DRM_VMW_PARAM_MAX_FB_SIZE: - param->value = dev_priv->prim_bb_mem; + param->value = dev_priv->max_primary_mem; break; case DRM_VMW_PARAM_FIFO_HW_VERSION: { @@ -88,16 +83,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, param->value = dev_priv->memory_size; break; case DRM_VMW_PARAM_3D_CAPS_SIZE: - if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && - vmw_fp->gb_aware) - param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t); - else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) - param->value = sizeof(struct svga_3d_compat_cap) + - sizeof(uint32_t); - else - param->value = (SVGA_FIFO_3D_CAPS_LAST - - SVGA_FIFO_3D_CAPS + 1) * - sizeof(uint32_t); + param->value = vmw_devcaps_size(dev_priv, vmw_fp->gb_aware); break; case DRM_VMW_PARAM_MAX_MOB_MEMORY: vmw_fp->gb_aware = true; @@ -126,55 +112,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, return 0; } -static u32 vmw_mask_legacy_multisample(unsigned int cap, u32 fmt_value) -{ - /* - * A version of user-space exists which use MULTISAMPLE_MASKABLESAMPLES - * to check the sample count supported by virtual device. Since there - * never was support for multisample count for backing MOB return 0. - * - * MULTISAMPLE_MASKABLESAMPLES devcap is marked as deprecated by virtual - * device. 
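The vmwgfx_gmrid_manager.c hunk above replaces the hard failure on graphics-memory overflow with a grow-on-demand policy: the soft limit is doubled per overflow until it reaches half of guest RAM, and the allocation only fails once the limit can no longer grow past current usage. A minimal sketch of that policy pulled out of the TTM manager for clarity (helper name hypothetical):

#include <linux/mm.h>		/* totalram_pages() */

/*
 * Stand-alone version of the growth policy used by
 * vmw_gmrid_man_get_node(): double the soft limit until it would pass
 * half of guest RAM, then clamp it there.  Returns the new limit in
 * pages, or 0 if the request still cannot be satisfied.
 */
static unsigned long vmw_grow_gmr_limit(unsigned long cur_limit_pages,
					unsigned long used_pages)
{
	const unsigned long max_graphics_pages = totalram_pages() / 2;
	unsigned long new_limit;

	if (cur_limit_pages > max_graphics_pages / 2)
		new_limit = max_graphics_pages;		/* final step: clamp at half of RAM */
	else
		new_limit = cur_limit_pages * 2;	/* normal step: double the limit */

	if (new_limit > cur_limit_pages && new_limit >= used_pages)
		return new_limit;

	return 0;	/* genuinely out of graphics memory */
}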
- */ - if (cap == SVGA3D_DEVCAP_DEAD5) - return 0; - - return fmt_value; -} - -static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, - size_t size) -{ - struct svga_3d_compat_cap *compat_cap = - (struct svga_3d_compat_cap *) bounce; - unsigned int i; - size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs); - unsigned int max_size; - - if (size < pair_offset) - return -EINVAL; - - max_size = (size - pair_offset) / sizeof(SVGA3dCapPair); - - if (max_size > SVGA3D_DEVCAP_MAX) - max_size = SVGA3D_DEVCAP_MAX; - - compat_cap->header.length = - (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); - compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; - - spin_lock(&dev_priv->cap_lock); - for (i = 0; i < max_size; ++i) { - vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); - compat_cap->pairs[i][0] = i; - compat_cap->pairs[i][1] = vmw_mask_legacy_multisample - (i, vmw_read(dev_priv, SVGA_REG_DEV_CAP)); - } - spin_unlock(&dev_priv->cap_lock); - - return 0; -} - int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -183,11 +120,9 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, (struct drm_vmw_get_3d_cap_arg *) data; struct vmw_private *dev_priv = vmw_priv(dev); uint32_t size; - u32 *fifo_mem; void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); - void *bounce; + void *bounce = NULL; int ret; - bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) { @@ -195,13 +130,11 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - if (gb_objects && vmw_fp->gb_aware) - size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t); - else if (gb_objects) - size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t); - else - size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) * - sizeof(uint32_t); + size = vmw_devcaps_size(dev_priv, vmw_fp->gb_aware); + if (unlikely(size == 0)) { + DRM_ERROR("Failed to figure out the devcaps size (no 3D).\n"); + return -ENOMEM; + } if (arg->max_size < size) size = arg->max_size; @@ -212,29 +145,9 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, return -ENOMEM; } - if (gb_objects && vmw_fp->gb_aware) { - int i, num; - uint32_t *bounce32 = (uint32_t *) bounce; - - num = size / sizeof(uint32_t); - if (num > SVGA3D_DEVCAP_MAX) - num = SVGA3D_DEVCAP_MAX; - - spin_lock(&dev_priv->cap_lock); - for (i = 0; i < num; ++i) { - vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); - *bounce32++ = vmw_mask_legacy_multisample - (i, vmw_read(dev_priv, SVGA_REG_DEV_CAP)); - } - spin_unlock(&dev_priv->cap_lock); - } else if (gb_objects) { - ret = vmw_fill_compat_cap(dev_priv, bounce, size); - if (unlikely(ret != 0)) - goto out_err; - } else { - fifo_mem = dev_priv->fifo_mem; - memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); - } + ret = vmw_devcaps_copy(dev_priv, vmw_fp->gb_aware, bounce, size); + if (unlikely (ret != 0)) + goto out_err; ret = copy_to_user(buffer, bounce, size); if (ret) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index b9a9b7ddadbd..c5191de365ca 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -25,6 +25,7 @@ * **************************************************************************/ +#include <linux/pci.h> #include <linux/sched/signal.h> #include "vmwgfx_drv.h" @@ -287,21 +288,18 @@ static void vmw_irq_preinstall(struct drm_device *dev) void 
vmw_irq_uninstall(struct drm_device *dev) { struct vmw_private *dev_priv = vmw_priv(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); uint32_t status; if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) return; - if (!dev->irq_enabled) - return; - vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); status = vmw_irq_status_read(dev_priv); vmw_irq_status_write(dev_priv, status); - dev->irq_enabled = false; - free_irq(dev->irq, dev); + free_irq(pdev->irq, dev); } /** @@ -313,20 +311,8 @@ void vmw_irq_uninstall(struct drm_device *dev) */ int vmw_irq_install(struct drm_device *dev, int irq) { - int ret; - - if (dev->irq_enabled) - return -EBUSY; - vmw_irq_preinstall(dev); - ret = request_threaded_irq(irq, vmw_irq_handler, vmw_thread_fn, - IRQF_SHARED, VMWGFX_DRIVER_NAME, dev); - if (ret < 0) - return ret; - - dev->irq_enabled = true; - dev->irq = irq; - - return ret; + return request_threaded_irq(irq, vmw_irq_handler, vmw_thread_fn, + IRQF_SHARED, VMWGFX_DRIVER_NAME, dev); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 220f9fd0d420..338c6e2613ea 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1487,7 +1487,7 @@ static int vmw_kms_check_display_memory(struct drm_device *dev, * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is * limit on primary bounding box */ - if (pixel_mem > dev_priv->prim_bb_mem) { + if (pixel_mem > dev_priv->max_primary_mem) { VMW_DEBUG_KMS("Combined output size too large.\n"); return -EINVAL; } @@ -1497,7 +1497,7 @@ static int vmw_kms_check_display_memory(struct drm_device *dev, !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) { bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4; - if (bb_mem > dev_priv->prim_bb_mem) { + if (bb_mem > dev_priv->max_primary_mem) { VMW_DEBUG_KMS("Topology is beyond supported limits.\n"); return -EINVAL; } @@ -1793,6 +1793,13 @@ int vmw_kms_init(struct vmw_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; int ret; + static const char *display_unit_names[] = { + "Invalid", + "Legacy", + "Screen Object", + "Screen Target", + "Invalid (max)" + }; drm_mode_config_init(dev); dev->mode_config.funcs = &vmw_kms_funcs; @@ -1810,6 +1817,9 @@ int vmw_kms_init(struct vmw_private *dev_priv) if (ret) /* Fallback */ ret = vmw_kms_ldu_init_display(dev_priv); } + BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1)); + drm_info(&dev_priv->drm, "%s display unit initialized\n", + display_unit_names[dev_priv->active_display_unit]); return ret; } @@ -1897,7 +1907,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, { return ((u64) pitch * (u64) height) < (u64) ((dev_priv->active_display_unit == vmw_du_screen_target) ? 
- dev_priv->prim_bb_mem : dev_priv->vram_size); + dev_priv->max_primary_mem : dev_priv->vram_size); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index d85c7eab9469..fb58a71c458f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -493,8 +493,7 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv) struct drm_device *dev = &dev_priv->drm; int i, ret; - if (dev_priv->ldu_priv) { - DRM_INFO("ldu system already on\n"); + if (unlikely(dev_priv->ldu_priv)) { return -EINVAL; } @@ -527,8 +526,6 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv) drm_mode_config_reset(dev); - DRM_INFO("Legacy Display Unit initialized\n"); - return 0; err_free: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h b/drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h new file mode 100644 index 000000000000..0509f55f07b4 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/************************************************************************** + * + * Copyright 2021 VMware, Inc., Palo Alto, CA., USA + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#ifndef _VMWGFX_MKSSTAT_H_ +#define _VMWGFX_MKSSTAT_H_ + +#include <asm/page.h> + +/* Reservation marker for mksstat pid's */ +#define MKSSTAT_PID_RESERVED -1 + +#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS) +/* + * Kernel-internal mksGuestStat counters. The order of this enum dictates the + * order of instantiation of these counters in the mksGuestStat pages. + */ + +typedef enum { + MKSSTAT_KERN_EXECBUF, /* vmw_execbuf_ioctl */ + + MKSSTAT_KERN_COUNT /* Reserved entry; always last */ +} mksstat_kern_stats_t; + +/** + * vmw_mksstat_get_kern_pstat: Computes the address of the MKSGuestStatCounterTime + * array from the address of the base page. + * + * @page_addr: Pointer to the base page. + * Return: Pointer to the MKSGuestStatCounterTime array. + */ + +static inline void *vmw_mksstat_get_kern_pstat(void *page_addr) +{ + return page_addr + PAGE_SIZE * 1; +} + +/** + * vmw_mksstat_get_kern_pinfo: Computes the address of the MKSGuestStatInfoEntry + * array from the address of the base page. + * + * @page_addr: Pointer to the base page. + * Return: Pointer to the MKSGuestStatInfoEntry array. 
+ */ + +static inline void *vmw_mksstat_get_kern_pinfo(void *page_addr) +{ + return page_addr + PAGE_SIZE * 2; +} + +/** + * vmw_mksstat_get_kern_pstrs: Computes the address of the mksGuestStat strings + * sequence from the address of the base page. + * + * @page_addr: Pointer to the base page. + * Return: Pointer to the mksGuestStat strings sequence. + */ + +static inline void *vmw_mksstat_get_kern_pstrs(void *page_addr) +{ + return page_addr + PAGE_SIZE * 3; +} + +/* + * MKS_STAT_TIME_DECL/PUSH/POP macros to be used in timer-counted routines. + */ + +struct mksstat_timer_t { +/* mutable */ mksstat_kern_stats_t old_top; + const u64 t0; + const int slot; +}; + +#define MKS_STAT_TIME_DECL(kern_cntr) \ + struct mksstat_timer_t _##kern_cntr = { \ + .t0 = rdtsc(), \ + .slot = vmw_mksstat_get_kern_slot(current->pid, dev_priv) \ + } + +#define MKS_STAT_TIME_PUSH(kern_cntr) \ + do { \ + if (_##kern_cntr.slot >= 0) { \ + _##kern_cntr.old_top = dev_priv->mksstat_kern_top_timer[_##kern_cntr.slot]; \ + dev_priv->mksstat_kern_top_timer[_##kern_cntr.slot] = kern_cntr; \ + } \ + } while (0) + +#define MKS_STAT_TIME_POP(kern_cntr) \ + do { \ + if (_##kern_cntr.slot >= 0) { \ + const pid_t pid = atomic_cmpxchg(&dev_priv->mksstat_kern_pids[_##kern_cntr.slot], current->pid, MKSSTAT_PID_RESERVED); \ + dev_priv->mksstat_kern_top_timer[_##kern_cntr.slot] = _##kern_cntr.old_top; \ + \ + if (pid == current->pid) { \ + const u64 dt = rdtsc() - _##kern_cntr.t0; \ + MKSGuestStatCounterTime *pstat; \ + \ + BUG_ON(!dev_priv->mksstat_kern_pages[_##kern_cntr.slot]); \ + \ + pstat = vmw_mksstat_get_kern_pstat(page_address(dev_priv->mksstat_kern_pages[_##kern_cntr.slot])); \ + \ + atomic64_inc(&pstat[kern_cntr].counter.count); \ + atomic64_add(dt, &pstat[kern_cntr].selfCycles); \ + atomic64_add(dt, &pstat[kern_cntr].totalCycles); \ + \ + if (_##kern_cntr.old_top != MKSSTAT_KERN_COUNT) \ + atomic64_sub(dt, &pstat[_##kern_cntr.old_top].selfCycles); \ + \ + atomic_set(&dev_priv->mksstat_kern_pids[_##kern_cntr.slot], current->pid); \ + } \ + } \ + } while (0) + +#else +#define MKS_STAT_TIME_DECL(kern_cntr) +#define MKS_STAT_TIME_PUSH(kern_cntr) +#define MKS_STAT_TIME_POP(kern_cntr) + +#endif /* IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS */ + +#endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index f2d625415458..c8e578f63c9c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -37,14 +37,14 @@ #ifdef CONFIG_64BIT #define VMW_PPN_SIZE 8 -#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0 -#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1 -#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2 +#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PT64_0 +#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PT64_1 +#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PT64_2 #else #define VMW_PPN_SIZE 4 -#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0 -#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1 -#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2 +#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PT_0 +#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PT_1 +#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PT_2 #endif /* @@ -70,20 +70,20 @@ struct vmw_mob { * @page_table: Pointer to a struct vmw_mob holding the page table. 
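The MKS_STAT_TIME_DECL/PUSH/POP macros above are meant to bracket a timed kernel path, which is exactly how the vmwgfx_execbuf.c hunk earlier in this section instruments vmw_execbuf_ioctl(). A short sketch of the intended usage pattern (the function is a placeholder; dev_priv must be in scope because the macros reference it directly):

/*
 * Usage sketch mirroring vmw_execbuf_ioctl().  The macros expand to
 * nothing when CONFIG_DRM_VMWGFX_MKSSTATS is disabled, so the
 * surrounding code is unchanged in that configuration.
 */
static int vmw_timed_path_example(struct vmw_private *dev_priv)
{
	int ret;

	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);	/* grab slot + TSC at entry */
	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);	/* become the "top" timer */

	ret = 0;	/* ... the actual timed work would go here ... */

	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);	/* account self/total cycles */
	return ret;
}

Every exit path of the timed routine must pass through the POP, which is why the execbuf hunk reroutes its early returns to a mksstats_out label.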
*/ static const struct vmw_otable pre_dx_tables[] = { - {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true}, - {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true}, - {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true}, - {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true}, - {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE, + {VMWGFX_NUM_MOB * sizeof(SVGAOTableMobEntry), NULL, true}, + {VMWGFX_NUM_GB_SURFACE * sizeof(SVGAOTableSurfaceEntry), NULL, true}, + {VMWGFX_NUM_GB_CONTEXT * sizeof(SVGAOTableContextEntry), NULL, true}, + {VMWGFX_NUM_GB_SHADER * sizeof(SVGAOTableShaderEntry), NULL, true}, + {VMWGFX_NUM_GB_SCREEN_TARGET * sizeof(SVGAOTableScreenTargetEntry), NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE} }; static const struct vmw_otable dx_tables[] = { - {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true}, - {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true}, - {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true}, - {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true}, - {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE, + {VMWGFX_NUM_MOB * sizeof(SVGAOTableMobEntry), NULL, true}, + {VMWGFX_NUM_GB_SURFACE * sizeof(SVGAOTableSurfaceEntry), NULL, true}, + {VMWGFX_NUM_GB_CONTEXT * sizeof(SVGAOTableContextEntry), NULL, true}, + {VMWGFX_NUM_GB_SHADER * sizeof(SVGAOTableShaderEntry), NULL, true}, + {VMWGFX_NUM_GB_SCREEN_TARGET * sizeof(SVGAOTableScreenTargetEntry), NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}, {VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true}, }; @@ -155,7 +155,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv, goto out_no_populate; vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT); - mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; + mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PT_1; } cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); @@ -506,11 +506,13 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob, { unsigned long num_pt_pages = 0; struct ttm_buffer_object *bo = mob->pt_bo; - struct vmw_piter save_pt_iter; + struct vmw_piter save_pt_iter = {0}; struct vmw_piter pt_iter; const struct vmw_sg_table *vsgt; int ret; + BUG_ON(num_data_pages == 0); + ret = ttm_bo_reserve(bo, false, true, NULL); BUG_ON(ret != 0); @@ -633,7 +635,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv, vmw_mob_pt_setup(mob, data_iter, num_data_pages); pt_set_up = true; - mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; + mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PT_1; } vmw_fifo_resource_inc(dev_priv); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 3d08f5700bdb..ed9c7b3a1e08 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -31,10 +31,12 @@ #include <linux/mem_encrypt.h> #include <asm/hypervisor.h> +#include <drm/drm_ioctl.h> #include "vmwgfx_drv.h" #include "vmwgfx_msg_x86.h" #include "vmwgfx_msg_arm64.h" +#include "vmwgfx_mksstat.h" #define MESSAGE_STATUS_SUCCESS 0x0001 #define MESSAGE_STATUS_DORECV 0x0002 @@ -56,6 +58,11 @@ #define VMW_PORT_CMD_RECVSIZE (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG) #define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG) +#define VMW_PORT_CMD_MKS_GUEST_STATS 85 +#define VMW_PORT_CMD_MKSGS_RESET (0 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS) +#define VMW_PORT_CMD_MKSGS_ADD_PPN (1 << 16 | 
VMW_PORT_CMD_MKS_GUEST_STATS) +#define VMW_PORT_CMD_MKSGS_REMOVE_PPN (2 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS) + #define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16) #define MAX_USER_MSG_LENGTH PAGE_SIZE @@ -155,6 +162,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel, /* HB port can't access encrypted memory. */ if (hb && !mem_encrypt_active()) { unsigned long bp = channel->cookie_high; + u32 channel_id = (channel->channel_id << 16); si = (uintptr_t) msg; di = channel->cookie_low; @@ -162,7 +170,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel, VMW_PORT_HB_OUT( (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, msg_len, si, di, - VMWARE_HYPERVISOR_HB | (channel->channel_id << 16) | + VMWARE_HYPERVISOR_HB | channel_id | VMWARE_HYPERVISOR_OUT, VMW_HYPERVISOR_MAGIC, bp, eax, ebx, ecx, edx, si, di); @@ -210,6 +218,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply, /* HB port can't access encrypted memory */ if (hb && !mem_encrypt_active()) { unsigned long bp = channel->cookie_low; + u32 channel_id = (channel->channel_id << 16); si = channel->cookie_high; di = (uintptr_t) reply; @@ -217,7 +226,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply, VMW_PORT_HB_IN( (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, reply_len, si, di, - VMWARE_HYPERVISOR_HB | (channel->channel_id << 16), + VMWARE_HYPERVISOR_HB | channel_id, VMW_HYPERVISOR_MAGIC, bp, eax, ebx, ecx, edx, si, di); @@ -612,3 +621,569 @@ out_open: return -EINVAL; } + +/** + * reset_ppn_array: Resets a PPN64 array to INVALID_PPN64 content + * + * @arr: Array to reset. + * @size: Array length. + */ +static inline void reset_ppn_array(PPN64 *arr, size_t size) +{ + size_t i; + + BUG_ON(!arr || size == 0); + + for (i = 0; i < size; ++i) + arr[i] = INVALID_PPN64; +} + +/** + * hypervisor_ppn_reset_all: Removes all mksGuestStat instance descriptors from + * the hypervisor. All related pages should be subsequently unpinned or freed. + * + */ +static inline void hypervisor_ppn_reset_all(void) +{ + unsigned long eax, ebx, ecx, edx, si = 0, di = 0; + + VMW_PORT(VMW_PORT_CMD_MKSGS_RESET, + 0, si, di, + 0, + VMW_HYPERVISOR_MAGIC, + eax, ebx, ecx, edx, si, di); +} + +/** + * hypervisor_ppn_add: Adds a single mksGuestStat instance descriptor to the + * hypervisor. Any related userspace pages should be pinned in advance. + * + * @pfn: Physical page number of the instance descriptor + */ +static inline void hypervisor_ppn_add(PPN64 pfn) +{ + unsigned long eax, ebx, ecx, edx, si = 0, di = 0; + + VMW_PORT(VMW_PORT_CMD_MKSGS_ADD_PPN, + (unsigned long)pfn, si, di, + 0, + VMW_HYPERVISOR_MAGIC, + eax, ebx, ecx, edx, si, di); +} + +/** + * hypervisor_ppn_remove: Removes a single mksGuestStat instance descriptor from + * the hypervisor. All related pages should be subsequently unpinned or freed. 
+ * + * @pfn: Physical page number of the instance descriptor + */ +static inline void hypervisor_ppn_remove(PPN64 pfn) +{ + unsigned long eax, ebx, ecx, edx, si = 0, di = 0; + + VMW_PORT(VMW_PORT_CMD_MKSGS_REMOVE_PPN, + (unsigned long)pfn, si, di, + 0, + VMW_HYPERVISOR_MAGIC, + eax, ebx, ecx, edx, si, di); +} + +#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS) + +/* Order of the total number of pages used for kernel-internal mksGuestStat; at least 2 */ +#define MKSSTAT_KERNEL_PAGES_ORDER 2 +/* Header to the text description of mksGuestStat instance descriptor */ +#define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx" + +/* Kernel mksGuestStats counter names and desciptions; same order as enum mksstat_kern_stats_t */ +static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] = +{ + { "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" }, +}; + +/** + * mksstat_init_record: Initializes an MKSGuestStatCounter-based record + * for the respective mksGuestStat index. + * + * @stat_idx: Index of the MKSGuestStatCounter-based mksGuestStat record. + * @pstat: Pointer to array of MKSGuestStatCounterTime. + * @pinfo: Pointer to array of MKSGuestStatInfoEntry. + * @pstrs: Pointer to current end of the name/description sequence. + * Return: Pointer to the new end of the names/description sequence. + */ + +static inline char *mksstat_init_record(mksstat_kern_stats_t stat_idx, + MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs) +{ + char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1; + strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]); + strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]); + + pinfo[stat_idx].name.s = pstrs; + pinfo[stat_idx].description.s = pstrd; + pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_NONE; + pinfo[stat_idx].stat.counter = (MKSGuestStatCounter *)&pstat[stat_idx]; + + return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1; +} + +/** + * mksstat_init_record_time: Initializes an MKSGuestStatCounterTime-based record + * for the respective mksGuestStat index. + * + * @stat_idx: Index of the MKSGuestStatCounterTime-based mksGuestStat record. + * @pstat: Pointer to array of MKSGuestStatCounterTime. + * @pinfo: Pointer to array of MKSGuestStatInfoEntry. + * @pstrs: Pointer to current end of the name/description sequence. + * Return: Pointer to the new end of the names/description sequence. + */ + +static inline char *mksstat_init_record_time(mksstat_kern_stats_t stat_idx, + MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs) +{ + char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1; + strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]); + strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]); + + pinfo[stat_idx].name.s = pstrs; + pinfo[stat_idx].description.s = pstrd; + pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_TIME; + pinfo[stat_idx].stat.counterTime = &pstat[stat_idx]; + + return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1; +} + +/** + * mksstat_init_kern_id: Creates a single mksGuestStat instance descriptor and + * kernel-internal counters. Adds PFN mapping to the hypervisor. + * + * Create a single mksGuestStat instance descriptor and corresponding structures + * for all kernel-internal counters. The corresponding PFNs are mapped with the + * hypervisor. + * + * @ppage: Output pointer to page containing the instance descriptor. + * Return: Zero on success, negative error code on error. 
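The records set up by mksstat_init_record_time() point at MKSGuestStatCounterTime slots whose fields are updated atomically by the MKS_STAT_TIME_POP macro shown earlier (invocation count, self cycles, total cycles). A hedged debugging sketch that reads one such slot back, assuming the same atomic64_t field layout the macros rely on (helper name hypothetical):

#include <linux/printk.h>

/* Dump one timed counter; field layout as used by MKS_STAT_TIME_POP. */
static void mksstat_dump_counter(MKSGuestStatCounterTime *stat,
				 const char *name)
{
	pr_info("mksstat %s: count=%lld self=%lld total=%lld\n", name,
		(long long)atomic64_read(&stat->counter.count),
		(long long)atomic64_read(&stat->selfCycles),
		(long long)atomic64_read(&stat->totalCycles));
}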
+ */ + +static int mksstat_init_kern_id(struct page **ppage) +{ + MKSGuestStatInstanceDescriptor *pdesc; + MKSGuestStatCounterTime *pstat; + MKSGuestStatInfoEntry *pinfo; + char *pstrs, *pstrs_acc; + + /* Allocate pages for the kernel-internal instance descriptor */ + struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, MKSSTAT_KERNEL_PAGES_ORDER); + + if (!page) + return -ENOMEM; + + pdesc = page_address(page); + pstat = vmw_mksstat_get_kern_pstat(pdesc); + pinfo = vmw_mksstat_get_kern_pinfo(pdesc); + pstrs = vmw_mksstat_get_kern_pstrs(pdesc); + + /* Set up all kernel-internal counters and corresponding structures */ + pstrs_acc = pstrs; + pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_EXECBUF, pstat, pinfo, pstrs_acc); + + /* Add new counters above, in their order of appearance in mksstat_kern_stats_t */ + + BUG_ON(pstrs_acc - pstrs > PAGE_SIZE); + + /* Set up the kernel-internal instance descriptor */ + pdesc->reservedMBZ = 0; + pdesc->statStartVA = (uintptr_t)pstat; + pdesc->strsStartVA = (uintptr_t)pstrs; + pdesc->statLength = sizeof(*pstat) * MKSSTAT_KERN_COUNT; + pdesc->infoLength = sizeof(*pinfo) * MKSSTAT_KERN_COUNT; + pdesc->strsLength = pstrs_acc - pstrs; + snprintf(pdesc->description, ARRAY_SIZE(pdesc->description) - 1, "%s pid=%d", + MKSSTAT_KERNEL_DESCRIPTION, current->pid); + + pdesc->statPPNs[0] = page_to_pfn(virt_to_page(pstat)); + reset_ppn_array(pdesc->statPPNs + 1, ARRAY_SIZE(pdesc->statPPNs) - 1); + + pdesc->infoPPNs[0] = page_to_pfn(virt_to_page(pinfo)); + reset_ppn_array(pdesc->infoPPNs + 1, ARRAY_SIZE(pdesc->infoPPNs) - 1); + + pdesc->strsPPNs[0] = page_to_pfn(virt_to_page(pstrs)); + reset_ppn_array(pdesc->strsPPNs + 1, ARRAY_SIZE(pdesc->strsPPNs) - 1); + + *ppage = page; + + hypervisor_ppn_add((PPN64)page_to_pfn(page)); + + return 0; +} + +/** + * vmw_mksstat_get_kern_slot: Acquires a slot for a single kernel-internal + * mksGuestStat instance descriptor. + * + * Find a slot for a single kernel-internal mksGuestStat instance descriptor. + * In case no such was already present, allocate a new one and set up a kernel- + * internal mksGuestStat instance descriptor for the former. + * + * @pid: Process for which a slot is sought. + * @dev_priv: Identifies the drm private device. + * Return: Non-negative slot on success, negative error code on error. + */ + +int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv) +{ + const size_t base = (u32)hash_32(pid, MKSSTAT_CAPACITY_LOG2); + size_t i; + + for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) { + const size_t slot = (i + base) % ARRAY_SIZE(dev_priv->mksstat_kern_pids); + + /* Check if an instance descriptor for this pid is already present */ + if (pid == (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[slot])) + return (int)slot; + + /* Set up a new instance descriptor for this pid */ + if (!atomic_cmpxchg(&dev_priv->mksstat_kern_pids[slot], 0, MKSSTAT_PID_RESERVED)) { + const int ret = mksstat_init_kern_id(&dev_priv->mksstat_kern_pages[slot]); + + if (!ret) { + /* Reset top-timer tracking for this slot */ + dev_priv->mksstat_kern_top_timer[slot] = MKSSTAT_KERN_COUNT; + + atomic_set(&dev_priv->mksstat_kern_pids[slot], pid); + return (int)slot; + } + + atomic_set(&dev_priv->mksstat_kern_pids[slot], 0); + return ret; + } + } + + return -ENOSPC; +} + +#endif + +/** + * vmw_mksstat_cleanup_descriptor: Frees a single userspace-originating + * mksGuestStat instance-descriptor page and unpins all related user pages. 
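vmw_mksstat_get_kern_slot() above uses open addressing over a fixed-size pid array: the probe starts at hash_32(pid, MKSSTAT_CAPACITY_LOG2) and walks linearly, claiming an empty slot with a cmpxchg against MKSSTAT_PID_RESERVED. A lookup-only sketch of the same probe order (hypothetical helper, no slot creation, assumes CONFIG_DRM_VMWGFX_MKSSTATS so the kern arrays exist):

#include <linux/hash.h>

/* Find an existing kernel mksstat slot for @pid, or -ENOENT. */
static int vmw_mksstat_find_kern_slot(pid_t pid, struct vmw_private *dev_priv)
{
	const size_t base = (u32)hash_32(pid, MKSSTAT_CAPACITY_LOG2);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const size_t slot =
			(i + base) % ARRAY_SIZE(dev_priv->mksstat_kern_pids);

		if (pid == (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[slot]))
			return (int)slot;
	}

	return -ENOENT;
}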
+ * + * Unpin all user pages realated to this instance descriptor and free + * the instance-descriptor page itself. + * + * @page: Page of the instance descriptor. + */ + +static void vmw_mksstat_cleanup_descriptor(struct page *page) +{ + MKSGuestStatInstanceDescriptor *pdesc = page_address(page); + size_t i; + + for (i = 0; i < ARRAY_SIZE(pdesc->statPPNs) && pdesc->statPPNs[i] != INVALID_PPN64; ++i) + unpin_user_page(pfn_to_page(pdesc->statPPNs[i])); + + for (i = 0; i < ARRAY_SIZE(pdesc->infoPPNs) && pdesc->infoPPNs[i] != INVALID_PPN64; ++i) + unpin_user_page(pfn_to_page(pdesc->infoPPNs[i])); + + for (i = 0; i < ARRAY_SIZE(pdesc->strsPPNs) && pdesc->strsPPNs[i] != INVALID_PPN64; ++i) + unpin_user_page(pfn_to_page(pdesc->strsPPNs[i])); + + __free_page(page); +} + +/** + * vmw_mksstat_remove_all: Resets all mksGuestStat instance descriptors + * from the hypervisor. + * + * Discard all hypervisor PFN mappings, containing active mksGuestState instance + * descriptors, unpin the related userspace pages and free the related kernel pages. + * + * @dev_priv: Identifies the drm private device. + * Return: Zero on success, negative error code on error. + */ + +int vmw_mksstat_remove_all(struct vmw_private *dev_priv) +{ + int ret = 0; + size_t i; + + /* Discard all PFN mappings with the hypervisor */ + hypervisor_ppn_reset_all(); + + /* Discard all userspace-originating instance descriptors and unpin all related pages */ + for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++i) { + const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_user_pids[i]); + + if (!pid0) + continue; + + if (pid0 != MKSSTAT_PID_RESERVED) { + const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_user_pids[i], pid0, MKSSTAT_PID_RESERVED); + + if (!pid1) + continue; + + if (pid1 == pid0) { + struct page *const page = dev_priv->mksstat_user_pages[i]; + + BUG_ON(!page); + + dev_priv->mksstat_user_pages[i] = NULL; + atomic_set(&dev_priv->mksstat_user_pids[i], 0); + + vmw_mksstat_cleanup_descriptor(page); + continue; + } + } + + ret = -EAGAIN; + } + +#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS) + /* Discard all kernel-internal instance descriptors and free all related pages */ + for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) { + const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[i]); + + if (!pid0) + continue; + + if (pid0 != MKSSTAT_PID_RESERVED) { + const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_kern_pids[i], pid0, MKSSTAT_PID_RESERVED); + + if (!pid1) + continue; + + if (pid1 == pid0) { + struct page *const page = dev_priv->mksstat_kern_pages[i]; + + BUG_ON(!page); + + dev_priv->mksstat_kern_pages[i] = NULL; + atomic_set(&dev_priv->mksstat_kern_pids[i], 0); + + __free_pages(page, MKSSTAT_KERNEL_PAGES_ORDER); + continue; + } + } + + ret = -EAGAIN; + } + +#endif + return ret; +} + +/** + * vmw_mksstat_reset_ioctl: Resets all mksGuestStat instance descriptors + * from the hypervisor. + * + * Discard all hypervisor PFN mappings, containing active mksGuestStat instance + * descriptors, unpin the related userspace pages and free the related kernel pages. + * + * @dev: Identifies the drm device. + * @data: Pointer to the ioctl argument. + * @file_priv: Identifies the caller; unused. + * Return: Zero on success, negative error code on error. 
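For context, the add/remove ioctls below expect userspace to hand in page-resident buffers for the stats, info entries and strings; the kernel pins them with FOLL_LONGTERM and keeps them until a matching remove or reset. A hypothetical userspace sketch of the add path: the DRM_IOCTL_VMW_MKSSTAT_ADD wrapper name, header include path and render-node path are assumptions, only the drm_vmw_mksstat_add_arg fields are taken from the kernel code in this patch:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/vmwgfx_drm.h>	/* include path may differ with libdrm */

static int mksstat_add_example(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* path is an assumption */
	struct drm_vmw_mksstat_add_arg arg;
	void *stat, *info, *strs;

	if (fd < 0)
		return -1;

	/* Buffers must stay resident; the kernel pins them long-term. */
	stat = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	info = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	strs = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stat == MAP_FAILED || info == MAP_FAILED || strs == MAP_FAILED)
		return -1;

	memset(&arg, 0, sizeof(arg));
	arg.stat = (uintptr_t)stat;
	arg.info = (uintptr_t)info;
	arg.strs = (uintptr_t)strs;
	arg.stat_len = 4096;
	arg.info_len = 4096;
	arg.strs_len = 4096;
	arg.description = (uintptr_t)"my-app";

	/* DRM_IOCTL_VMW_MKSSTAT_ADD is a hypothetical wrapper name. */
	return ioctl(fd, DRM_IOCTL_VMW_MKSSTAT_ADD, &arg);
}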
+ */ + +int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *const dev_priv = vmw_priv(dev); + return vmw_mksstat_remove_all(dev_priv); +} + +/** + * vmw_mksstat_add_ioctl: Creates a single userspace-originating mksGuestStat + * instance descriptor and registers that with the hypervisor. + * + * Create a hypervisor PFN mapping, containing a single mksGuestStat instance + * descriptor and pin the corresponding userspace pages. + * + * @dev: Identifies the drm device. + * @data: Pointer to the ioctl argument. + * @file_priv: Identifies the caller; unused. + * Return: Zero on success, negative error code on error. + */ + +int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vmw_mksstat_add_arg *arg = + (struct drm_vmw_mksstat_add_arg *) data; + + struct vmw_private *const dev_priv = vmw_priv(dev); + + struct page *page; + MKSGuestStatInstanceDescriptor *pdesc; + const size_t num_pages_stat = vmw_num_pages(arg->stat_len); + const size_t num_pages_info = vmw_num_pages(arg->info_len); + const size_t num_pages_strs = vmw_num_pages(arg->strs_len); + long desc_len; + long nr_pinned_stat; + long nr_pinned_info; + long nr_pinned_strs; + struct page *pages_stat[ARRAY_SIZE(pdesc->statPPNs)]; + struct page *pages_info[ARRAY_SIZE(pdesc->infoPPNs)]; + struct page *pages_strs[ARRAY_SIZE(pdesc->strsPPNs)]; + size_t i, slot; + + arg->id = -1; + + if (!arg->stat || !arg->info || !arg->strs) + return -EINVAL; + + if (!arg->stat_len || !arg->info_len || !arg->strs_len) + return -EINVAL; + + if (!arg->description) + return -EINVAL; + + if (num_pages_stat > ARRAY_SIZE(pdesc->statPPNs) || + num_pages_info > ARRAY_SIZE(pdesc->infoPPNs) || + num_pages_strs > ARRAY_SIZE(pdesc->strsPPNs)) + return -EINVAL; + + /* Find an available slot in the mksGuestStats user array and reserve it */ + for (slot = 0; slot < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++slot) + if (!atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], 0, MKSSTAT_PID_RESERVED)) + break; + + if (slot == ARRAY_SIZE(dev_priv->mksstat_user_pids)) + return -ENOSPC; + + BUG_ON(dev_priv->mksstat_user_pages[slot]); + + /* Allocate a page for the instance descriptor */ + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + + if (!page) { + atomic_set(&dev_priv->mksstat_user_pids[slot], 0); + return -ENOMEM; + } + + /* Set up the instance descriptor */ + pdesc = page_address(page); + + pdesc->reservedMBZ = 0; + pdesc->statStartVA = arg->stat; + pdesc->strsStartVA = arg->strs; + pdesc->statLength = arg->stat_len; + pdesc->infoLength = arg->info_len; + pdesc->strsLength = arg->strs_len; + desc_len = strncpy_from_user(pdesc->description, u64_to_user_ptr(arg->description), + ARRAY_SIZE(pdesc->description) - 1); + + if (desc_len < 0) { + atomic_set(&dev_priv->mksstat_user_pids[slot], 0); + return -EFAULT; + } + + reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs)); + reset_ppn_array(pdesc->infoPPNs, ARRAY_SIZE(pdesc->infoPPNs)); + reset_ppn_array(pdesc->strsPPNs, ARRAY_SIZE(pdesc->strsPPNs)); + + /* Pin mksGuestStat user pages and store those in the instance descriptor */ + nr_pinned_stat = pin_user_pages(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat, NULL); + if (num_pages_stat != nr_pinned_stat) + goto err_pin_stat; + + for (i = 0; i < num_pages_stat; ++i) + pdesc->statPPNs[i] = page_to_pfn(pages_stat[i]); + + nr_pinned_info = pin_user_pages(arg->info, num_pages_info, FOLL_LONGTERM, pages_info, NULL); + if (num_pages_info != nr_pinned_info) 
+ goto err_pin_info; + + for (i = 0; i < num_pages_info; ++i) + pdesc->infoPPNs[i] = page_to_pfn(pages_info[i]); + + nr_pinned_strs = pin_user_pages(arg->strs, num_pages_strs, FOLL_LONGTERM, pages_strs, NULL); + if (num_pages_strs != nr_pinned_strs) + goto err_pin_strs; + + for (i = 0; i < num_pages_strs; ++i) + pdesc->strsPPNs[i] = page_to_pfn(pages_strs[i]); + + /* Send the descriptor to the host via a hypervisor call. The mksGuestStat + pages will remain in use until the user requests a matching remove stats + or a stats reset occurs. */ + hypervisor_ppn_add((PPN64)page_to_pfn(page)); + + dev_priv->mksstat_user_pages[slot] = page; + atomic_set(&dev_priv->mksstat_user_pids[slot], task_pgrp_vnr(current)); + + arg->id = slot; + + DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%zu\n", current->pid, (int)desc_len, pdesc->description, slot); + + return 0; + +err_pin_strs: + if (nr_pinned_strs > 0) + unpin_user_pages(pages_strs, nr_pinned_strs); + +err_pin_info: + if (nr_pinned_info > 0) + unpin_user_pages(pages_info, nr_pinned_info); + +err_pin_stat: + if (nr_pinned_stat > 0) + unpin_user_pages(pages_stat, nr_pinned_stat); + + atomic_set(&dev_priv->mksstat_user_pids[slot], 0); + __free_page(page); + return -ENOMEM; +} + +/** + * vmw_mksstat_remove_ioctl: Removes a single userspace-originating mksGuestStat + * instance descriptor from the hypervisor. + * + * Discard a hypervisor PFN mapping, containing a single mksGuestStat instance + * descriptor and unpin the corresponding userspace pages. + * + * @dev: Identifies the drm device. + * @data: Pointer to the ioctl argument. + * @file_priv: Identifies the caller; unused. + * Return: Zero on success, negative error code on error. + */ + +int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vmw_mksstat_remove_arg *arg = + (struct drm_vmw_mksstat_remove_arg *) data; + + struct vmw_private *const dev_priv = vmw_priv(dev); + + const size_t slot = arg->id; + pid_t pgid, pid; + + if (slot >= ARRAY_SIZE(dev_priv->mksstat_user_pids)) + return -EINVAL; + + DRM_DEV_INFO(dev->dev, "pid=%d arg.id=%zu\n", current->pid, slot); + + pgid = task_pgrp_vnr(current); + pid = atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], pgid, MKSSTAT_PID_RESERVED); + + if (!pid) + return 0; + + if (pid == pgid) { + struct page *const page = dev_priv->mksstat_user_pages[slot]; + + BUG_ON(!page); + + dev_priv->mksstat_user_pages[slot] = NULL; + atomic_set(&dev_priv->mksstat_user_pids[slot], 0); + + hypervisor_ppn_remove((PPN64)page_to_pfn(page)); + + vmw_mksstat_cleanup_descriptor(page); + return 0; + } + + return -EAGAIN; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 7b45393ad98e..3b6f6044c325 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -114,6 +114,7 @@ static void vmw_resource_release(struct kref *kref) container_of(kref, struct vmw_resource, kref); struct vmw_private *dev_priv = res->dev_priv; int id; + int ret; struct idr *idr = &dev_priv->res_idr[res->func->res_type]; spin_lock(&dev_priv->resource_lock); @@ -122,7 +123,8 @@ static void vmw_resource_release(struct kref *kref) if (res->backup) { struct ttm_buffer_object *bo = &res->backup->base; - ttm_bo_reserve(bo, false, false, NULL); + ret = ttm_bo_reserve(bo, false, false, NULL); + BUG_ON(ret); if (vmw_resource_mob_attached(res) && res->func->unbind != NULL) { struct ttm_validate_buffer val_buf; @@ -1001,7 +1003,9 @@ int 
vmw_resource_pin(struct vmw_resource *res, bool interruptible) if (res->backup) { vbo = res->backup; - ttm_bo_reserve(&vbo->base, interruptible, false, NULL); + ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL); + if (ret) + goto out_no_validate; if (!vbo->base.pin_count) { ret = ttm_bo_validate (&vbo->base, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 145430d14219..bd157fb21b45 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -954,8 +954,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv) int i, ret; if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) { - DRM_INFO("Not using screen objects," - " missing cap SCREEN_OBJECT_2\n"); return -ENOSYS; } @@ -972,8 +970,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv) drm_mode_config_reset(dev); - DRM_INFO("Screen Objects Display Unit initialized\n"); - return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c index c3a8d6e8380e..9efb4463ce99 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c @@ -539,7 +539,8 @@ const SVGACOTableType vmw_so_cotables[] = { [vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL, [vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE, [vmw_so_ss] = SVGA_COTABLE_SAMPLER, - [vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT + [vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT, + [vmw_so_max]= SVGA_COTABLE_MAX }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 9e236f9f8a8a..d85310b2608d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -33,7 +33,7 @@ #include <drm/drm_vblank.h> #include "vmwgfx_kms.h" -#include "device_include/svga3d_surfacedefs.h" +#include "vmw_surface_cache.h" #define vmw_crtc_to_stdu(x) \ container_of(x, struct vmw_screen_target_display_unit, base.crtc) @@ -1889,14 +1889,13 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) ret = vmw_stdu_init(dev_priv, i); if (unlikely(ret != 0)) { - DRM_ERROR("Failed to initialize STDU %d", i); + drm_err(&dev_priv->drm, + "Failed to initialize STDU %d", i); return ret; } } drm_mode_config_reset(dev); - DRM_INFO("Screen Target Display device initialized\n"); - return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 0835468bb2ee..0eba47762bed 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -31,6 +31,7 @@ #include "vmwgfx_resource_priv.h" #include "vmwgfx_so.h" #include "vmwgfx_binding.h" +#include "vmw_surface_cache.h" #include "device_include/svga3d_surfacedefs.h" #define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32) @@ -78,7 +79,7 @@ struct vmw_surface_offset { * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource. 
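The vmwgfx_resource.c hunks above stop ignoring the return value of ttm_bo_reserve(). A hedged sketch of the checked reserve/validate/unreserve pattern those hunks move toward (helper name hypothetical, placement handling simplified):

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

/* Reserve, validate against @placement, then unreserve; propagate any
 * failure instead of assuming the reservation succeeded. */
static int vmw_bo_validate_checked(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement,
				   bool interruptible)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false,
	};
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_validate(bo, placement, &ctx);
	ttm_bo_unreserve(bo);

	return ret;
}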
*/ struct vmw_surface_dirty { - struct svga3dsurface_cache cache; + struct vmw_surface_cache cache; size_t size; u32 num_subres; SVGA3dBox boxes[]; @@ -307,8 +308,8 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf, { uint32_t i; struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space; - const struct svga3d_surface_desc *desc = - svga3dsurface_get_desc(srf->metadata.format); + const struct SVGA3dSurfaceDesc *desc = + vmw_surface_get_desc(srf->metadata.format); for (i = 0; i < srf->metadata.num_sizes; ++i) { SVGA3dCmdHeader *header = &cmd->header; @@ -323,8 +324,7 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf, body->guest.ptr = *ptr; body->guest.ptr.offset += cur_offset->bo_offset; - body->guest.pitch = svga3dsurface_calculate_pitch(desc, - cur_size); + body->guest.pitch = vmw_surface_calculate_pitch(desc, cur_size); body->host.sid = srf->res.id; body->host.face = cur_offset->face; body->host.mipmap = cur_offset->mip; @@ -342,7 +342,7 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf, suffix->suffixSize = sizeof(*suffix); suffix->maximumOffset = - svga3dsurface_get_image_buffer_size(desc, cur_size, + vmw_surface_get_image_buffer_size(desc, cur_size, body->guest.pitch); suffix->flags.discard = 0; suffix->flags.unsynchronized = 0; @@ -432,7 +432,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res) goto out_no_id; } - if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) { + if (unlikely(res->id >= SVGA3D_HB_MAX_SURFACE_IDS)) { ret = -EBUSY; goto out_no_fifo; } @@ -751,7 +751,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, struct vmw_surface_offset *cur_offset; uint32_t num_sizes; uint32_t size; - const struct svga3d_surface_desc *desc; + const SVGA3dSurfaceDesc *desc; if (unlikely(vmw_user_surface_size == 0)) vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + @@ -772,8 +772,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) + ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset)); - desc = svga3dsurface_get_desc(req->format); - if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { + desc = vmw_surface_get_desc(req->format); + if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) { VMW_DEBUG_USER("Invalid format %d for surface creation.\n", req->format); return -EINVAL; @@ -833,13 +833,13 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { for (j = 0; j < metadata->mip_levels[i]; ++j) { - uint32_t stride = svga3dsurface_calculate_pitch - (desc, cur_size); + uint32_t stride = vmw_surface_calculate_pitch( + desc, cur_size); cur_offset->face = i; cur_offset->mip = j; cur_offset->bo_offset = cur_bo_offset; - cur_bo_offset += svga3dsurface_get_image_buffer_size + cur_bo_offset += vmw_surface_get_image_buffer_size (desc, cur_size, stride); ++cur_offset; ++cur_size; @@ -1711,10 +1711,10 @@ out_bad_resource: * than partial z slices are dirtied. 
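The vmwgfx_surface.c hunks in this section switch from the svga3dsurface_* helpers to the renamed vmw_surface_* cache API from vmw_surface_cache.h. A small sketch of how those helpers combine to size a single face/mip image, following the same call pattern as vmw_surface_dma_encode() (helper name hypothetical; u32 return type assumed):

#include "vmw_surface_cache.h"

/* Bytes needed for one image of @format at @size, computed the way
 * vmw_surface_dma_encode() and vmw_surface_define_ioctl() do. */
static u32 vmw_surface_image_bytes_example(SVGA3dSurfaceFormat format,
					   const struct drm_vmw_size *size)
{
	const SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
	u32 pitch = vmw_surface_calculate_pitch(desc, size);

	return vmw_surface_get_image_buffer_size(desc, size, pitch);
}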
*/ static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty, - const struct svga3dsurface_loc *loc_start, - const struct svga3dsurface_loc *loc_end) + const struct vmw_surface_loc *loc_start, + const struct vmw_surface_loc *loc_end) { - const struct svga3dsurface_cache *cache = &dirty->cache; + const struct vmw_surface_cache *cache = &dirty->cache; SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource]; u32 mip = loc_start->sub_resource % cache->num_mip_levels; const struct drm_vmw_size *size = &cache->mip[mip].size; @@ -1760,7 +1760,7 @@ static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty, */ static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres) { - const struct svga3dsurface_cache *cache = &dirty->cache; + const struct vmw_surface_cache *cache = &dirty->cache; u32 mip = subres % cache->num_mip_levels; const struct drm_vmw_size *size = &cache->mip[mip].size; SVGA3dBox *box = &dirty->boxes[subres]; @@ -1783,15 +1783,15 @@ static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res, struct vmw_surface_dirty *dirty = (struct vmw_surface_dirty *) res->dirty; size_t backup_end = res->backup_offset + res->backup_size; - struct svga3dsurface_loc loc1, loc2; - const struct svga3dsurface_cache *cache; + struct vmw_surface_loc loc1, loc2; + const struct vmw_surface_cache *cache; start = max_t(size_t, start, res->backup_offset) - res->backup_offset; end = min(end, backup_end) - res->backup_offset; cache = &dirty->cache; - svga3dsurface_get_loc(cache, &loc1, start); - svga3dsurface_get_loc(cache, &loc2, end - 1); - svga3dsurface_inc_loc(cache, &loc2); + vmw_surface_get_loc(cache, &loc1, start); + vmw_surface_get_loc(cache, &loc2, end - 1); + vmw_surface_inc_loc(cache, &loc2); if (loc1.sheet != loc2.sheet) { u32 sub_res; @@ -1811,12 +1811,12 @@ static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res, vmw_subres_dirty_add(dirty, &loc1, &loc2); } else { /* Dirty range covers multiple sub-resources */ - struct svga3dsurface_loc loc_min, loc_max; + struct vmw_surface_loc loc_min, loc_max; u32 sub_res; - svga3dsurface_max_loc(cache, loc1.sub_resource, &loc_max); + vmw_surface_max_loc(cache, loc1.sub_resource, &loc_max); vmw_subres_dirty_add(dirty, &loc1, &loc_max); - svga3dsurface_min_loc(cache, loc2.sub_resource - 1, &loc_min); + vmw_surface_min_loc(cache, loc2.sub_resource - 1, &loc_min); vmw_subres_dirty_add(dirty, &loc_min, &loc2); for (sub_res = loc1.sub_resource + 1; sub_res < loc2.sub_resource - 1; ++sub_res) @@ -1833,7 +1833,7 @@ static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res, { struct vmw_surface_dirty *dirty = (struct vmw_surface_dirty *) res->dirty; - const struct svga3dsurface_cache *cache = &dirty->cache; + const struct vmw_surface_cache *cache = &dirty->cache; size_t backup_end = res->backup_offset + cache->mip_chain_bytes; SVGA3dBox *box = &dirty->boxes[0]; u32 box_c2; @@ -1872,12 +1872,11 @@ static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start, static int vmw_surface_dirty_sync(struct vmw_resource *res) { struct vmw_private *dev_priv = res->dev_priv; - bool has_dx = 0; u32 i, num_dirty; struct vmw_surface_dirty *dirty = (struct vmw_surface_dirty *) res->dirty; size_t alloc_size; - const struct svga3dsurface_cache *cache = &dirty->cache; + const struct vmw_surface_cache *cache = &dirty->cache; struct { SVGA3dCmdHeader header; SVGA3dCmdDXUpdateSubResource body; @@ -1899,7 +1898,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res) if (!num_dirty) goto out; - 
alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2)); + alloc_size = num_dirty * ((has_sm4_context(dev_priv)) ? sizeof(*cmd1) : sizeof(*cmd2)); cmd = VMW_CMD_RESERVE(dev_priv, alloc_size); if (!cmd) return -ENOMEM; @@ -1917,7 +1916,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res) * DX_UPDATE_SUBRESOURCE is aware of array surfaces. * UPDATE_GB_IMAGE is not. */ - if (has_dx) { + if (has_sm4_context(dev_priv)) { cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE; cmd1->header.size = sizeof(cmd1->body); cmd1->body.sid = res->id; @@ -1990,7 +1989,7 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res) } num_samples = max_t(u32, 1, metadata->multisample_count); - ret = svga3dsurface_setup_cache(&metadata->base_size, metadata->format, + ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format, num_mip, num_layers, num_samples, &dirty->cache); if (ret) @@ -2081,7 +2080,7 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv, *srf_out = NULL; if (req->scanout) { - if (!svga3dsurface_is_screen_target_format(req->format)) { + if (!vmw_surface_is_screen_target_format(req->format)) { VMW_DEBUG_USER("Invalid Screen Target surface format."); return -EINVAL; } @@ -2096,10 +2095,10 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv, return -EINVAL; } } else { - const struct svga3d_surface_desc *desc = - svga3dsurface_get_desc(req->format); + const SVGA3dSurfaceDesc *desc = + vmw_surface_get_desc(req->format); - if (desc->block_desc == SVGA3DBLOCKDESC_NONE) { + if (desc->blockDesc == SVGA3DBLOCKDESC_NONE) { VMW_DEBUG_USER("Invalid surface format.\n"); return -EINVAL; } @@ -2148,11 +2147,12 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv, sample_count = metadata->multisample_count; srf->res.backup_size = - svga3dsurface_get_serialized_size_extended(metadata->format, - metadata->base_size, - metadata->mip_levels[0], - num_layers, - sample_count); + vmw_surface_get_serialized_size_extended( + metadata->format, + metadata->base_size, + metadata->mip_levels[0], + num_layers, + sample_count); if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) srf->res.backup_size += sizeof(SVGA3dDXSOState); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 0488042fb287..b0973c27e774 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -222,36 +222,6 @@ static bool __vmw_piter_sg_next(struct vmw_piter *viter) } -/** - * __vmw_piter_non_sg_page: Helper functions to return a pointer - * to the current page. - * - * @viter: Pointer to the iterator - * - * These functions return a pointer to the page currently - * pointed to by @viter. Functions are selected depending on the - * current mapping mode. - */ -static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter) -{ - return viter->pages[viter->i]; -} - -/** - * __vmw_piter_phys_addr: Helper functions to return the DMA - * address of the current page. - * - * @viter: Pointer to the iterator - * - * These functions return the DMA address of the page currently - * pointed to by @viter. Functions are selected depending on the - * current mapping mode. 
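With the vmw_dma_phys mode and the per-mode page() hook removed (vmwgfx_ttm_buffer.c hunk above), page access in the iterator is uniform and only the DMA-address hook varies by mapping mode. The iterator is still driven the same way; a minimal walk over a vmw_sg_table (loop body is a placeholder):

/* Walk every page of @vsgt from page offset 0.  vmw_piter_start()
 * positions the iterator one step before the first page, so
 * vmw_piter_next() must be called before the first access. */
static void vmw_piter_walk_example(const struct vmw_sg_table *vsgt)
{
	struct vmw_piter iter;

	vmw_piter_start(&iter, vsgt, 0);
	while (vmw_piter_next(&iter)) {
		dma_addr_t addr = vmw_piter_dma_addr(&iter);
		struct page *page = vmw_piter_page(&iter);

		(void)addr;	/* ... map, copy or program hardware here ... */
		(void)page;
	}
}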
- */ -static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter) -{ - return page_to_phys(viter->pages[viter->i]); -} - static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter) { return viter->addrs[viter->i]; @@ -279,13 +249,8 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt, { viter->i = p_offset - 1; viter->num_pages = vsgt->num_pages; - viter->page = &__vmw_piter_non_sg_page; viter->pages = vsgt->pages; switch (vsgt->mode) { - case vmw_dma_phys: - viter->next = &__vmw_piter_non_sg_next; - viter->dma_address = &__vmw_piter_phys_addr; - break; case vmw_dma_alloc_coherent: viter->next = &__vmw_piter_non_sg_next; viter->dma_address = &__vmw_piter_dma_addr; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index 8338b1d20f2a..b09094b50c5d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -586,13 +586,13 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr) container_of(entry->base.bo, typeof(*vbo), base); if (entry->cpu_blit) { - struct ttm_operation_ctx ctx = { + struct ttm_operation_ctx ttm_ctx = { .interruptible = intr, .no_wait_gpu = false }; ret = ttm_bo_validate(entry->base.bo, - &vmw_nonfixed_placement, &ctx); + &vmw_nonfixed_placement, &ttm_ctx); } else { ret = vmw_validation_bo_validate_single (entry->base.bo, intr, entry->as_mob); diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c index 371202ebe900..cfda74490765 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_kms.c +++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c @@ -302,7 +302,6 @@ static const struct drm_simple_display_pipe_funcs display_funcs = { .mode_valid = display_mode_valid, .enable = display_enable, .disable = display_disable, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, .check = display_check, .update = display_update, }; diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c index 0c1c50271a88..ac37053412a1 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c @@ -111,8 +111,6 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) if (ret) return ret; - drm->irq_enabled = 1; - drm_kms_helper_poll_init(drm); /* diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c index 5506336594e2..064056503ebb 100644 --- a/drivers/gpu/drm/zte/zx_drm_drv.c +++ b/drivers/gpu/drm/zte/zx_drm_drv.c @@ -75,12 +75,6 @@ static int zx_drm_bind(struct device *dev) goto out_unbind; } - /* - * We will manage irq handler on our own. In this case, irq_enabled - * need to be true for using vblank core support. 
- */ - drm->irq_enabled = true; - drm_mode_config_reset(drm); drm_kms_helper_poll_init(drm); diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 949fde433ea2..569930552957 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -72,10 +72,7 @@ struct vga_device { unsigned int io_norm_cnt; /* normal IO count */ unsigned int mem_norm_cnt; /* normal MEM count */ bool bridge_has_one_vga; - /* allow IRQ enable/disable hook */ - void *cookie; - void (*irq_set_state)(void *cookie, bool enable); - unsigned int (*set_vga_decode)(void *cookie, bool decode); + unsigned int (*set_decode)(struct pci_dev *pdev, bool decode); }; static LIST_HEAD(vga_list); @@ -218,13 +215,6 @@ int vga_remove_vgacon(struct pci_dev *pdev) #endif EXPORT_SYMBOL(vga_remove_vgacon); -static inline void vga_irq_set_state(struct vga_device *vgadev, bool state) -{ - if (vgadev->irq_set_state) - vgadev->irq_set_state(vgadev->cookie, state); -} - - /* If we don't ever use VGA arb we should avoid turning off anything anywhere due to old X servers getting confused about the boot device not being VGA */ @@ -284,12 +274,6 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, if (vgadev == conflict) continue; - /* Check if the architecture allows a conflict between those - * 2 devices or if they are on separate domains - */ - if (!vga_conflicts(vgadev->pdev, conflict->pdev)) - continue; - /* We have a possible conflict. before we go further, we must * check if we sit on the same bus as the conflicting device. * if we don't, then we must tie both IO and MEM resources @@ -331,10 +315,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, if ((match & conflict->decodes) & VGA_RSRC_LEGACY_IO) pci_bits |= PCI_COMMAND_IO; - if (pci_bits) { - vga_irq_set_state(conflict, false); + if (pci_bits) flags |= PCI_VGA_STATE_CHANGE_DECODES; - } } if (change_bridge) @@ -371,9 +353,6 @@ enable_them: pci_set_vga_state(vgadev->pdev, true, pci_bits, flags); - if (!vgadev->bridge_has_one_vga) - vga_irq_set_state(vgadev, true); - vgadev->owns |= wants; lock_them: vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK); @@ -826,7 +805,7 @@ static void __vga_set_legacy_decoding(struct pci_dev *pdev, goto bail; /* don't let userspace futz with kernel driver decodes */ - if (userspace && vgadev->set_vga_decode) + if (userspace && vgadev->set_decode) goto bail; /* update the device decodes + counter */ @@ -840,6 +819,17 @@ bail: spin_unlock_irqrestore(&vga_lock, flags); } +/** + * vga_set_legacy_decoding + * @pdev: pci device of the VGA card + * @decodes: bit mask of what legacy regions the card decodes + * + * Indicates to the arbiter if the card decodes legacy VGA IOs, legacy VGA + * Memory, both, or none. All cards default to both, the card driver (fbdev for + * example) should tell the arbiter if it has disabled legacy decoding, so the + * card can be left out of the arbitration process (and can be safe to take + * interrupts at any time. + */ void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes) { __vga_set_legacy_decoding(pdev, decodes, false); @@ -849,17 +839,11 @@ EXPORT_SYMBOL(vga_set_legacy_decoding); /** * vga_client_register - register or unregister a VGA arbitration client * @pdev: pci device of the VGA client - * @cookie: client cookie to be used in callbacks - * @irq_set_state: irq state change callback - * @set_vga_decode: vga decode change callback + * @set_decode: vga decode change callback * * Clients have two callback mechanisms they can use. 
* - * @irq_set_state callback: If a client can't disable its GPUs VGA - * resources, then we need to be able to ask it to turn off its irqs when we - * turn off its mem and io decoding. - * - * @set_vga_decode callback: If a client can disable its GPU VGA resource, it + * @set_decode callback: If a client can disable its GPU VGA resource, it * will get a callback from this to set the encode/decode state. * * Rationale: we cannot disable VGA decode resources unconditionally some single @@ -872,15 +856,12 @@ EXPORT_SYMBOL(vga_set_legacy_decoding); * This function does not check whether a client for @pdev has been registered * already. * - * To unregister just call this function with @irq_set_state and @set_vga_decode - * both set to NULL for the same @pdev as originally used to register them. + * To unregister just call vga_client_unregister(). * * Returns: 0 on success, -1 on failure */ -int vga_client_register(struct pci_dev *pdev, void *cookie, - void (*irq_set_state)(void *cookie, bool state), - unsigned int (*set_vga_decode)(void *cookie, - bool decode)) +int vga_client_register(struct pci_dev *pdev, + unsigned int (*set_decode)(struct pci_dev *pdev, bool decode)) { int ret = -ENODEV; struct vga_device *vgadev; @@ -891,9 +872,7 @@ int vga_client_register(struct pci_dev *pdev, void *cookie, if (!vgadev) goto bail; - vgadev->irq_set_state = irq_set_state; - vgadev->set_vga_decode = set_vga_decode; - vgadev->cookie = cookie; + vgadev->set_decode = set_decode; ret = 0; bail: @@ -1403,9 +1382,9 @@ static void vga_arbiter_notify_clients(void) new_state = false; else new_state = true; - if (vgadev->set_vga_decode) { - new_decodes = vgadev->set_vga_decode(vgadev->cookie, - new_state); + if (vgadev->set_decode) { + new_decodes = vgadev->set_decode(vgadev->pdev, + new_state); vga_update_device_decodes(vgadev, new_decodes); } } diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 318864d52837..cf27df8048db 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -119,10 +119,9 @@ static bool vfio_pci_is_denylisted(struct pci_dev *pdev) * has no way to get to it and routing can be disabled externally at the * bridge. 
*/ -static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga) +static unsigned int vfio_pci_set_decode(struct pci_dev *pdev, bool single_vga) { - struct vfio_pci_device *vdev = opaque; - struct pci_dev *tmp = NULL, *pdev = vdev->pdev; + struct pci_dev *tmp = NULL; unsigned char max_busnr; unsigned int decodes; @@ -1954,10 +1953,10 @@ static int vfio_pci_vga_init(struct vfio_pci_device *vdev) if (!vfio_pci_is_vga(pdev)) return 0; - ret = vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode); + ret = vga_client_register(pdev, vfio_pci_set_decode); if (ret) return ret; - vga_set_legacy_decoding(pdev, vfio_pci_set_vga_decode(vdev, false)); + vga_set_legacy_decoding(pdev, vfio_pci_set_decode(pdev, false)); return 0; } @@ -1967,7 +1966,7 @@ static void vfio_pci_vga_uninit(struct vfio_pci_device *vdev) if (!vfio_pci_is_vga(pdev)) return; - vga_client_register(pdev, NULL, NULL, NULL); + vga_client_unregister(pdev); vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM | VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM); diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c index 1447324ed0b6..45e64016db32 100644 --- a/drivers/video/fbdev/arcfb.c +++ b/drivers/video/fbdev/arcfb.c @@ -446,7 +446,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf, /* modded from epson 1355 */ unsigned long p; - int err=-EINVAL; + int err; unsigned int fbmemlength,x,y,w,h, bitppos, startpos, endpos, bitcount; struct arcfb_par *par; unsigned int xres; diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c index 3e006da47752..84c56f525889 100644 --- a/drivers/video/fbdev/asiliantfb.c +++ b/drivers/video/fbdev/asiliantfb.c @@ -227,6 +227,9 @@ static int asiliantfb_check_var(struct fb_var_screeninfo *var, { unsigned long Ftarget, ratio, remainder; + if (!var->pixclock) + return -EINVAL; + ratio = 1000000 / var->pixclock; remainder = 1000000 % var->pixclock; Ftarget = 1000000 * ratio + (1000000 * remainder) / var->pixclock; diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 1c855145711b..71fb710f1ce3 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -67,7 +67,7 @@ static struct fb_info *get_fb_info(unsigned int idx) mutex_lock(®istration_lock); fb_info = registered_fb[idx]; if (fb_info) - atomic_inc(&fb_info->count); + refcount_inc(&fb_info->count); mutex_unlock(®istration_lock); return fb_info; @@ -75,7 +75,7 @@ static struct fb_info *get_fb_info(unsigned int idx) static void put_fb_info(struct fb_info *fb_info) { - if (!atomic_dec_and_test(&fb_info->count)) + if (!refcount_dec_and_test(&fb_info->count)) return; if (fb_info->fbops->fb_destroy) fb_info->fbops->fb_destroy(fb_info); @@ -1592,7 +1592,7 @@ static int do_register_framebuffer(struct fb_info *fb_info) if (!registered_fb[i]) break; fb_info->node = i; - atomic_set(&fb_info->count, 1); + refcount_set(&fb_info->count, 1); mutex_init(&fb_info->lock); mutex_init(&fb_info->mm_lock); diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c index ba33b4dce0df..2398b3d48fed 100644 --- a/drivers/video/fbdev/ep93xx-fb.c +++ b/drivers/video/fbdev/ep93xx-fb.c @@ -548,7 +548,7 @@ static int ep93xxfb_probe(struct platform_device *pdev) } ep93xxfb_set_par(info); - clk_enable(fbi->clk); + clk_prepare_enable(fbi->clk); err = register_framebuffer(info); if (err) @@ -577,7 +577,7 @@ static int ep93xxfb_remove(struct platform_device *pdev) struct ep93xx_fbi *fbi = info->par; 
unregister_framebuffer(info); - clk_disable(fbi->clk); + clk_disable_unprepare(fbi->clk); ep93xxfb_dealloc_videomem(info); fb_dealloc_cmap(&info->cmap); diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c index 8fbde92ae8b9..25801e8e3f74 100644 --- a/drivers/video/fbdev/kyro/fbdev.c +++ b/drivers/video/fbdev/kyro/fbdev.c @@ -372,6 +372,11 @@ static int kyro_dev_overlay_viewport_set(u32 x, u32 y, u32 ulWidth, u32 ulHeight /* probably haven't called CreateOverlay yet */ return -EINVAL; + if (ulWidth == 0 || ulWidth == 0xffffffff || + ulHeight == 0 || ulHeight == 0xffffffff || + (x < 2 && ulWidth + 2 == 0)) + return -EINVAL; + /* Stop Ramdac Output */ DisableRamdacOutput(deviceInfo.pSTGReg); @@ -394,6 +399,9 @@ static int kyrofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct kyrofb_info *par = info->par; + if (!var->pixclock) + return -EINVAL; + if (var->bits_per_pixel != 16 && var->bits_per_pixel != 32) { printk(KERN_WARNING "kyrofb: depth not supported: %u\n", var->bits_per_pixel); return -EINVAL; diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c index c0f4f402da3f..966df2a07360 100644 --- a/drivers/video/fbdev/neofb.c +++ b/drivers/video/fbdev/neofb.c @@ -585,7 +585,7 @@ neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) DBG("neofb_check_var"); - if (PICOS2KHZ(var->pixclock) > par->maxClock) + if (var->pixclock && PICOS2KHZ(var->pixclock) > par->maxClock) return -EINVAL; /* Is the mode larger than the LCD panel? */ diff --git a/drivers/video/fbdev/omap2/omapfb/dss/apply.c b/drivers/video/fbdev/omap2/omapfb/dss/apply.c index c71021091828..acca991c7540 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/apply.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/apply.c @@ -108,7 +108,7 @@ static struct { } dss_data; /* protects dss_data */ -static spinlock_t data_lock; +static DEFINE_SPINLOCK(data_lock); /* lock for blocking functions */ static DEFINE_MUTEX(apply_lock); static DECLARE_COMPLETION(extra_updated_completion); @@ -131,8 +131,6 @@ static void apply_init_priv(void) struct mgr_priv_data *mp; int i; - spin_lock_init(&data_lock); - for (i = 0; i < num_ovls; ++i) { struct ovl_priv_data *op; diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c index 55554b0433cb..84d5e23ad7d3 100644 --- a/drivers/video/fbdev/riva/fbdev.c +++ b/drivers/video/fbdev/riva/fbdev.c @@ -1084,6 +1084,9 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) int mode_valid = 0; NVTRACE_ENTER(); + if (!var->pixclock) + return -EINVAL; + switch (var->bits_per_pixel) { case 1 ... 
8: var->red.offset = var->green.offset = var->blue.offset = 0; diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index eda448b7a0c9..1e2f71c2f8a8 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ b/drivers/video/fbdev/ssd1307fb.c @@ -82,6 +82,11 @@ struct ssd1307fb_par { struct regulator *vbat_reg; u32 vcomh; u32 width; + /* Cached address ranges */ + u8 col_start; + u8 col_end; + u8 page_start; + u8 page_end; }; struct ssd1307fb_array { @@ -152,17 +157,72 @@ static inline int ssd1307fb_write_cmd(struct i2c_client *client, u8 cmd) return ret; } -static void ssd1307fb_update_display(struct ssd1307fb_par *par) +static int ssd1307fb_set_col_range(struct ssd1307fb_par *par, u8 col_start, + u8 cols) +{ + u8 col_end = col_start + cols - 1; + int ret; + + if (col_start == par->col_start && col_end == par->col_end) + return 0; + + ret = ssd1307fb_write_cmd(par->client, SSD1307FB_SET_COL_RANGE); + if (ret < 0) + return ret; + + ret = ssd1307fb_write_cmd(par->client, col_start); + if (ret < 0) + return ret; + + ret = ssd1307fb_write_cmd(par->client, col_end); + if (ret < 0) + return ret; + + par->col_start = col_start; + par->col_end = col_end; + return 0; +} + +static int ssd1307fb_set_page_range(struct ssd1307fb_par *par, u8 page_start, + u8 pages) +{ + u8 page_end = page_start + pages - 1; + int ret; + + if (page_start == par->page_start && page_end == par->page_end) + return 0; + + ret = ssd1307fb_write_cmd(par->client, SSD1307FB_SET_PAGE_RANGE); + if (ret < 0) + return ret; + + ret = ssd1307fb_write_cmd(par->client, page_start); + if (ret < 0) + return ret; + + ret = ssd1307fb_write_cmd(par->client, page_end); + if (ret < 0) + return ret; + + par->page_start = page_start; + par->page_end = page_end; + return 0; +} + +static int ssd1307fb_update_rect(struct ssd1307fb_par *par, unsigned int x, + unsigned int y, unsigned int width, + unsigned int height) { struct ssd1307fb_array *array; u8 *vmem = par->info->screen_buffer; unsigned int line_length = par->info->fix.line_length; - unsigned int pages = DIV_ROUND_UP(par->height, 8); - int i, j, k; + unsigned int pages = DIV_ROUND_UP(y % 8 + height, 8); + u32 array_idx = 0; + int ret, i, j, k; - array = ssd1307fb_alloc_array(par->width * pages, SSD1307FB_DATA); + array = ssd1307fb_alloc_array(width * pages, SSD1307FB_DATA); if (!array) - return; + return -ENOMEM; /* * The screen is divided in pages, each having a height of 8 @@ -193,27 +253,44 @@ static void ssd1307fb_update_display(struct ssd1307fb_par *par) * (5) A4 B4 C4 D4 E4 F4 G4 H4 */ - for (i = 0; i < pages; i++) { - for (j = 0; j < par->width; j++) { - int m = 8; - u32 array_idx = i * par->width + j; - array->data[array_idx] = 0; - /* Last page may be partial */ - if (i + 1 == pages && par->height % 8) - m = par->height % 8; + ret = ssd1307fb_set_col_range(par, par->col_offset + x, width); + if (ret < 0) + goto out_free; + + ret = ssd1307fb_set_page_range(par, par->page_offset + y / 8, pages); + if (ret < 0) + goto out_free; + + for (i = y / 8; i < y / 8 + pages; i++) { + int m = 8; + + /* Last page may be partial */ + if (8 * (i + 1) > par->height) + m = par->height % 8; + for (j = x; j < x + width; j++) { + u8 data = 0; + for (k = 0; k < m; k++) { u8 byte = vmem[(8 * i + k) * line_length + j / 8]; u8 bit = (byte >> (j % 8)) & 1; - array->data[array_idx] |= bit << k; + data |= bit << k; } + array->data[array_idx++] = data; } } - ssd1307fb_write_array(par->client, array, par->width * pages); + ret = ssd1307fb_write_array(par->client, array, width * pages); + 
+out_free: kfree(array); + return ret; } +static int ssd1307fb_update_display(struct ssd1307fb_par *par) +{ + return ssd1307fb_update_rect(par, 0, 0, par->width, par->height); +} static ssize_t ssd1307fb_write(struct fb_info *info, const char __user *buf, size_t count, loff_t *ppos) @@ -222,6 +299,7 @@ static ssize_t ssd1307fb_write(struct fb_info *info, const char __user *buf, unsigned long total_size; unsigned long p = *ppos; void *dst; + int ret; total_size = info->fix.smem_len; @@ -239,7 +317,9 @@ static ssize_t ssd1307fb_write(struct fb_info *info, const char __user *buf, if (copy_from_user(dst, buf, count)) return -EFAULT; - ssd1307fb_update_display(par); + ret = ssd1307fb_update_display(par); + if (ret < 0) + return ret; *ppos += count; @@ -260,21 +340,24 @@ static void ssd1307fb_fillrect(struct fb_info *info, const struct fb_fillrect *r { struct ssd1307fb_par *par = info->par; sys_fillrect(info, rect); - ssd1307fb_update_display(par); + ssd1307fb_update_rect(par, rect->dx, rect->dy, rect->width, + rect->height); } static void ssd1307fb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct ssd1307fb_par *par = info->par; sys_copyarea(info, area); - ssd1307fb_update_display(par); + ssd1307fb_update_rect(par, area->dx, area->dy, area->width, + area->height); } static void ssd1307fb_imageblit(struct fb_info *info, const struct fb_image *image) { struct ssd1307fb_par *par = info->par; sys_imageblit(info, image); - ssd1307fb_update_display(par); + ssd1307fb_update_rect(par, image->dx, image->dy, image->width, + image->height); } static const struct fb_ops ssd1307fb_ops = { @@ -454,37 +537,11 @@ static int ssd1307fb_init(struct ssd1307fb_par *par) if (ret < 0) return ret; - /* Set column range */ - ret = ssd1307fb_write_cmd(par->client, SSD1307FB_SET_COL_RANGE); - if (ret < 0) - return ret; - - ret = ssd1307fb_write_cmd(par->client, par->col_offset); - if (ret < 0) - return ret; - - ret = ssd1307fb_write_cmd(par->client, par->col_offset + par->width - 1); - if (ret < 0) - return ret; - - /* Set page range */ - ret = ssd1307fb_write_cmd(par->client, SSD1307FB_SET_PAGE_RANGE); - if (ret < 0) - return ret; - - ret = ssd1307fb_write_cmd(par->client, par->page_offset); - if (ret < 0) - return ret; - - ret = ssd1307fb_write_cmd(par->client, - par->page_offset + - DIV_ROUND_UP(par->height, 8) - 1); + /* Clear the screen */ + ret = ssd1307fb_update_display(par); if (ret < 0) return ret; - /* Clear the screen */ - ssd1307fb_update_display(par); - /* Turn on the display */ ret = ssd1307fb_write_cmd(par->client, SSD1307FB_DISPLAY_ON); if (ret < 0) |
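The ssd1307fb hunks above replace whole-screen refreshes with ssd1307fb_update_rect(), which reprograms the panel's column/page window only when it changes and repacks just the dirty rectangle. A minimal, self-contained sketch of that packing step (plain userspace C; pack_rect(), WIDTH, HEIGHT, LINE_LENGTH and the 1bpp row-major shadow buffer are assumptions for illustration, not part of the driver) could look like:

/*
 * Sketch, not the driver: mimic how ssd1307fb_update_rect() maps a dirty
 * rectangle of a 1bpp, row-major shadow framebuffer onto SSD130x "pages",
 * where each byte sent to the panel holds 8 vertically stacked pixels of
 * one column.
 */
#include <stdio.h>
#include <string.h>

#define WIDTH       128
#define HEIGHT      64
#define LINE_LENGTH (WIDTH / 8)   /* 1bpp, like info->fix.line_length */

static unsigned char vmem[HEIGHT * LINE_LENGTH]; /* shadow framebuffer */

/* Pack the dirty rectangle the same way the driver's update loop does. */
static size_t pack_rect(unsigned int x, unsigned int y,
			unsigned int width, unsigned int height,
			unsigned char *out)
{
	/* DIV_ROUND_UP(y % 8 + height, 8): rows round out to whole pages */
	unsigned int pages = (y % 8 + height + 7) / 8;
	size_t idx = 0;
	unsigned int i, j, k;

	for (i = y / 8; i < y / 8 + pages; i++) {
		unsigned int m = 8;

		/* Last page may extend past the panel height. */
		if (8 * (i + 1) > HEIGHT)
			m = HEIGHT % 8;

		for (j = x; j < x + width; j++) {
			unsigned char data = 0;

			for (k = 0; k < m; k++) {
				unsigned char byte =
					vmem[(8 * i + k) * LINE_LENGTH + j / 8];
				unsigned char bit = (byte >> (j % 8)) & 1;

				data |= bit << k;
			}
			out[idx++] = data;
		}
	}
	return idx;	/* == width * pages, the length handed to the panel */
}

int main(void)
{
	unsigned char out[WIDTH * HEIGHT / 8];
	size_t n;

	memset(vmem, 0xff, sizeof(vmem));	/* pretend the screen is lit */
	n = pack_rect(10, 12, 20, 9, out);	/* 9 rows at y=12 span 2 pages */

	printf("bytes to transfer: %zu\n", n);	/* 20 columns * 2 pages = 40 */
	return 0;
}

Because every panel byte covers eight rows of one column, a dirty rectangle is rounded out to whole pages vertically; that is why the driver derives the page count from y % 8 + height rather than from height alone, and why caching col_start/col_end and page_start/page_end lets repeated updates to the same window skip the addressing commands entirely.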