Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
26 files changed, 697 insertions, 1821 deletions
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index a4fabe208d9f..faddae3d6ac2 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig @@ -16,13 +16,6 @@ config DRM_VMWGFX virtual hardware. The compiled module will be called "vmwgfx.ko". -config DRM_VMWGFX_FBCON - depends on DRM_VMWGFX && DRM_FBDEV_EMULATION - bool "Enable framebuffer console under vmwgfx by default" - help - Choose this option if you are shipping a new vmwgfx - userspace driver that supports using the kernel driver. - config DRM_VMWGFX_MKSSTATS bool "Enable mksGuestStats instrumentation of vmwgfx by default" depends on DRM_VMWGFX diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index eee73b9aa404..2a644f035597 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_drv.o \ +vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \ vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \ @@ -12,6 +12,4 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_d vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \ vmwgfx_gem.o -vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o - obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c index 26a55fef1ab5..932b125ebf3d 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.c +++ b/drivers/gpu/drm/vmwgfx/ttm_object.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * - * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA + * Copyright (c) 2009-2022 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -44,16 +44,20 @@ #define pr_fmt(fmt) "[TTM] " fmt +#include "ttm_object.h" +#include "vmwgfx_drv.h" + #include <linux/list.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/atomic.h> #include <linux/module.h> -#include "ttm_object.h" -#include "vmwgfx_drv.h" +#include <linux/hashtable.h> MODULE_IMPORT_NS(DMA_BUF); +#define VMW_TTM_OBJECT_REF_HT_ORDER 10 + /** * struct ttm_object_file * @@ -74,16 +78,14 @@ struct ttm_object_file { struct ttm_object_device *tdev; spinlock_t lock; struct list_head ref_list; - struct vmwgfx_open_hash ref_hash; + DECLARE_HASHTABLE(ref_hash, VMW_TTM_OBJECT_REF_HT_ORDER); struct kref refcount; }; /* * struct ttm_object_device * - * @object_lock: lock that protects the object_hash hash table. - * - * @object_hash: hash table for fast lookup of object global names. + * @object_lock: lock that protects idr. * * @object_count: Per device object count. 
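The conversion above swaps the driver's private vmwgfx_open_hash for a fixed-size table from <linux/hashtable.h>; the ttm_tfile_find_ref*() helpers just below then walk a single bucket. A minimal sketch of that generic API, with a hypothetical item type standing in for vmwgfx_hash_item:

    #include <linux/hashtable.h>

    struct item {
            unsigned long key;
            struct hlist_node head;         /* links the item into one bucket */
    };

    /* 1 << 10 buckets, mirroring VMW_TTM_OBJECT_REF_HT_ORDER above */
    static DEFINE_HASHTABLE(table, 10);

    static void item_add(struct item *it)
    {
            /* hashes it->key to pick a bucket; safe against concurrent readers */
            hash_add_rcu(table, &it->head, it->key);
    }

    /* caller must hold rcu_read_lock() */
    static struct item *item_find(unsigned long key)
    {
            struct item *it;

            /* visits only the one bucket that 'key' hashes to */
            hash_for_each_possible_rcu(table, it, head, key) {
                    if (it->key == key)
                            return it;
            }
            return NULL;
    }

    static void item_del(struct item *it)
    {
            hash_del_rcu(&it->head);
    }

As in the patch, writers still serialize under a spinlock (tfile->lock here); only lookups run locklessly under RCU.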
* @@ -92,7 +94,6 @@ struct ttm_object_file { struct ttm_object_device { spinlock_t object_lock; - struct vmwgfx_open_hash object_hash; atomic_t object_count; struct dma_buf_ops ops; void (*dmabuf_release)(struct dma_buf *dma_buf); @@ -138,6 +139,36 @@ ttm_object_file_ref(struct ttm_object_file *tfile) return tfile; } +static int ttm_tfile_find_ref_rcu(struct ttm_object_file *tfile, + uint64_t key, + struct vmwgfx_hash_item **p_hash) +{ + struct vmwgfx_hash_item *hash; + + hash_for_each_possible_rcu(tfile->ref_hash, hash, head, key) { + if (hash->key == key) { + *p_hash = hash; + return 0; + } + } + return -EINVAL; +} + +static int ttm_tfile_find_ref(struct ttm_object_file *tfile, + uint64_t key, + struct vmwgfx_hash_item **p_hash) +{ + struct vmwgfx_hash_item *hash; + + hash_for_each_possible(tfile->ref_hash, hash, head, key) { + if (hash->key == key) { + *p_hash = hash; + return 0; + } + } + return -EINVAL; +} + static void ttm_object_file_destroy(struct kref *kref) { struct ttm_object_file *tfile = @@ -240,37 +271,35 @@ void ttm_base_object_unref(struct ttm_base_object **p_base) * Return: A pointer to the object if successful or NULL otherwise. */ struct ttm_base_object * -ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key) +ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key) { struct vmwgfx_hash_item *hash; - struct vmwgfx_open_hash *ht = &tfile->ref_hash; int ret; rcu_read_lock(); - ret = vmwgfx_ht_find_item_rcu(ht, key, &hash); + ret = ttm_tfile_find_ref_rcu(tfile, key, &hash); if (ret) { rcu_read_unlock(); return NULL; } __release(RCU); - return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj; + return hlist_entry(hash, struct ttm_ref_object, hash)->obj; } EXPORT_SYMBOL(ttm_base_object_noref_lookup); struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, - uint32_t key) + uint64_t key) { struct ttm_base_object *base = NULL; struct vmwgfx_hash_item *hash; - struct vmwgfx_open_hash *ht = &tfile->ref_hash; int ret; rcu_read_lock(); - ret = vmwgfx_ht_find_item_rcu(ht, key, &hash); + ret = ttm_tfile_find_ref_rcu(tfile, key, &hash); if (likely(ret == 0)) { - base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj; + base = hlist_entry(hash, struct ttm_ref_object, hash)->obj; if (!kref_get_unless_zero(&base->refcount)) base = NULL; } @@ -280,7 +309,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, } struct ttm_base_object * -ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key) +ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key) { struct ttm_base_object *base; @@ -299,7 +328,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, bool *existed, bool require_existed) { - struct vmwgfx_open_hash *ht = &tfile->ref_hash; struct ttm_ref_object *ref; struct vmwgfx_hash_item *hash; int ret = -EINVAL; @@ -312,10 +340,10 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, while (ret == -EINVAL) { rcu_read_lock(); - ret = vmwgfx_ht_find_item_rcu(ht, base->handle, &hash); + ret = ttm_tfile_find_ref_rcu(tfile, base->handle, &hash); if (ret == 0) { - ref = drm_hash_entry(hash, struct ttm_ref_object, hash); + ref = hlist_entry(hash, struct ttm_ref_object, hash); if (kref_get_unless_zero(&ref->kref)) { rcu_read_unlock(); break; @@ -337,21 +365,14 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, kref_init(&ref->kref); spin_lock(&tfile->lock); - ret = vmwgfx_ht_insert_item_rcu(ht, &ref->hash); - - if (likely(ret == 0)) { - 
list_add_tail(&ref->head, &tfile->ref_list); - kref_get(&base->refcount); - spin_unlock(&tfile->lock); - if (existed != NULL) - *existed = false; - break; - } + hash_add_rcu(tfile->ref_hash, &ref->hash.head, ref->hash.key); + ret = 0; + list_add_tail(&ref->head, &tfile->ref_list); + kref_get(&base->refcount); spin_unlock(&tfile->lock); - BUG_ON(ret != -EINVAL); - - kfree(ref); + if (existed != NULL) + *existed = false; } return ret; @@ -363,10 +384,8 @@ ttm_ref_object_release(struct kref *kref) struct ttm_ref_object *ref = container_of(kref, struct ttm_ref_object, kref); struct ttm_object_file *tfile = ref->tfile; - struct vmwgfx_open_hash *ht; - ht = &tfile->ref_hash; - (void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash); + hash_del_rcu(&ref->hash.head); list_del(&ref->head); spin_unlock(&tfile->lock); @@ -378,18 +397,17 @@ ttm_ref_object_release(struct kref *kref) int ttm_ref_object_base_unref(struct ttm_object_file *tfile, unsigned long key) { - struct vmwgfx_open_hash *ht = &tfile->ref_hash; struct ttm_ref_object *ref; struct vmwgfx_hash_item *hash; int ret; spin_lock(&tfile->lock); - ret = vmwgfx_ht_find_item(ht, key, &hash); + ret = ttm_tfile_find_ref(tfile, key, &hash); if (unlikely(ret != 0)) { spin_unlock(&tfile->lock); return -EINVAL; } - ref = drm_hash_entry(hash, struct ttm_ref_object, hash); + ref = hlist_entry(hash, struct ttm_ref_object, hash); kref_put(&ref->kref, ttm_ref_object_release); spin_unlock(&tfile->lock); return 0; @@ -416,16 +434,13 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile) } spin_unlock(&tfile->lock); - vmwgfx_ht_remove(&tfile->ref_hash); ttm_object_file_unref(&tfile); } -struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, - unsigned int hash_order) +struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev) { struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); - int ret; if (unlikely(tfile == NULL)) return NULL; @@ -435,34 +450,21 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, kref_init(&tfile->refcount); INIT_LIST_HEAD(&tfile->ref_list); - ret = vmwgfx_ht_create(&tfile->ref_hash, hash_order); - if (ret) - goto out_err; + hash_init(tfile->ref_hash); return tfile; -out_err: - vmwgfx_ht_remove(&tfile->ref_hash); - - kfree(tfile); - - return NULL; } struct ttm_object_device * -ttm_object_device_init(unsigned int hash_order, - const struct dma_buf_ops *ops) +ttm_object_device_init(const struct dma_buf_ops *ops) { struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); - int ret; if (unlikely(tdev == NULL)) return NULL; spin_lock_init(&tdev->object_lock); atomic_set(&tdev->object_count, 0); - ret = vmwgfx_ht_create(&tdev->object_hash, hash_order); - if (ret != 0) - goto out_no_object_hash; /* * Our base is at VMWGFX_NUM_MOB + 1 because we want to create @@ -477,10 +479,6 @@ ttm_object_device_init(unsigned int hash_order, tdev->dmabuf_release = tdev->ops.release; tdev->ops.release = ttm_prime_dmabuf_release; return tdev; - -out_no_object_hash: - kfree(tdev); - return NULL; } void ttm_object_device_release(struct ttm_object_device **p_tdev) @@ -491,7 +489,6 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) WARN_ON_ONCE(!idr_is_empty(&tdev->idr)); idr_destroy(&tdev->idr); - vmwgfx_ht_remove(&tdev->object_hash); kfree(tdev); } diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h index 1a2fa0f83f5f..f0ebbe340ad6 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.h +++ 
b/drivers/gpu/drm/vmwgfx/ttm_object.h @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * Copyright (c) 2006-2022 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -42,8 +42,6 @@ #include <linux/list.h> #include <linux/rcupdate.h> -#include "vmwgfx_hashtab.h" - /** * enum ttm_object_type * @@ -104,7 +102,7 @@ struct ttm_base_object { struct ttm_object_file *tfile; struct kref refcount; void (*refcount_release) (struct ttm_base_object **base); - u32 handle; + u64 handle; enum ttm_object_type object_type; u32 shareable; }; @@ -164,7 +162,7 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile, */ extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file - *tfile, uint32_t key); + *tfile, uint64_t key); /** * ttm_base_object_lookup_for_ref @@ -178,7 +176,7 @@ extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file */ extern struct ttm_base_object * -ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key); +ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key); /** * ttm_base_object_unref @@ -237,14 +235,12 @@ extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile, * ttm_object_file_init - initialize a struct ttm_object file * * @tdev: A struct ttm_object device this file is initialized on. - * @hash_order: Order of the hash table used to hold the reference objects. * * This is typically called by the file_ops::open function. */ extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device - *tdev, - unsigned int hash_order); + *tdev); /** * ttm_object_file_release - release data held by a ttm_object_file @@ -262,7 +258,6 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile); /** * ttm_object device init - initialize a struct ttm_object_device * - * @hash_order: Order of hash table used to hash the base objects. * @ops: DMA buf ops for prime objects of this device. 
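The lookup paths earlier in ttm_object.c pair that RCU bucket walk with kref_get_unless_zero(), because an entry found under rcu_read_lock() may already be halfway through teardown. A hedged sketch of the idiom, with table_find_rcu() standing in for the real walk:

    #include <linux/kref.h>
    #include <linux/rcupdate.h>

    struct obj {
            struct kref refcount;
    };

    /* assumed: an RCU-protected lookup such as the bucket walk sketched above */
    struct obj *table_find_rcu(unsigned long key);

    struct obj *obj_lookup(unsigned long key)
    {
            struct obj *o;

            rcu_read_lock();
            o = table_find_rcu(key);
            /*
             * The object may be mid-destruction: take a reference only if
             * the count is still non-zero, otherwise report "not found".
             */
            if (o && !kref_get_unless_zero(&o->refcount))
                    o = NULL;
            rcu_read_unlock();

            return o;
    }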
* * This function is typically called on device initialization to prepare @@ -270,8 +265,7 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile); */ extern struct ttm_object_device * -ttm_object_device_init(unsigned int hash_order, - const struct dma_buf_ops *ops); +ttm_object_device_init(const struct dma_buf_ops *ops); /** * ttm_object_device_release - release data held by a ttm_object_device @@ -314,7 +308,7 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, kfree_rcu(__obj, __prime.base.rhead) struct ttm_base_object * -ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key); +ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key); /** * ttm_base_object_noref_release - release a base object pointer looked up diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c index 09fe20e918f9..c52c7bf1485b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c @@ -483,8 +483,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, d.src_addr = NULL; d.dst_pages = dst->ttm->pages; d.src_pages = src->ttm->pages; - d.dst_num_pages = dst->resource->num_pages; - d.src_num_pages = src->resource->num_pages; + d.dst_num_pages = PFN_UP(dst->resource->size); + d.src_num_pages = PFN_UP(src->resource->size); d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL); d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL); d.diff = diff; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 822251aaab0a..321c551784a1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -194,7 +194,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv, int ret = 0; place = vmw_vram_placement.placement[0]; - place.lpfn = bo->resource->num_pages; + place.lpfn = PFN_UP(bo->resource->size); placement.num_placement = 1; placement.placement = &place; placement.num_busy_placement = 1; @@ -211,7 +211,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv, * that situation. */ if (bo->resource->mem_type == TTM_PL_VRAM && - bo->resource->start < bo->resource->num_pages && + bo->resource->start < PFN_UP(bo->resource->size) && bo->resource->start > 0 && buf->base.pin_count == 0) { ctx.interruptible = false; @@ -352,7 +352,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo) if (virtual) return virtual; - ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map); + ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map); if (ret) DRM_ERROR("Buffer object map failed: %d.\n", ret); @@ -807,9 +807,23 @@ int vmw_dumb_create(struct drm_file *file_priv, { struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_buffer_object *vbo; + int cpp = DIV_ROUND_UP(args->bpp, 8); int ret; - args->pitch = args->width * ((args->bpp + 7) / 8); + switch (cpp) { + case 1: /* DRM_FORMAT_C8 */ + case 2: /* DRM_FORMAT_RGB565 */ + case 4: /* DRM_FORMAT_XRGB8888 */ + break; + default: + /* + * Dumb buffers don't allow anything else. 
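The vmw_dumb_create() hunk in vmwgfx_bo.c, which continues just below, derives cpp from bpp and rejects everything except 1, 2 and 4 bytes per pixel. A sketch of a userspace allocation those rules accept, using only the raw DRM UAPI through libdrm's drmIoctl() (error handling trimmed):

    #include <xf86drm.h>            /* drmIoctl(); pulls in drm.h/drm_mode.h */

    int create_dumb_xrgb8888(int drm_fd, unsigned int w, unsigned int h)
    {
            struct drm_mode_create_dumb create = {
                    .width  = w,
                    .height = h,
                    .bpp    = 32,   /* cpp 4: XRGB8888; 8 and 16 bpp also pass */
            };

            /* on success the driver fills in .handle, .pitch and .size */
            if (drmIoctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                    return -1;

            return (int)create.handle;
    }

A request with, say, bpp = 24 (cpp 3) now fails with -EINVAL instead of being given a pitch the driver cannot scan out.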
+ * This is tested via IGT's dumb_buffers + */ + return -EINVAL; + } + + args->pitch = args->width * cpp; args->size = ALIGN(args->pitch * args->height, PAGE_SIZE); ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 82ef58ccdd42..47bc0b411055 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA + * Copyright 2014-2022 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -28,6 +28,8 @@ #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" +#include <linux/hashtable.h> + #define VMW_CMDBUF_RES_MAN_HT_ORDER 12 /** @@ -59,7 +61,7 @@ struct vmw_cmdbuf_res { * @resources and @list are protected by the cmdbuf mutex for now. */ struct vmw_cmdbuf_res_manager { - struct vmwgfx_open_hash resources; + DECLARE_HASHTABLE(resources, VMW_CMDBUF_RES_MAN_HT_ORDER); struct list_head list; struct vmw_private *dev_priv; }; @@ -82,14 +84,13 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man, u32 user_key) { struct vmwgfx_hash_item *hash; - int ret; unsigned long key = user_key | (res_type << 24); - ret = vmwgfx_ht_find_item(&man->resources, key, &hash); - if (unlikely(ret != 0)) - return ERR_PTR(ret); - - return drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res; + hash_for_each_possible_rcu(man->resources, hash, head, key) { + if (hash->key == key) + return hlist_entry(hash, struct vmw_cmdbuf_res, hash)->res; + } + return ERR_PTR(-EINVAL); } /** @@ -105,7 +106,7 @@ static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man, struct vmw_cmdbuf_res *entry) { list_del(&entry->head); - WARN_ON(vmwgfx_ht_remove_item(&man->resources, &entry->hash)); + hash_del_rcu(&entry->hash.head); vmw_resource_unreference(&entry->res); kfree(entry); } @@ -159,7 +160,6 @@ void vmw_cmdbuf_res_commit(struct list_head *list) void vmw_cmdbuf_res_revert(struct list_head *list) { struct vmw_cmdbuf_res *entry, *next; - int ret; list_for_each_entry_safe(entry, next, list, head) { switch (entry->state) { @@ -167,8 +167,8 @@ void vmw_cmdbuf_res_revert(struct list_head *list) vmw_cmdbuf_res_free(entry->man, entry); break; case VMW_CMDBUF_RES_DEL: - ret = vmwgfx_ht_insert_item(&entry->man->resources, &entry->hash); - BUG_ON(ret); + hash_add_rcu(entry->man->resources, &entry->hash.head, + entry->hash.key); list_move_tail(&entry->head, &entry->man->list); entry->state = VMW_CMDBUF_RES_COMMITTED; break; @@ -199,26 +199,20 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, struct list_head *list) { struct vmw_cmdbuf_res *cres; - int ret; cres = kzalloc(sizeof(*cres), GFP_KERNEL); if (unlikely(!cres)) return -ENOMEM; cres->hash.key = user_key | (res_type << 24); - ret = vmwgfx_ht_insert_item(&man->resources, &cres->hash); - if (unlikely(ret != 0)) { - kfree(cres); - goto out_invalid_key; - } + hash_add_rcu(man->resources, &cres->hash.head, cres->hash.key); cres->state = VMW_CMDBUF_RES_ADD; cres->res = vmw_resource_reference(res); cres->man = man; list_add_tail(&cres->head, list); -out_invalid_key: - return ret; + return 0; } /** @@ -243,24 +237,26 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, struct list_head *list, 
struct vmw_resource **res_p) { - struct vmw_cmdbuf_res *entry; + struct vmw_cmdbuf_res *entry = NULL; struct vmwgfx_hash_item *hash; - int ret; + unsigned long key = user_key | (res_type << 24); - ret = vmwgfx_ht_find_item(&man->resources, user_key | (res_type << 24), - &hash); - if (likely(ret != 0)) + hash_for_each_possible_rcu(man->resources, hash, head, key) { + if (hash->key == key) { + entry = hlist_entry(hash, struct vmw_cmdbuf_res, hash); + break; + } + } + if (unlikely(!entry)) return -EINVAL; - entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash); - switch (entry->state) { case VMW_CMDBUF_RES_ADD: vmw_cmdbuf_res_free(man, entry); *res_p = NULL; break; case VMW_CMDBUF_RES_COMMITTED: - (void) vmwgfx_ht_remove_item(&man->resources, &entry->hash); + hash_del_rcu(&entry->hash.head); list_del(&entry->head); entry->state = VMW_CMDBUF_RES_DEL; list_add_tail(&entry->head, list); @@ -287,7 +283,6 @@ struct vmw_cmdbuf_res_manager * vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv) { struct vmw_cmdbuf_res_manager *man; - int ret; man = kzalloc(sizeof(*man), GFP_KERNEL); if (!man) @@ -295,12 +290,8 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv) man->dev_priv = dev_priv; INIT_LIST_HEAD(&man->list); - ret = vmwgfx_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER); - if (ret == 0) - return man; - - kfree(man); - return ERR_PTR(ret); + hash_init(man->resources); + return man; } /** @@ -320,7 +311,6 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) list_for_each_entry_safe(entry, next, &man->list, head) vmw_cmdbuf_res_free(man, entry); - vmwgfx_ht_remove(&man->resources); kfree(man); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index 79b30dc9d825..b78a10312fad 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -33,6 +33,7 @@ #include <drm/ttm/ttm_placement.h> #include "vmwgfx_drv.h" +#include "vmwgfx_mksstat.h" #include "vmwgfx_resource_priv.h" #include "vmwgfx_so.h" @@ -72,12 +73,24 @@ struct vmw_cotable_info { bool); }; + +/* + * Getting the initial size right is difficult because it all depends + * on what the userspace is doing. The sizes will be aligned up to + * a PAGE_SIZE so we just want to make sure that for majority of apps + * the initial number of entries doesn't require an immediate resize. + * For all cotables except SVGACOTableDXElementLayoutEntry and + * SVGACOTableDXBlendStateEntry the initial number of entries fits + * within the PAGE_SIZE. For SVGACOTableDXElementLayoutEntry and + * SVGACOTableDXBlendStateEntry we want to reserve two pages, + * because that's what all apps will require initially. 
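The sizing comment above is implemented by the co_info[] initializers that follow: reserving PAGE_SIZE/sizeof(entry) + 1 entries makes the first allocation span exactly two pages. A small arithmetic sketch of why (illustration only, not driver code):

    #include <linux/mm.h>           /* PAGE_SIZE, PAGE_ALIGN() */

    /*
     * For an entry size e <= PAGE_SIZE, (PAGE_SIZE / e + 1) entries take
     * floor(PAGE_SIZE / e) * e + e bytes: strictly more than one page,
     * but no more than PAGE_SIZE + e, so the aligned size is two pages.
     */
    static inline size_t cotable_initial_bytes(size_t e)
    {
            size_t entries = PAGE_SIZE / e + 1;

            return PAGE_ALIGN(entries * e); /* == 2 * PAGE_SIZE */
    }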
+ */ static const struct vmw_cotable_info co_info[] = { {1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy}, {1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy}, {1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy}, - {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL}, - {1, sizeof(SVGACOTableDXBlendStateEntry), NULL}, + {PAGE_SIZE/sizeof(SVGACOTableDXElementLayoutEntry) + 1, sizeof(SVGACOTableDXElementLayoutEntry), NULL}, + {PAGE_SIZE/sizeof(SVGACOTableDXBlendStateEntry) + 1, sizeof(SVGACOTableDXBlendStateEntry), NULL}, {1, sizeof(SVGACOTableDXDepthStencilEntry), NULL}, {1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL}, {1, sizeof(SVGACOTableDXSamplerEntry), NULL}, @@ -395,9 +408,12 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) int ret; size_t i; + MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE); + MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE); + ret = vmw_cotable_readback(res); if (ret) - return ret; + goto out_done; cur_size_read_back = vcotbl->size_read_back; vcotbl->size_read_back = old_size_read_back; @@ -411,7 +427,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) true, true, vmw_bo_bo_free, &buf); if (ret) { DRM_ERROR("Failed initializing new cotable MOB.\n"); - return ret; + goto out_done; } bo = &buf->base; @@ -427,7 +443,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) * Do a page by page copy of COTables. This eliminates slow vmap()s. * This should really be a TTM utility. */ - for (i = 0; i < old_bo->resource->num_pages; ++i) { + for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) { bool dummy; ret = ttm_bo_kmap(old_bo, i, 1, &old_map); @@ -485,6 +501,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) /* Release the pin acquired in vmw_bo_init */ ttm_bo_unpin(bo); + MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE); + return 0; out_map_new: @@ -494,6 +512,9 @@ out_wait: ttm_bo_unreserve(bo); vmw_bo_unreference(&buf); +out_done: + MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE); + return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index d7bd5eb1d3ac..bd02cb0e6837 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -25,13 +25,17 @@ * **************************************************************************/ -#include <linux/dma-mapping.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/cc_platform.h> + +#include "vmwgfx_drv.h" + +#include "vmwgfx_devcaps.h" +#include "vmwgfx_mksstat.h" +#include "vmwgfx_binding.h" +#include "ttm_object.h" #include <drm/drm_aperture.h> #include <drm/drm_drv.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_ttm_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_module.h> @@ -41,17 +45,14 @@ #include <drm/ttm/ttm_placement.h> #include <generated/utsrelease.h> -#include "ttm_object.h" -#include "vmwgfx_binding.h" -#include "vmwgfx_devcaps.h" -#include "vmwgfx_drv.h" -#include "vmwgfx_mksstat.h" +#include <linux/cc_platform.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/version.h> #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" -#define VMW_MIN_INITIAL_WIDTH 800 -#define VMW_MIN_INITIAL_HEIGHT 600 - /* * Fully encoded drm commands. 
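vmw_cotable_resize() above also gains MKS_STAT_TIME_DECL/PUSH/POP bracketing, the driver's mksGuestStats timing instrumentation. The shape of the pattern, with MKSSTAT_KERN_EXAMPLE as a placeholder for a real counter such as the MKSSTAT_KERN_COTABLE_RESIZE used here:

    #include "vmwgfx_mksstat.h"

    static int do_work(void);       /* the region being timed; assumed */

    static int timed_operation(void)
    {
            int ret;
            MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXAMPLE);

            MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXAMPLE);
            ret = do_work();
            /* every exit path must pop, as the resize error paths above do */
            MKS_STAT_TIME_POP(MKSSTAT_KERN_EXAMPLE);
            return ret;
    }

When CONFIG_DRM_VMWGFX_MKSSTATS is disabled, the three macros compile away to nothing.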
Might move to vmw_drm.h */ @@ -262,7 +263,6 @@ static const struct pci_device_id vmw_pci_id_list[] = { }; MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); -static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); static int vmw_restrict_iommu; static int vmw_force_coherent; static int vmw_restrict_dma_mask; @@ -272,8 +272,6 @@ static int vmw_probe(struct pci_dev *, const struct pci_device_id *); static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, void *ptr); -MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); -module_param_named(enable_fbdev, enable_fbdev, int, 0600); MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); @@ -623,8 +621,8 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv) width = vmw_read(dev_priv, SVGA_REG_WIDTH); height = vmw_read(dev_priv, SVGA_REG_HEIGHT); - width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH); - height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT); + width = max_t(uint32_t, width, VMWGFX_MIN_INITIAL_WIDTH); + height = max_t(uint32_t, height, VMWGFX_MIN_INITIAL_HEIGHT); if (width > dev_priv->fb_max_width || height > dev_priv->fb_max_height) { @@ -633,8 +631,8 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv) * This is a host error and shouldn't occur. */ - width = VMW_MIN_INITIAL_WIDTH; - height = VMW_MIN_INITIAL_HEIGHT; + width = VMWGFX_MIN_INITIAL_WIDTH; + height = VMWGFX_MIN_INITIAL_HEIGHT; } dev_priv->initial_width = width; @@ -806,6 +804,43 @@ static int vmw_detect_version(struct vmw_private *dev) return 0; } +static void vmw_write_driver_id(struct vmw_private *dev) +{ + if ((dev->capabilities2 & SVGA_CAP2_DX2) != 0) { + vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID, + SVGA_REG_GUEST_DRIVER_ID_LINUX); + + vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION1, + LINUX_VERSION_MAJOR << 24 | + LINUX_VERSION_PATCHLEVEL << 16 | + LINUX_VERSION_SUBLEVEL); + vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION2, + VMWGFX_DRIVER_MAJOR << 24 | + VMWGFX_DRIVER_MINOR << 16 | + VMWGFX_DRIVER_PATCHLEVEL); + vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION3, 0); + + vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID, + SVGA_REG_GUEST_DRIVER_ID_SUBMIT); + } +} + +static void vmw_sw_context_init(struct vmw_private *dev_priv) +{ + struct vmw_sw_context *sw_context = &dev_priv->ctx; + + hash_init(sw_context->res_ht); +} + +static void vmw_sw_context_fini(struct vmw_private *dev_priv) +{ + struct vmw_sw_context *sw_context = &dev_priv->ctx; + + vfree(sw_context->cmd_bounce); + if (sw_context->staged_bindings) + vmw_binding_state_free(sw_context->staged_bindings); +} + static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) { int ret; @@ -815,6 +850,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->drm.dev_private = dev_priv; + vmw_sw_context_init(dev_priv); + mutex_init(&dev_priv->cmdbuf_mutex); mutex_init(&dev_priv->binding_mutex); spin_lock_init(&dev_priv->resource_lock); @@ -844,9 +881,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->assume_16bpp = !!vmw_assume_16bpp; - dev_priv->enable_fb = enable_fbdev; - - dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); vmw_print_bitmap(&dev_priv->drm, "Capabilities", dev_priv->capabilities, @@ -970,7 +1004,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) goto out_err0; } - dev_priv->tdev = ttm_object_device_init(12, 
&vmw_prime_dmabuf_ops); + dev_priv->tdev = ttm_object_device_init(&vmw_prime_dmabuf_ops); if (unlikely(dev_priv->tdev == NULL)) { drm_err(&dev_priv->drm, @@ -1091,12 +1125,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)", VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR, VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE); - - if (dev_priv->enable_fb) { - vmw_fifo_resource_inc(dev_priv); - vmw_svga_enable(dev_priv); - vmw_fb_init(dev_priv); - } + vmw_write_driver_id(dev_priv); dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; register_pm_notifier(&dev_priv->pm_nb); @@ -1143,15 +1172,10 @@ static void vmw_driver_unload(struct drm_device *dev) unregister_pm_notifier(&dev_priv->pm_nb); - if (dev_priv->ctx.res_ht_initialized) - vmwgfx_ht_remove(&dev_priv->ctx.res_ht); - vfree(dev_priv->ctx.cmd_bounce); - if (dev_priv->enable_fb) { - vmw_fb_off(dev_priv); - vmw_fb_close(dev_priv); - vmw_fifo_resource_dec(dev_priv); - vmw_svga_disable(dev_priv); - } + vmw_sw_context_fini(dev_priv); + vmw_fifo_resource_dec(dev_priv); + + vmw_svga_disable(dev_priv); vmw_kms_close(dev_priv); vmw_overlay_close(dev_priv); @@ -1173,8 +1197,6 @@ static void vmw_driver_unload(struct drm_device *dev) vmw_irq_uninstall(&dev_priv->drm); ttm_object_device_release(&dev_priv->tdev); - if (dev_priv->ctx.staged_bindings) - vmw_binding_state_free(dev_priv->ctx.staged_bindings); for (i = vmw_res_context; i < vmw_res_max; ++i) idr_destroy(&dev_priv->res_idr[i]); @@ -1203,7 +1225,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) if (unlikely(!vmw_fp)) return ret; - vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); + vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev); if (unlikely(vmw_fp->tfile == NULL)) goto out_no_tfile; @@ -1291,8 +1313,6 @@ static void vmw_master_drop(struct drm_device *dev, struct vmw_private *dev_priv = vmw_priv(dev); vmw_kms_legacy_hotspot_clear(dev_priv); - if (!dev_priv->enable_fb) - vmw_svga_disable(dev_priv); } /** @@ -1485,25 +1505,19 @@ static int vmw_pm_freeze(struct device *kdev) DRM_ERROR("Failed to freeze modesetting.\n"); return ret; } - if (dev_priv->enable_fb) - vmw_fb_off(dev_priv); vmw_execbuf_release_pinned_bo(dev_priv); vmw_resource_evict_all(dev_priv); vmw_release_device_early(dev_priv); while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0); - if (dev_priv->enable_fb) - vmw_fifo_resource_dec(dev_priv); + vmw_fifo_resource_dec(dev_priv); if (atomic_read(&dev_priv->num_fifo_resources) != 0) { DRM_ERROR("Can't hibernate while 3D resources are active.\n"); - if (dev_priv->enable_fb) - vmw_fifo_resource_inc(dev_priv); + vmw_fifo_resource_inc(dev_priv); WARN_ON(vmw_request_device_late(dev_priv)); dev_priv->suspend_locked = false; if (dev_priv->suspend_state) vmw_kms_resume(dev); - if (dev_priv->enable_fb) - vmw_fb_on(dev_priv); return -EBUSY; } @@ -1523,24 +1537,19 @@ static int vmw_pm_restore(struct device *kdev) vmw_detect_version(dev_priv); - if (dev_priv->enable_fb) - vmw_fifo_resource_inc(dev_priv); + vmw_fifo_resource_inc(dev_priv); ret = vmw_request_device(dev_priv); if (ret) return ret; - if (dev_priv->enable_fb) - __vmw_svga_enable(dev_priv); + __vmw_svga_enable(dev_priv); vmw_fence_fifo_up(dev_priv->fman); dev_priv->suspend_locked = false; if (dev_priv->suspend_state) vmw_kms_resume(&dev_priv->drm); - if (dev_priv->enable_fb) - vmw_fb_on(dev_priv); - return 0; } @@ -1631,6 +1640,10 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if 
(ret) goto out_unload; + vmw_fifo_resource_inc(vmw); + vmw_svga_enable(vmw); + drm_fbdev_generic_setup(&vmw->drm, 0); + vmw_debugfs_gem_init(vmw); vmw_debugfs_resource_managers_init(vmw); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 09e2d738aa87..b062b020b378 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -30,6 +30,7 @@ #include <linux/suspend.h> #include <linux/sync_file.h> +#include <linux/hashtable.h> #include <drm/drm_auth.h> #include <drm/drm_device.h> @@ -42,7 +43,6 @@ #include "ttm_object.h" #include "vmwgfx_fence.h" -#include "vmwgfx_hashtab.h" #include "vmwgfx_reg.h" #include "vmwgfx_validation.h" @@ -62,6 +62,9 @@ #define VMWGFX_MAX_DISPLAYS 16 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 +#define VMWGFX_MIN_INITIAL_WIDTH 1280 +#define VMWGFX_MIN_INITIAL_HEIGHT 800 + #define VMWGFX_PCI_ID_SVGA2 0x0405 #define VMWGFX_PCI_ID_SVGA3 0x0406 @@ -93,6 +96,11 @@ #define VMW_RES_STREAM ttm_driver_type2 #define VMW_RES_FENCE ttm_driver_type3 #define VMW_RES_SHADER ttm_driver_type4 +#define VMW_RES_HT_ORDER 12 + +#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8 +#define VMW_CURSOR_SNOOP_WIDTH 64 +#define VMW_CURSOR_SNOOP_HEIGHT 64 #define MKSSTAT_CAPACITY_LOG2 5U #define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2) @@ -102,6 +110,11 @@ struct vmw_fpriv { bool gb_aware; /* user-space is guest-backed aware */ }; +struct vmwgfx_hash_item { + struct hlist_node head; + unsigned long key; +}; + /** * struct vmw_buffer_object - TTM buffer object with vmwgfx additions * @base: The TTM buffer object @@ -425,8 +438,7 @@ struct vmw_ctx_validation_info; * @ctx: The validation context */ struct vmw_sw_context{ - struct vmwgfx_open_hash res_ht; - bool res_ht_initialized; + DECLARE_HASHTABLE(res_ht, VMW_RES_HT_ORDER); bool kernel; struct vmw_fpriv *fp; struct drm_file *filp; @@ -546,7 +558,6 @@ struct vmw_private { * Framebuffer info. */ - void *fb_info; enum vmw_display_unit_type active_display_unit; struct vmw_legacy_display *ldu_priv; struct vmw_overlay *overlay_priv; @@ -605,8 +616,6 @@ struct vmw_private { struct mutex cmdbuf_mutex; struct mutex binding_mutex; - bool enable_fb; - /** * PM management. 
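struct vmwgfx_hash_item above is now just an hlist_node plus a key, so callers recover the enclosing object with hlist_entry(), a container_of() wrapper and the replacement for the removed drm_hash_entry(). A sketch with a hypothetical wrapper type standing in for ttm_ref_object or vmw_cmdbuf_res:

    #include <linux/list.h>         /* hlist_entry() */
    #include <linux/types.h>

    struct vmwgfx_hash_item {
            struct hlist_node head;
            unsigned long key;
    };

    struct wrapper {                /* hypothetical containing object */
            struct vmwgfx_hash_item hash;
            void *payload;
    };

    static struct wrapper *wrapper_from_hash(struct vmwgfx_hash_item *hash)
    {
            /* hlist_entry(ptr, type, member) == container_of(ptr, type, member) */
            return hlist_entry(hash, struct wrapper, hash);
    }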
*/ @@ -1184,35 +1193,6 @@ extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag, extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv, u32 flag, int *waiter_count); - -/** - * Kernel framebuffer - vmwgfx_fb.c - */ - -#ifdef CONFIG_DRM_FBDEV_EMULATION -int vmw_fb_init(struct vmw_private *vmw_priv); -int vmw_fb_close(struct vmw_private *dev_priv); -int vmw_fb_off(struct vmw_private *vmw_priv); -int vmw_fb_on(struct vmw_private *vmw_priv); -#else -static inline int vmw_fb_init(struct vmw_private *vmw_priv) -{ - return 0; -} -static inline int vmw_fb_close(struct vmw_private *dev_priv) -{ - return 0; -} -static inline int vmw_fb_off(struct vmw_private *vmw_priv) -{ - return 0; -} -static inline int vmw_fb_on(struct vmw_private *vmw_priv) -{ - return 0; -} -#endif - /** * Kernel modesetting - vmwgfx_kms.c */ @@ -1232,9 +1212,6 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv, bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, uint32_t pitch, uint32_t height); -u32 vmw_get_vblank_counter(struct drm_crtc *crtc); -int vmw_enable_vblank(struct drm_crtc *crtc); -void vmw_disable_vblank(struct drm_crtc *crtc); int vmw_kms_present(struct vmw_private *dev_priv, struct drm_file *file_priv, struct vmw_framebuffer *vfb, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index f085dbd4736d..a5379f6fb5ab 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009 - 2022 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -25,6 +25,7 @@ * **************************************************************************/ #include <linux/sync_file.h> +#include <linux/hashtable.h> #include "vmwgfx_drv.h" #include "vmwgfx_reg.h" @@ -34,7 +35,6 @@ #include "vmwgfx_binding.h" #include "vmwgfx_mksstat.h" -#define VMW_RES_HT_ORDER 12 /* * Helper macro to get dx_ctx_node if available otherwise print an error @@ -1047,7 +1047,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, if (unlikely(new_query_bo != sw_context->cur_query_bo)) { - if (unlikely(new_query_bo->base.resource->num_pages > 4)) { + if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) { VMW_DEBUG_USER("Query buffer too large.\n"); return -EINVAL; } @@ -3869,7 +3869,6 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, * @fence: Pointer to the fenc object. * @fence_handle: User-space fence handle. * @out_fence_fd: exported file descriptor for the fence. -1 if not used - * @sync_file: Only used to clean up in case of an error in this function. * * This function copies fence information to user-space. 
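A pattern that recurs across this series: struct ttm_resource lost its num_pages field, so call sites such as the query-buffer size check above re-derive the page count from the byte size with PFN_UP(), which rounds a byte count up to whole pages:

    #include <linux/pfn.h>          /* PFN_UP() */

    /*
     * PFN_UP(x) is ((x) + PAGE_SIZE - 1) >> PAGE_SHIFT, i.e. the number
     * of whole pages needed to back x bytes, so the removed
     * resource->num_pages and the new PFN_UP(resource->size) agree.
     */
    static unsigned long resource_num_pages(size_t size_in_bytes)
    {
            return PFN_UP(size_in_bytes);
    }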
If copying fails, the * user-space struct drm_vmw_fence_rep::error member is hopefully left @@ -4101,7 +4100,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, int ret; int32_t out_fence_fd = -1; struct sync_file *sync_file = NULL; - DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1); + DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1); if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { out_fence_fd = get_unused_fd_flags(O_CLOEXEC); @@ -4164,14 +4163,6 @@ int vmw_execbuf_process(struct drm_file *file_priv, if (sw_context->staged_bindings) vmw_binding_state_reset(sw_context->staged_bindings); - if (!sw_context->res_ht_initialized) { - ret = vmwgfx_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); - if (unlikely(ret != 0)) - goto out_unlock; - - sw_context->res_ht_initialized = true; - } - INIT_LIST_HEAD(&sw_context->staged_cmd_res); sw_context->ctx = &val_ctx; ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c deleted file mode 100644 index 5b85b477e4c6..000000000000 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ /dev/null @@ -1,831 +0,0 @@ -/************************************************************************** - * - * Copyright © 2007 David Airlie - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - **************************************************************************/ - -#include <linux/fb.h> -#include <linux/pci.h> - -#include <drm/drm_fourcc.h> -#include <drm/ttm/ttm_placement.h> - -#include "vmwgfx_drv.h" -#include "vmwgfx_kms.h" - -#define VMW_DIRTY_DELAY (HZ / 30) - -struct vmw_fb_par { - struct vmw_private *vmw_priv; - - void *vmalloc; - - struct mutex bo_mutex; - struct vmw_buffer_object *vmw_bo; - unsigned bo_size; - struct drm_framebuffer *set_fb; - struct drm_display_mode *set_mode; - u32 fb_x; - u32 fb_y; - bool bo_iowrite; - - u32 pseudo_palette[17]; - - unsigned max_width; - unsigned max_height; - - struct { - spinlock_t lock; - bool active; - unsigned x1; - unsigned y1; - unsigned x2; - unsigned y2; - } dirty; - - struct drm_crtc *crtc; - struct drm_connector *con; - struct delayed_work local_work; -}; - -static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green, - unsigned blue, unsigned transp, - struct fb_info *info) -{ - struct vmw_fb_par *par = info->par; - u32 *pal = par->pseudo_palette; - - if (regno > 15) { - DRM_ERROR("Bad regno %u.\n", regno); - return 1; - } - - switch (par->set_fb->format->depth) { - case 24: - case 32: - pal[regno] = ((red & 0xff00) << 8) | - (green & 0xff00) | - ((blue & 0xff00) >> 8); - break; - default: - DRM_ERROR("Bad depth %u, bpp %u.\n", - par->set_fb->format->depth, - par->set_fb->format->cpp[0] * 8); - return 1; - } - - return 0; -} - -static int vmw_fb_check_var(struct fb_var_screeninfo *var, - struct fb_info *info) -{ - int depth = var->bits_per_pixel; - struct vmw_fb_par *par = info->par; - struct vmw_private *vmw_priv = par->vmw_priv; - - switch (var->bits_per_pixel) { - case 32: - depth = (var->transp.length > 0) ? 32 : 24; - break; - default: - DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel); - return -EINVAL; - } - - switch (depth) { - case 24: - var->red.offset = 16; - var->green.offset = 8; - var->blue.offset = 0; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 0; - var->transp.offset = 0; - break; - case 32: - var->red.offset = 16; - var->green.offset = 8; - var->blue.offset = 0; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 8; - var->transp.offset = 24; - break; - default: - DRM_ERROR("Bad depth %u.\n", depth); - return -EINVAL; - } - - if ((var->xoffset + var->xres) > par->max_width || - (var->yoffset + var->yres) > par->max_height) { - DRM_ERROR("Requested geom can not fit in framebuffer\n"); - return -EINVAL; - } - - if (!vmw_kms_validate_mode_vram(vmw_priv, - var->xres * var->bits_per_pixel/8, - var->yoffset + var->yres)) { - DRM_ERROR("Requested geom can not fit in framebuffer\n"); - return -EINVAL; - } - - return 0; -} - -static int vmw_fb_blank(int blank, struct fb_info *info) -{ - return 0; -} - -/** - * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer - * - * @work: The struct work_struct associated with this task. - * - * This function flushes the dirty regions of the vmalloc framebuffer to the - * kms framebuffer, and if the kms framebuffer is visible, also updated the - * corresponding displays. Note that this function runs even if the kms - * framebuffer is not bound to a crtc and thus not visible, but it's turned - * off during hibernation using the par->dirty.active bool. 
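Everything in this deleted file (the vmalloc shadow buffer, dirty-rectangle tracking, deferred I/O and manual modesetting) is superseded by the single drm_fbdev_generic_setup() call added to vmw_probe() above. A hedged sketch of that modern pattern, with my_driver standing in for the real device structure:

    #include <drm/drm_drv.h>
    #include <drm/drm_fbdev_generic.h>

    struct my_driver {              /* assumed driver-private wrapper */
            struct drm_device drm;
    };

    static int my_probe_tail(struct my_driver *priv)
    {
            int ret;

            ret = drm_dev_register(&priv->drm, 0);
            if (ret)
                    return ret;

            /*
             * Let the generic helper build fbdev emulation on top of the
             * driver's KMS objects; 0 picks the preferred bpp automatically.
             */
            drm_fbdev_generic_setup(&priv->drm, 0);

            return 0;
    }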
- */ -static void vmw_fb_dirty_flush(struct work_struct *work) -{ - struct vmw_fb_par *par = container_of(work, struct vmw_fb_par, - local_work.work); - struct vmw_private *vmw_priv = par->vmw_priv; - struct fb_info *info = vmw_priv->fb_info; - unsigned long irq_flags; - s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0; - u32 cpp, max_x, max_y; - struct drm_clip_rect clip; - struct drm_framebuffer *cur_fb; - u8 *src_ptr, *dst_ptr; - struct vmw_buffer_object *vbo = par->vmw_bo; - void *virtual; - - if (!READ_ONCE(par->dirty.active)) - return; - - mutex_lock(&par->bo_mutex); - cur_fb = par->set_fb; - if (!cur_fb) - goto out_unlock; - - (void) ttm_bo_reserve(&vbo->base, false, false, NULL); - virtual = vmw_bo_map_and_cache(vbo); - if (!virtual) - goto out_unreserve; - - spin_lock_irqsave(&par->dirty.lock, irq_flags); - if (!par->dirty.active) { - spin_unlock_irqrestore(&par->dirty.lock, irq_flags); - goto out_unreserve; - } - - /* - * Handle panning when copying from vmalloc to framebuffer. - * Clip dirty area to framebuffer. - */ - cpp = cur_fb->format->cpp[0]; - max_x = par->fb_x + cur_fb->width; - max_y = par->fb_y + cur_fb->height; - - dst_x1 = par->dirty.x1 - par->fb_x; - dst_y1 = par->dirty.y1 - par->fb_y; - dst_x1 = max_t(s32, dst_x1, 0); - dst_y1 = max_t(s32, dst_y1, 0); - - dst_x2 = par->dirty.x2 - par->fb_x; - dst_y2 = par->dirty.y2 - par->fb_y; - dst_x2 = min_t(s32, dst_x2, max_x); - dst_y2 = min_t(s32, dst_y2, max_y); - w = dst_x2 - dst_x1; - h = dst_y2 - dst_y1; - w = max_t(s32, 0, w); - h = max_t(s32, 0, h); - - par->dirty.x1 = par->dirty.x2 = 0; - par->dirty.y1 = par->dirty.y2 = 0; - spin_unlock_irqrestore(&par->dirty.lock, irq_flags); - - if (w && h) { - dst_ptr = (u8 *)virtual + - (dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp); - src_ptr = (u8 *)par->vmalloc + - ((dst_y1 + par->fb_y) * info->fix.line_length + - (dst_x1 + par->fb_x) * cpp); - - while (h-- > 0) { - memcpy(dst_ptr, src_ptr, w*cpp); - dst_ptr += par->set_fb->pitches[0]; - src_ptr += info->fix.line_length; - } - - clip.x1 = dst_x1; - clip.x2 = dst_x2; - clip.y1 = dst_y1; - clip.y2 = dst_y2; - } - -out_unreserve: - ttm_bo_unreserve(&vbo->base); - if (w && h) { - WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0, - &clip, 1)); - vmw_cmd_flush(vmw_priv, false); - } -out_unlock: - mutex_unlock(&par->bo_mutex); -} - -static void vmw_fb_dirty_mark(struct vmw_fb_par *par, - unsigned x1, unsigned y1, - unsigned width, unsigned height) -{ - unsigned long flags; - unsigned x2 = x1 + width; - unsigned y2 = y1 + height; - - spin_lock_irqsave(&par->dirty.lock, flags); - if (par->dirty.x1 == par->dirty.x2) { - par->dirty.x1 = x1; - par->dirty.y1 = y1; - par->dirty.x2 = x2; - par->dirty.y2 = y2; - /* if we are active start the dirty work - * we share the work with the defio system */ - if (par->dirty.active) - schedule_delayed_work(&par->local_work, - VMW_DIRTY_DELAY); - } else { - if (x1 < par->dirty.x1) - par->dirty.x1 = x1; - if (y1 < par->dirty.y1) - par->dirty.y1 = y1; - if (x2 > par->dirty.x2) - par->dirty.x2 = x2; - if (y2 > par->dirty.y2) - par->dirty.y2 = y2; - } - spin_unlock_irqrestore(&par->dirty.lock, flags); -} - -static int vmw_fb_pan_display(struct fb_var_screeninfo *var, - struct fb_info *info) -{ - struct vmw_fb_par *par = info->par; - - if ((var->xoffset + var->xres) > var->xres_virtual || - (var->yoffset + var->yres) > var->yres_virtual) { - DRM_ERROR("Requested panning can not fit in framebuffer\n"); - return -EINVAL; - } - - mutex_lock(&par->bo_mutex); - par->fb_x = var->xoffset; - par->fb_y = 
var->yoffset; - if (par->set_fb) - vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width, - par->set_fb->height); - mutex_unlock(&par->bo_mutex); - - return 0; -} - -static void vmw_deferred_io(struct fb_info *info, struct list_head *pagereflist) -{ - struct vmw_fb_par *par = info->par; - unsigned long start, end, min, max; - unsigned long flags; - struct fb_deferred_io_pageref *pageref; - int y1, y2; - - min = ULONG_MAX; - max = 0; - list_for_each_entry(pageref, pagereflist, list) { - start = pageref->offset; - end = start + PAGE_SIZE - 1; - min = min(min, start); - max = max(max, end); - } - - if (min < max) { - y1 = min / info->fix.line_length; - y2 = (max / info->fix.line_length) + 1; - - spin_lock_irqsave(&par->dirty.lock, flags); - par->dirty.x1 = 0; - par->dirty.y1 = y1; - par->dirty.x2 = info->var.xres; - par->dirty.y2 = y2; - spin_unlock_irqrestore(&par->dirty.lock, flags); - - /* - * Since we've already waited on this work once, try to - * execute asap. - */ - cancel_delayed_work(&par->local_work); - schedule_delayed_work(&par->local_work, 0); - } -}; - -static struct fb_deferred_io vmw_defio = { - .delay = VMW_DIRTY_DELAY, - .deferred_io = vmw_deferred_io, -}; - -/* - * Draw code - */ - -static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) -{ - cfb_fillrect(info, rect); - vmw_fb_dirty_mark(info->par, rect->dx, rect->dy, - rect->width, rect->height); -} - -static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region) -{ - cfb_copyarea(info, region); - vmw_fb_dirty_mark(info->par, region->dx, region->dy, - region->width, region->height); -} - -static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image) -{ - cfb_imageblit(info, image); - vmw_fb_dirty_mark(info->par, image->dx, image->dy, - image->width, image->height); -} - -/* - * Bring up code - */ - -static int vmw_fb_create_bo(struct vmw_private *vmw_priv, - size_t size, struct vmw_buffer_object **out) -{ - struct vmw_buffer_object *vmw_bo; - int ret; - - ret = vmw_bo_create(vmw_priv, size, - &vmw_sys_placement, - false, false, - &vmw_bo_bo_free, &vmw_bo); - if (unlikely(ret != 0)) - return ret; - - *out = vmw_bo; - - return ret; -} - -static int vmw_fb_compute_depth(struct fb_var_screeninfo *var, - int *depth) -{ - switch (var->bits_per_pixel) { - case 32: - *depth = (var->transp.length > 0) ? 
32 : 24; - break; - default: - DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel); - return -EINVAL; - } - - return 0; -} - -static int vmwgfx_set_config_internal(struct drm_mode_set *set) -{ - struct drm_crtc *crtc = set->crtc; - struct drm_modeset_acquire_ctx ctx; - int ret; - - drm_modeset_acquire_init(&ctx, 0); - -restart: - ret = crtc->funcs->set_config(set, &ctx); - - if (ret == -EDEADLK) { - drm_modeset_backoff(&ctx); - goto restart; - } - - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); - - return ret; -} - -static int vmw_fb_kms_detach(struct vmw_fb_par *par, - bool detach_bo, - bool unref_bo) -{ - struct drm_framebuffer *cur_fb = par->set_fb; - int ret; - - /* Detach the KMS framebuffer from crtcs */ - if (par->set_mode) { - struct drm_mode_set set; - - set.crtc = par->crtc; - set.x = 0; - set.y = 0; - set.mode = NULL; - set.fb = NULL; - set.num_connectors = 0; - set.connectors = &par->con; - ret = vmwgfx_set_config_internal(&set); - if (ret) { - DRM_ERROR("Could not unset a mode.\n"); - return ret; - } - drm_mode_destroy(&par->vmw_priv->drm, par->set_mode); - par->set_mode = NULL; - } - - if (cur_fb) { - drm_framebuffer_put(cur_fb); - par->set_fb = NULL; - } - - if (par->vmw_bo && detach_bo && unref_bo) - vmw_bo_unreference(&par->vmw_bo); - - return 0; -} - -static int vmw_fb_kms_framebuffer(struct fb_info *info) -{ - struct drm_mode_fb_cmd2 mode_cmd = {0}; - struct vmw_fb_par *par = info->par; - struct fb_var_screeninfo *var = &info->var; - struct drm_framebuffer *cur_fb; - struct vmw_framebuffer *vfb; - int ret = 0, depth; - size_t new_bo_size; - - ret = vmw_fb_compute_depth(var, &depth); - if (ret) - return ret; - - mode_cmd.width = var->xres; - mode_cmd.height = var->yres; - mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width; - mode_cmd.pixel_format = - drm_mode_legacy_fb_format(var->bits_per_pixel, depth); - - cur_fb = par->set_fb; - if (cur_fb && cur_fb->width == mode_cmd.width && - cur_fb->height == mode_cmd.height && - cur_fb->format->format == mode_cmd.pixel_format && - cur_fb->pitches[0] == mode_cmd.pitches[0]) - return 0; - - /* Need new buffer object ? 
*/ - new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height; - ret = vmw_fb_kms_detach(par, - par->bo_size < new_bo_size || - par->bo_size > 2*new_bo_size, - true); - if (ret) - return ret; - - if (!par->vmw_bo) { - ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size, - &par->vmw_bo); - if (ret) { - DRM_ERROR("Failed creating a buffer object for " - "fbdev.\n"); - return ret; - } - par->bo_size = new_bo_size; - } - - vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL, - true, &mode_cmd); - if (IS_ERR(vfb)) - return PTR_ERR(vfb); - - par->set_fb = &vfb->base; - - return 0; -} - -static int vmw_fb_set_par(struct fb_info *info) -{ - struct vmw_fb_par *par = info->par; - struct vmw_private *vmw_priv = par->vmw_priv; - struct drm_mode_set set; - struct fb_var_screeninfo *var = &info->var; - struct drm_display_mode new_mode = { DRM_MODE("fb_mode", - DRM_MODE_TYPE_DRIVER, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) - }; - struct drm_display_mode *mode; - int ret; - - mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode); - if (!mode) { - DRM_ERROR("Could not create new fb mode.\n"); - return -ENOMEM; - } - - mode->hdisplay = var->xres; - mode->vdisplay = var->yres; - vmw_guess_mode_timing(mode); - - if (!vmw_kms_validate_mode_vram(vmw_priv, - mode->hdisplay * - DIV_ROUND_UP(var->bits_per_pixel, 8), - mode->vdisplay)) { - drm_mode_destroy(&vmw_priv->drm, mode); - return -EINVAL; - } - - mutex_lock(&par->bo_mutex); - ret = vmw_fb_kms_framebuffer(info); - if (ret) - goto out_unlock; - - par->fb_x = var->xoffset; - par->fb_y = var->yoffset; - - set.crtc = par->crtc; - set.x = 0; - set.y = 0; - set.mode = mode; - set.fb = par->set_fb; - set.num_connectors = 1; - set.connectors = &par->con; - - ret = vmwgfx_set_config_internal(&set); - if (ret) - goto out_unlock; - - vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, - par->set_fb->width, par->set_fb->height); - - /* If there already was stuff dirty we wont - * schedule a new work, so lets do it now */ - - schedule_delayed_work(&par->local_work, 0); - -out_unlock: - if (par->set_mode) - drm_mode_destroy(&vmw_priv->drm, par->set_mode); - par->set_mode = mode; - - mutex_unlock(&par->bo_mutex); - - return ret; -} - - -static const struct fb_ops vmw_fb_ops = { - .owner = THIS_MODULE, - .fb_check_var = vmw_fb_check_var, - .fb_set_par = vmw_fb_set_par, - .fb_setcolreg = vmw_fb_setcolreg, - .fb_fillrect = vmw_fb_fillrect, - .fb_copyarea = vmw_fb_copyarea, - .fb_imageblit = vmw_fb_imageblit, - .fb_pan_display = vmw_fb_pan_display, - .fb_blank = vmw_fb_blank, - .fb_mmap = fb_deferred_io_mmap, -}; - -int vmw_fb_init(struct vmw_private *vmw_priv) -{ - struct device *device = vmw_priv->drm.dev; - struct vmw_fb_par *par; - struct fb_info *info; - unsigned fb_width, fb_height; - unsigned int fb_bpp, fb_pitch, fb_size; - struct drm_display_mode *init_mode; - int ret; - - fb_bpp = 32; - - /* XXX As shouldn't these be as well. 
*/ - fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); - fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); - - fb_pitch = fb_width * fb_bpp / 8; - fb_size = fb_pitch * fb_height; - - info = framebuffer_alloc(sizeof(*par), device); - if (!info) - return -ENOMEM; - - /* - * Par - */ - vmw_priv->fb_info = info; - par = info->par; - memset(par, 0, sizeof(*par)); - INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush); - par->vmw_priv = vmw_priv; - par->vmalloc = NULL; - par->max_width = fb_width; - par->max_height = fb_height; - - ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width, - par->max_height, &par->con, - &par->crtc, &init_mode); - if (ret) - goto err_kms; - - info->var.xres = init_mode->hdisplay; - info->var.yres = init_mode->vdisplay; - - /* - * Create buffers and alloc memory - */ - par->vmalloc = vzalloc(fb_size); - if (unlikely(par->vmalloc == NULL)) { - ret = -ENOMEM; - goto err_free; - } - - /* - * Fixed and var - */ - strcpy(info->fix.id, "svgadrmfb"); - info->fix.type = FB_TYPE_PACKED_PIXELS; - info->fix.visual = FB_VISUAL_TRUECOLOR; - info->fix.type_aux = 0; - info->fix.xpanstep = 1; /* doing it in hw */ - info->fix.ypanstep = 1; /* doing it in hw */ - info->fix.ywrapstep = 0; - info->fix.accel = FB_ACCEL_NONE; - info->fix.line_length = fb_pitch; - - info->fix.smem_start = 0; - info->fix.smem_len = fb_size; - - info->pseudo_palette = par->pseudo_palette; - info->screen_base = (char __iomem *)par->vmalloc; - info->screen_size = fb_size; - - info->fbops = &vmw_fb_ops; - - /* 24 depth per default */ - info->var.red.offset = 16; - info->var.green.offset = 8; - info->var.blue.offset = 0; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 0; - - info->var.xres_virtual = fb_width; - info->var.yres_virtual = fb_height; - info->var.bits_per_pixel = fb_bpp; - info->var.xoffset = 0; - info->var.yoffset = 0; - info->var.activate = FB_ACTIVATE_NOW; - info->var.height = -1; - info->var.width = -1; - - /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ - info->apertures = alloc_apertures(1); - if (!info->apertures) { - ret = -ENOMEM; - goto err_aper; - } - info->apertures->ranges[0].base = vmw_priv->vram_start; - info->apertures->ranges[0].size = vmw_priv->vram_size; - - /* - * Dirty & Deferred IO - */ - par->dirty.x1 = par->dirty.x2 = 0; - par->dirty.y1 = par->dirty.y2 = 0; - par->dirty.active = true; - spin_lock_init(&par->dirty.lock); - mutex_init(&par->bo_mutex); - info->fbdefio = &vmw_defio; - fb_deferred_io_init(info); - - ret = register_framebuffer(info); - if (unlikely(ret != 0)) - goto err_defio; - - vmw_fb_set_par(info); - - return 0; - -err_defio: - fb_deferred_io_cleanup(info); -err_aper: -err_free: - vfree(par->vmalloc); -err_kms: - framebuffer_release(info); - vmw_priv->fb_info = NULL; - - return ret; -} - -int vmw_fb_close(struct vmw_private *vmw_priv) -{ - struct fb_info *info; - struct vmw_fb_par *par; - - if (!vmw_priv->fb_info) - return 0; - - info = vmw_priv->fb_info; - par = info->par; - - /* ??? 
order */ - fb_deferred_io_cleanup(info); - cancel_delayed_work_sync(&par->local_work); - unregister_framebuffer(info); - - mutex_lock(&par->bo_mutex); - (void) vmw_fb_kms_detach(par, true, true); - mutex_unlock(&par->bo_mutex); - - vfree(par->vmalloc); - framebuffer_release(info); - - return 0; -} - -int vmw_fb_off(struct vmw_private *vmw_priv) -{ - struct fb_info *info; - struct vmw_fb_par *par; - unsigned long flags; - - if (!vmw_priv->fb_info) - return -EINVAL; - - info = vmw_priv->fb_info; - par = info->par; - - spin_lock_irqsave(&par->dirty.lock, flags); - par->dirty.active = false; - spin_unlock_irqrestore(&par->dirty.lock, flags); - - flush_delayed_work(&info->deferred_work); - flush_delayed_work(&par->local_work); - - return 0; -} - -int vmw_fb_on(struct vmw_private *vmw_priv) -{ - struct fb_info *info; - struct vmw_fb_par *par; - unsigned long flags; - - if (!vmw_priv->fb_info) - return -EINVAL; - - info = vmw_priv->fb_info; - par = info->par; - - spin_lock_irqsave(&par->dirty.lock, flags); - par->dirty.active = true; - spin_unlock_irqrestore(&par->dirty.lock, flags); - - /* - * Need to reschedule a dirty update, because otherwise that's - * only done in dirty_mark() if the previous coalesced - * dirty region was empty. - */ - schedule_delayed_work(&par->local_work, 0); - - return 0; -} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index 60e3cc537f36..abd5e3323ebf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -71,7 +71,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, spin_lock(&gman->lock); if (gman->max_gmr_pages > 0) { - gman->used_gmr_pages += (*res)->num_pages; + gman->used_gmr_pages += PFN_UP((*res)->size); /* * Because the graphics memory is a soft limit we can try to * expand it instead of letting the userspace apps crash. @@ -114,7 +114,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, return 0; nospace: - gman->used_gmr_pages -= (*res)->num_pages; + gman->used_gmr_pages -= PFN_UP((*res)->size); spin_unlock(&gman->lock); ida_free(&gman->gmr_ida, id); ttm_resource_fini(man, *res); @@ -129,7 +129,7 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man, ida_free(&gman->gmr_ida, res->start); spin_lock(&gman->lock); - gman->used_gmr_pages -= res->num_pages; + gman->used_gmr_pages -= PFN_UP(res->size); spin_unlock(&gman->lock); ttm_resource_fini(man, res); kfree(res); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c deleted file mode 100644 index 06aebc12774e..000000000000 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. 
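
The vmwgfx_gmrid_manager.c hunks above track GMR usage with PFN_UP() applied
to the resource's byte size instead of the dropped num_pages field. A minimal
userspace sketch of that conversion, assuming a 4 KiB page (the kernel macro
lives in <linux/pfn.h>):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

	int main(void)
	{
		uint64_t res_size = 3 * PAGE_SIZE + 1;	/* resource size in bytes */

		/* Equivalent of gman->used_gmr_pages += PFN_UP((*res)->size): */
		printf("pages: %llu\n", (unsigned long long)PFN_UP(res_size)); /* 4 */
		return 0;
	}
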
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * Simple open hash tab implementation. - * - * Authors: - * Thomas Hellström <thomas-at-tungstengraphics-dot-com> - */ - -#include <linux/export.h> -#include <linux/hash.h> -#include <linux/mm.h> -#include <linux/rculist.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> - -#include <drm/drm_print.h> - -#include "vmwgfx_hashtab.h" - -int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order) -{ - unsigned int size = 1 << order; - - ht->order = order; - ht->table = NULL; - if (size <= PAGE_SIZE / sizeof(*ht->table)) - ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL); - else - ht->table = vzalloc(array_size(size, sizeof(*ht->table))); - if (!ht->table) { - DRM_ERROR("Out of memory for hash table\n"); - return -ENOMEM; - } - return 0; -} - -void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key) -{ - struct vmwgfx_hash_item *entry; - struct hlist_head *h_list; - unsigned int hashed_key; - int count = 0; - - hashed_key = hash_long(key, ht->order); - DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); - h_list = &ht->table[hashed_key]; - hlist_for_each_entry(entry, h_list, head) - DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); -} - -static struct hlist_node *vmwgfx_ht_find_key(struct vmwgfx_open_hash *ht, unsigned long key) -{ - struct vmwgfx_hash_item *entry; - struct hlist_head *h_list; - unsigned int hashed_key; - - hashed_key = hash_long(key, ht->order); - h_list = &ht->table[hashed_key]; - hlist_for_each_entry(entry, h_list, head) { - if (entry->key == key) - return &entry->head; - if (entry->key > key) - break; - } - return NULL; -} - -static struct hlist_node *vmwgfx_ht_find_key_rcu(struct vmwgfx_open_hash *ht, unsigned long key) -{ - struct vmwgfx_hash_item *entry; - struct hlist_head *h_list; - unsigned int hashed_key; - - hashed_key = hash_long(key, ht->order); - h_list = &ht->table[hashed_key]; - hlist_for_each_entry_rcu(entry, h_list, head) { - if (entry->key == key) - return &entry->head; - if (entry->key > key) - break; - } - return NULL; -} - -int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item) -{ - struct vmwgfx_hash_item *entry; - struct hlist_head *h_list; - struct hlist_node *parent; - unsigned int hashed_key; - unsigned long key = item->key; - - hashed_key = hash_long(key, ht->order); - h_list = &ht->table[hashed_key]; - parent = NULL; - hlist_for_each_entry(entry, h_list, head) { - if (entry->key == key) - return -EINVAL; - if (entry->key > key) - break; - parent = &entry->head; - } - if (parent) - hlist_add_behind_rcu(&item->head, parent); - else - hlist_add_head_rcu(&item->head, h_list); - return 0; -} - -/* - * Just insert an item and return any "bits" bit key that hasn't been - * used before. 
- */ -int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item, - unsigned long seed, int bits, int shift, - unsigned long add) -{ - int ret; - unsigned long mask = (1UL << bits) - 1; - unsigned long first, unshifted_key; - - unshifted_key = hash_long(seed, bits); - first = unshifted_key; - do { - item->key = (unshifted_key << shift) + add; - ret = vmwgfx_ht_insert_item(ht, item); - if (ret) - unshifted_key = (unshifted_key + 1) & mask; - } while (ret && (unshifted_key != first)); - - if (ret) { - DRM_ERROR("Available key bit space exhausted\n"); - return -EINVAL; - } - return 0; -} - -int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key, - struct vmwgfx_hash_item **item) -{ - struct hlist_node *list; - - list = vmwgfx_ht_find_key_rcu(ht, key); - if (!list) - return -EINVAL; - - *item = hlist_entry(list, struct vmwgfx_hash_item, head); - return 0; -} - -int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key) -{ - struct hlist_node *list; - - list = vmwgfx_ht_find_key(ht, key); - if (list) { - hlist_del_init_rcu(list); - return 0; - } - return -EINVAL; -} - -int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item) -{ - hlist_del_init_rcu(&item->head); - return 0; -} - -void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht) -{ - if (ht->table) { - kvfree(ht->table); - ht->table = NULL; - } -} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h deleted file mode 100644 index a9ce12922e21..000000000000 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * Simple open hash tab implementation. - * - * Authors: - * Thomas Hellström <thomas-at-tungstengraphics-dot-com> - */ - -/* - * TODO: Replace this hashtable with Linux' generic implementation - * from <linux/hashtable.h>. 
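
The TODO above is exactly what this patch carries out. A kernel-context sketch
of the <linux/hashtable.h> pattern that replaces the open-coded table, with
made-up demo_* names; as with the removed API, writers must still be
serialized externally and only the RCU lookup may run concurrently:

	#include <linux/hashtable.h>

	struct demo_item {
		unsigned long key;
		struct hlist_node head;
	};

	DEFINE_HASHTABLE(demo_ht, 10);	/* 2^10 buckets, statically sized */

	static void demo_insert(struct demo_item *item, unsigned long key)
	{
		item->key = key;
		hash_add_rcu(demo_ht, &item->head, key);
	}

	static struct demo_item *demo_find(unsigned long key)
	{
		struct demo_item *item;

		/* Walk only the bucket the key hashes to. */
		hash_for_each_possible_rcu(demo_ht, item, head, key)
			if (item->key == key)
				return item;
		return NULL;
	}

	static void demo_remove(struct demo_item *item)
	{
		hash_del_rcu(&item->head);
	}
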
- */ - -#ifndef VMWGFX_HASHTAB_H -#define VMWGFX_HASHTAB_H - -#include <linux/list.h> - -#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) - -struct vmwgfx_hash_item { - struct hlist_node head; - unsigned long key; -}; - -struct vmwgfx_open_hash { - struct hlist_head *table; - u8 order; -}; - -int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order); -int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item); -int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item, - unsigned long seed, int bits, int shift, - unsigned long add); -int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key, - struct vmwgfx_hash_item **item); - -void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key); -int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key); -int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item); -void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht); - -/* - * RCU-safe interface - * - * The user of this API needs to make sure that two or more instances of the - * hash table manipulation functions are never run simultaneously. - * The lookup function vmwgfx_ht_find_item_rcu may, however, run simultaneously - * with any of the manipulation functions as long as it's called from within - * an RCU read-locked section. - */ -#define vmwgfx_ht_insert_item_rcu vmwgfx_ht_insert_item -#define vmwgfx_ht_just_insert_please_rcu vmwgfx_ht_just_insert_please -#define vmwgfx_ht_remove_key_rcu vmwgfx_ht_remove_key -#define vmwgfx_ht_remove_item_rcu vmwgfx_ht_remove_item -#define vmwgfx_ht_find_item_rcu vmwgfx_ht_find_item - -#endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 214829c32ed8..257f090071f1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -25,15 +25,15 @@ * **************************************************************************/ +#include "vmwgfx_kms.h" +#include "vmw_surface_cache.h" + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_rect.h> #include <drm/drm_sysfs.h> -#include <drm/drm_vblank.h> - -#include "vmwgfx_kms.h" void vmw_du_cleanup(struct vmw_display_unit *du) { @@ -52,9 +52,9 @@ void vmw_du_cleanup(struct vmw_display_unit *du) * Display Unit Cursor functions */ +static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps); static void vmw_cursor_update_mob(struct vmw_private *dev_priv, - struct ttm_buffer_object *bo, - struct ttm_bo_kmap_obj *map, + struct vmw_plane_state *vps, u32 *image, u32 width, u32 height, u32 hotspotX, u32 hotspotY); @@ -63,23 +63,23 @@ struct vmw_svga_fifo_cmd_define_cursor { SVGAFifoCmdDefineAlphaCursor cursor; }; -static void vmw_cursor_update_image(struct vmw_private *dev_priv, - struct ttm_buffer_object *cm_bo, - struct ttm_bo_kmap_obj *cm_map, - u32 *image, u32 width, u32 height, - u32 hotspotX, u32 hotspotY) +/** + * vmw_send_define_cursor_cmd - queue a define cursor command + * @dev_priv: the private driver struct + * @image: buffer which holds the cursor image + * @width: width of the mouse cursor image + * @height: height of the mouse cursor image + * @hotspotX: the horizontal position of mouse hotspot + * @hotspotY: the vertical position of mouse hotspot + */ +static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv, + u32 *image, u32 width, u32 height, + u32 
hotspotX, u32 hotspotY) { struct vmw_svga_fifo_cmd_define_cursor *cmd; const u32 image_size = width * height * sizeof(*image); const u32 cmd_size = sizeof(*cmd) + image_size; - if (cm_bo != NULL) { - vmw_cursor_update_mob(dev_priv, cm_bo, cm_map, image, - width, height, - hotspotX, hotspotY); - return; - } - /* Try to reserve fifocmd space and swallow any failures; such reservations cannot be left unconsumed for long under the risk of clogging other fifocmd users, so @@ -87,7 +87,7 @@ static void vmw_cursor_update_image(struct vmw_private *dev_priv, other fallible KMS-atomic resources at prepare_fb */ cmd = VMW_CMD_RESERVE(dev_priv, cmd_size); - if (unlikely(cmd == NULL)) + if (unlikely(!cmd)) return; memset(cmd, 0, sizeof(*cmd)); @@ -105,11 +105,39 @@ static void vmw_cursor_update_image(struct vmw_private *dev_priv, } /** + * vmw_cursor_update_image - update the cursor image on the provided plane + * @dev_priv: the private driver struct + * @vps: the plane state of the cursor plane + * @image: buffer which holds the cursor image + * @width: width of the mouse cursor image + * @height: height of the mouse cursor image + * @hotspotX: the horizontal position of mouse hotspot + * @hotspotY: the vertical position of mouse hotspot + */ +static void vmw_cursor_update_image(struct vmw_private *dev_priv, + struct vmw_plane_state *vps, + u32 *image, u32 width, u32 height, + u32 hotspotX, u32 hotspotY) +{ + if (vps->cursor.bo) + vmw_cursor_update_mob(dev_priv, vps, image, + vps->base.crtc_w, vps->base.crtc_h, + hotspotX, hotspotY); + + else + vmw_send_define_cursor_cmd(dev_priv, image, width, height, + hotspotX, hotspotY); +} + + +/** * vmw_cursor_update_mob - Update cursor vis CursorMob mechanism * + * Called from inside vmw_du_cursor_plane_atomic_update to actually + * make the cursor-image live. 
+ * * @dev_priv: device to work with - * @bo: BO for the MOB - * @map: kmap obj for the BO + * @vps: the plane state of the cursor plane * @image: cursor source data to fill the MOB with * @width: source data width * @height: source data height @@ -117,8 +145,7 @@ static void vmw_cursor_update_image(struct vmw_private *dev_priv, * @hotspotY: cursor hotspot Y */ static void vmw_cursor_update_mob(struct vmw_private *dev_priv, - struct ttm_buffer_object *bo, - struct ttm_bo_kmap_obj *map, + struct vmw_plane_state *vps, u32 *image, u32 width, u32 height, u32 hotspotX, u32 hotspotY) { @@ -127,11 +154,11 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv, const u32 image_size = width * height * sizeof(*image); bool dummy; - BUG_ON(!image); - - header = (SVGAGBCursorHeader *)ttm_kmap_obj_virtual(map, &dummy); + header = ttm_kmap_obj_virtual(&vps->cursor.map, &dummy); alpha_header = &header->header.alphaHeader; + memset(header, 0, sizeof(*header)); + header->type = SVGA_ALPHA_CURSOR; header->sizeInBytes = image_size; @@ -141,100 +168,159 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv, alpha_header->height = height; memcpy(header + 1, image, image_size); - - vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID, bo->resource->start); + vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID, + vps->cursor.bo->resource->start); } -void vmw_du_destroy_cursor_mob_array(struct vmw_cursor_plane *vcp) -{ - size_t i; - for (i = 0; i < ARRAY_SIZE(vcp->cursor_mob); i++) { - if (vcp->cursor_mob[i] != NULL) { - ttm_bo_unpin(vcp->cursor_mob[i]); - ttm_bo_put(vcp->cursor_mob[i]); - kfree(vcp->cursor_mob[i]); - vcp->cursor_mob[i] = NULL; - } - } +static u32 vmw_du_cursor_mob_size(u32 w, u32 h) +{ + return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader); } -#define CURSOR_MOB_SIZE(dimension) \ - ((dimension) * (dimension) * sizeof(u32) + sizeof(SVGAGBCursorHeader)) +/** + * vmw_du_cursor_plane_acquire_image -- Acquire the image data + * @vps: cursor plane state + */ +static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps) +{ + bool dummy; + if (vps->surf) { + if (vps->surf_mapped) + return vmw_bo_map_and_cache(vps->surf->res.backup); + return vps->surf->snooper.image; + } else if (vps->bo) + return ttm_kmap_obj_virtual(&vps->bo->map, &dummy); + return NULL; +} -int vmw_du_create_cursor_mob_array(struct vmw_cursor_plane *cursor) +static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps, + struct vmw_plane_state *new_vps) { - struct vmw_private *dev_priv = cursor->base.dev->dev_private; - uint32_t cursor_max_dim, mob_max_size; - int ret = 0; - size_t i; + void *old_image; + void *new_image; + u32 size; + bool changed; - if (!dev_priv->has_mob || (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0) - return -ENOSYS; + if (old_vps->base.crtc_w != new_vps->base.crtc_w || + old_vps->base.crtc_h != new_vps->base.crtc_h) + return true; - mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); - cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION); + if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x || + old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y) + return true; - if (CURSOR_MOB_SIZE(cursor_max_dim) > mob_max_size) - cursor_max_dim = 64; /* Mandatorily-supported cursor dimension */ + size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32); - for (i = 0; i < ARRAY_SIZE(cursor->cursor_mob); i++) { - struct ttm_buffer_object **const bo = &cursor->cursor_mob[i]; + old_image = vmw_du_cursor_plane_acquire_image(old_vps); + new_image = 
vmw_du_cursor_plane_acquire_image(new_vps); - ret = vmw_bo_create_kernel(dev_priv, - CURSOR_MOB_SIZE(cursor_max_dim), - &vmw_mob_placement, bo); + changed = false; + if (old_image && new_image) + changed = memcmp(old_image, new_image, size) != 0; - if (ret != 0) - goto teardown; + return changed; +} - if ((*bo)->resource->mem_type != VMW_PL_MOB) { - DRM_ERROR("Obtained buffer object is not a MOB.\n"); - ret = -ENOSYS; - goto teardown; - } +static void vmw_du_destroy_cursor_mob(struct ttm_buffer_object **bo) +{ + if (!(*bo)) + return; - /* Fence the mob creation so we are guarateed to have the mob */ - ret = ttm_bo_reserve(*bo, false, false, NULL); + ttm_bo_unpin(*bo); + ttm_bo_put(*bo); + kfree(*bo); + *bo = NULL; +} - if (ret != 0) - goto teardown; +static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp, + struct vmw_plane_state *vps) +{ + u32 i; - vmw_bo_fence_single(*bo, NULL); + if (!vps->cursor.bo) + return; - ttm_bo_unreserve(*bo); + vmw_du_cursor_plane_unmap_cm(vps); - drm_info(&dev_priv->drm, "Using CursorMob mobid %lu, max dimension %u\n", - (*bo)->resource->start, cursor_max_dim); + /* Look for a free slot to return this mob to the cache. */ + for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { + if (!vcp->cursor_mobs[i]) { + vcp->cursor_mobs[i] = vps->cursor.bo; + vps->cursor.bo = NULL; + return; + } } - return 0; - -teardown: - vmw_du_destroy_cursor_mob_array(cursor); + /* Cache is full: See if this mob is bigger than an existing mob. */ + for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { + if (vcp->cursor_mobs[i]->base.size < + vps->cursor.bo->base.size) { + vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]); + vcp->cursor_mobs[i] = vps->cursor.bo; + vps->cursor.bo = NULL; + return; + } + } - return ret; + /* Destroy it if it's not worth caching. */ + vmw_du_destroy_cursor_mob(&vps->cursor.bo); } -#undef CURSOR_MOB_SIZE - -static void vmw_cursor_update_bo(struct vmw_private *dev_priv, - struct ttm_buffer_object *cm_bo, - struct ttm_bo_kmap_obj *cm_map, - struct vmw_buffer_object *bo, - u32 width, u32 height, - u32 hotspotX, u32 hotspotY) +static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp, + struct vmw_plane_state *vps) { - void *virtual; - bool dummy; + struct vmw_private *dev_priv = vcp->base.dev->dev_private; + u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h); + u32 i; + u32 cursor_max_dim, mob_max_size; + int ret; + + if (!dev_priv->has_mob || + (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0) + return -EINVAL; + + mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); + cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION); + + if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim || + vps->base.crtc_h > cursor_max_dim) + return -EINVAL; - virtual = ttm_kmap_obj_virtual(&bo->map, &dummy); - if (virtual) { - vmw_cursor_update_image(dev_priv, cm_bo, cm_map, virtual, - width, height, - hotspotX, hotspotY); - atomic_dec(&bo->base_mapped_count); + if (vps->cursor.bo) { + if (vps->cursor.bo->base.size >= size) + return 0; + vmw_du_put_cursor_mob(vcp, vps); + } + + /* Look for an unused mob in the cache. */ + for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { + if (vcp->cursor_mobs[i] && + vcp->cursor_mobs[i]->base.size >= size) { + vps->cursor.bo = vcp->cursor_mobs[i]; + vcp->cursor_mobs[i] = NULL; + return 0; + } } + /* Create a new mob if we can't find an existing one. 
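
vmw_du_put_cursor_mob() above returns a cursor mob to a small fixed cache:
take a free slot if there is one, otherwise displace a smaller cached buffer,
otherwise destroy it. A userspace sketch of that policy (types and sizes are
illustrative only):

	#include <stdlib.h>

	#define NUM_SLOTS 3

	struct buf { size_t size; };

	static struct buf *cache[NUM_SLOTS];

	static void cache_put(struct buf *b)	/* b was heap-allocated */
	{
		for (int i = 0; i < NUM_SLOTS; i++) {
			if (!cache[i]) {		/* free slot */
				cache[i] = b;
				return;
			}
		}
		for (int i = 0; i < NUM_SLOTS; i++) {
			if (cache[i]->size < b->size) {	/* keep the bigger one */
				free(cache[i]);
				cache[i] = b;
				return;
			}
		}
		free(b);				/* not worth caching */
	}
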
 */
+	ret = vmw_bo_create_kernel(dev_priv, size, &vmw_mob_placement,
+				   &vps->cursor.bo);
+
+	if (ret != 0)
+		return ret;
+
+	/* Fence the mob creation so we are guaranteed to have the mob */
+	ret = ttm_bo_reserve(vps->cursor.bo, false, false, NULL);
+	if (ret != 0)
+		goto teardown;
+
+	vmw_bo_fence_single(vps->cursor.bo, NULL);
+	ttm_bo_unreserve(vps->cursor.bo);
+	return 0;
+
+teardown:
+	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
+	return ret;
 }
@@ -266,7 +352,6 @@ static void vmw_cursor_update_position(struct vmw_private *dev_priv,
 	spin_unlock(&dev_priv->cursor_lock);
 }
 
-
 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
@@ -284,10 +369,13 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 		SVGA3dCmdSurfaceDMA dma;
 	} *cmd;
 	int i, ret;
+	const struct SVGA3dSurfaceDesc *desc =
+		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
 
 	cmd = container_of(header, struct vmw_dma_cmd, header);
 
-	/* No snooper installed */
+	/* No snooper installed, nothing to copy */
 	if (!srf->snooper.image)
 		return;
 
@@ -308,7 +396,8 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0 || box->y != 0 || box->z != 0 ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
-	    box->d != 1 || box_count != 1) {
+	    box->d != 1 || box_count != 1 ||
+	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
 		/* TODO handle none page aligned offsets */
 		/* TODO handle more dst & src != 0 */
 		/* TODO handle more then one copy */
@@ -322,7 +411,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 	}
 
 	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
-	kmap_num = (64*64*4) >> PAGE_SHIFT;
+	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
 
 	ret = ttm_bo_reserve(bo, true, false, NULL);
 	if (unlikely(ret != 0)) {
@@ -336,14 +425,15 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 
 	virtual = ttm_kmap_obj_virtual(&map, &dummy);
 
-	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
-		memcpy(srf->snooper.image, virtual, 64*64*4);
+	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
+		memcpy(srf->snooper.image, virtual,
+		       VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
 	} else {
 		/* Image is unsigned pointer.
*/ for (i = 0; i < box->h; i++) - memcpy(srf->snooper.image + i * 64, + memcpy(srf->snooper.image + i * image_pitch, virtual + i * cmd->dma.guest.pitch, - box->w * 4); + box->w * desc->pitchBytesPerBlock); } srf->snooper.age++; @@ -387,15 +477,17 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { du = vmw_crtc_to_du(crtc); if (!du->cursor_surface || - du->cursor_age == du->cursor_surface->snooper.age) + du->cursor_age == du->cursor_surface->snooper.age || + !du->cursor_surface->snooper.image) continue; du->cursor_age = du->cursor_surface->snooper.age; - vmw_cursor_update_image(dev_priv, NULL, NULL, - du->cursor_surface->snooper.image, - 64, 64, - du->hotspot_x + du->core_hotspot_x, - du->hotspot_y + du->core_hotspot_y); + vmw_send_define_cursor_cmd(dev_priv, + du->cursor_surface->snooper.image, + VMW_CURSOR_SNOOP_WIDTH, + VMW_CURSOR_SNOOP_HEIGHT, + du->hotspot_x + du->core_hotspot_x, + du->hotspot_y + du->core_hotspot_y); } mutex_unlock(&dev->mode_config.mutex); @@ -404,8 +496,14 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) void vmw_du_cursor_plane_destroy(struct drm_plane *plane) { + struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); + u32 i; + vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0); - vmw_du_destroy_cursor_mob_array(vmw_plane_to_vcp(plane)); + + for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) + vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]); + drm_plane_cleanup(plane); } @@ -463,6 +561,87 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane, /** + * vmw_du_cursor_plane_map_cm - Maps the cursor mobs. + * + * @vps: plane_state + * + * Returns 0 on success + */ + +static int +vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps) +{ + int ret; + u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h); + struct ttm_buffer_object *bo = vps->cursor.bo; + + if (!bo) + return -EINVAL; + + if (bo->base.size < size) + return -EINVAL; + + if (vps->cursor.mapped) + return 0; + + ret = ttm_bo_reserve(bo, false, false, NULL); + + if (unlikely(ret != 0)) + return -ENOMEM; + + ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vps->cursor.map); + + /* + * We just want to try to get mob bind to finish + * so that the first write to SVGA_REG_CURSOR_MOBID + * is done with a buffer that the device has already + * seen + */ + (void) ttm_bo_wait(bo, false, false); + + ttm_bo_unreserve(bo); + + if (unlikely(ret != 0)) + return -ENOMEM; + + vps->cursor.mapped = true; + + return 0; +} + + +/** + * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs. 
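
When the guest pitch differs from the snoop image pitch, the loop above copies
the cursor image row by row rather than in one memcpy(). A userspace sketch of
that strided copy, with made-up sizes:

	#include <stdint.h>
	#include <string.h>

	static void copy_rows(uint8_t *dst, size_t dst_pitch,
			      const uint8_t *src, size_t src_pitch,
			      size_t row_bytes, unsigned int rows)
	{
		for (unsigned int i = 0; i < rows; i++)
			memcpy(dst + i * dst_pitch, src + i * src_pitch, row_bytes);
	}

	int main(void)
	{
		uint8_t src[16 * 8] = { 0 };	/* 8 rows, source pitch 16 */
		uint8_t dst[12 * 8];		/* 8 rows, dest pitch 12 */

		copy_rows(dst, 12, src, 16, 12, 8);
		return 0;
	}
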
+ * + * @vps: state of the cursor plane + * + * Returns 0 on success + */ + +static int +vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps) +{ + int ret = 0; + struct ttm_buffer_object *bo = vps->cursor.bo; + + if (!vps->cursor.mapped) + return 0; + + if (!bo) + return 0; + + ret = ttm_bo_reserve(bo, true, false, NULL); + if (likely(ret == 0)) { + ttm_bo_kunmap(&vps->cursor.map); + ttm_bo_unreserve(bo); + vps->cursor.mapped = false; + } + + return ret; +} + + +/** * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface * * @plane: cursor plane @@ -476,10 +655,16 @@ void vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { + struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); bool dummy; - if (vps->bo != NULL && ttm_kmap_obj_virtual(&vps->bo->map, &dummy) != NULL) { + if (vps->surf_mapped) { + vmw_bo_unmap(vps->surf->res.backup); + vps->surf_mapped = false; + } + + if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &dummy)) { const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL); if (likely(ret == 0)) { @@ -489,14 +674,8 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane, } } - if (vps->cm_bo != NULL && ttm_kmap_obj_virtual(&vps->cm_map, &dummy) != NULL) { - const int ret = ttm_bo_reserve(vps->cm_bo, true, false, NULL); - - if (likely(ret == 0)) { - ttm_bo_kunmap(&vps->cm_map); - ttm_bo_unreserve(vps->cm_bo); - } - } + vmw_du_cursor_plane_unmap_cm(vps); + vmw_du_put_cursor_mob(vcp, vps); vmw_du_plane_unpin_surf(vps, false); @@ -511,6 +690,7 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane, } } + /** * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it * @@ -526,8 +706,6 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, struct drm_framebuffer *fb = new_state->fb; struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); - struct ttm_buffer_object *cm_bo = NULL; - bool dummy; int ret = 0; if (vps->surf) { @@ -550,13 +728,14 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, } } - vps->cm_bo = NULL; - - if (vps->surf == NULL && vps->bo != NULL) { + if (!vps->surf && vps->bo) { const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32); - /* Not using vmw_bo_map_and_cache() helper here as we need to reserve - the ttm_buffer_object first which wmw_bo_map_and_cache() omits. */ + /* + * Not using vmw_bo_map_and_cache() helper here as we need to + * reserve the ttm_buffer_object first which + * vmw_bo_map_and_cache() omits. + */ ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL); if (unlikely(ret != 0)) @@ -571,69 +750,24 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, if (unlikely(ret != 0)) return -ENOMEM; + } else if (vps->surf && !vps->bo && vps->surf->res.backup) { + + WARN_ON(vps->surf->snooper.image); + ret = ttm_bo_reserve(&vps->surf->res.backup->base, true, false, + NULL); + if (unlikely(ret != 0)) + return -ENOMEM; + vmw_bo_map_and_cache(vps->surf->res.backup); + ttm_bo_unreserve(&vps->surf->res.backup->base); + vps->surf_mapped = true; } if (vps->surf || vps->bo) { - unsigned cursor_mob_idx = vps->cursor_mob_idx; - - /* Lazily set up cursor MOBs just once -- no reattempts. 
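
vmw_du_cursor_plane_map_cm() above reserves the mob, kmaps it, and waits so
the first write to SVGA_REG_CURSOR_MOBID uses a buffer the device has already
seen. A kernel-context sketch of that reserve/kmap/wait sequence; demo_map_bo()
is a made-up helper, the headers are as named in this kernel, and error
handling is trimmed:

	#include <drm/ttm/ttm_bo_api.h>
	#include <linux/pfn.h>

	static void *demo_map_bo(struct ttm_buffer_object *bo, u32 size,
				 struct ttm_bo_kmap_obj *map)
	{
		bool is_iomem;

		if (ttm_bo_reserve(bo, false, false, NULL))
			return NULL;

		if (ttm_bo_kmap(bo, 0, PFN_UP(size), map)) {
			ttm_bo_unreserve(bo);
			return NULL;
		}

		/* Give any pending bind a chance to finish before use. */
		(void) ttm_bo_wait(bo, false, false);

		ttm_bo_unreserve(bo);
		return ttm_kmap_obj_virtual(map, &is_iomem);
	}
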
*/ - if (cursor_mob_idx == 0 && vcp->cursor_mob[0] == NULL) - if (vmw_du_create_cursor_mob_array(vcp) != 0) - vps->cursor_mob_idx = cursor_mob_idx = -1U; - - if (cursor_mob_idx < ARRAY_SIZE(vcp->cursor_mob)) { - const u32 size = sizeof(SVGAGBCursorHeader) + - new_state->crtc_w * new_state->crtc_h * sizeof(u32); - - cm_bo = vcp->cursor_mob[cursor_mob_idx]; - - if (cm_bo->resource->num_pages * PAGE_SIZE < size) { - ret = -EINVAL; - goto error_bo_unmap; - } - - ret = ttm_bo_reserve(cm_bo, false, false, NULL); - - if (unlikely(ret != 0)) { - ret = -ENOMEM; - goto error_bo_unmap; - } - - ret = ttm_bo_kmap(cm_bo, 0, PFN_UP(size), &vps->cm_map); - - /* - * We just want to try to get mob bind to finish - * so that the first write to SVGA_REG_CURSOR_MOBID - * is done with a buffer that the device has already - * seen - */ - (void) ttm_bo_wait(cm_bo, false, false); - - ttm_bo_unreserve(cm_bo); - - if (unlikely(ret != 0)) { - ret = -ENOMEM; - goto error_bo_unmap; - } - - vps->cursor_mob_idx = cursor_mob_idx ^ 1; - vps->cm_bo = cm_bo; - } + vmw_du_get_cursor_mob(vcp, vps); + vmw_du_cursor_plane_map_cm(vps); } return 0; - -error_bo_unmap: - if (vps->bo != NULL && ttm_kmap_obj_virtual(&vps->bo->map, &dummy) != NULL) { - const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL); - if (likely(ret == 0)) { - atomic_dec(&vps->bo->base_mapped_count); - ttm_bo_kunmap(&vps->bo->map); - ttm_bo_unreserve(&vps->bo->base); - } - } - - return ret; } @@ -649,7 +783,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, struct vmw_private *dev_priv = vmw_priv(crtc->dev); struct vmw_display_unit *du = vmw_crtc_to_du(crtc); struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); + struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state); s32 hotspot_x, hotspot_y; + bool dummy; hotspot_x = du->hotspot_x; hotspot_y = du->hotspot_y; @@ -662,23 +798,38 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, du->cursor_surface = vps->surf; du->cursor_bo = vps->bo; + if (!vps->surf && !vps->bo) { + vmw_cursor_update_position(dev_priv, false, 0, 0); + return; + } + + vps->cursor.hotspot_x = hotspot_x; + vps->cursor.hotspot_y = hotspot_y; + if (vps->surf) { du->cursor_age = du->cursor_surface->snooper.age; + } - vmw_cursor_update_image(dev_priv, vps->cm_bo, &vps->cm_map, - vps->surf->snooper.image, - new_state->crtc_w, - new_state->crtc_h, - hotspot_x, hotspot_y); - } else if (vps->bo) { - vmw_cursor_update_bo(dev_priv, vps->cm_bo, &vps->cm_map, - vps->bo, - new_state->crtc_w, - new_state->crtc_h, - hotspot_x, hotspot_y); + if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) { + /* + * If it hasn't changed, avoid making the device do extra + * work by keeping the old cursor active. 
+ */ + struct vmw_cursor_plane_state tmp = old_vps->cursor; + old_vps->cursor = vps->cursor; + vps->cursor = tmp; } else { - vmw_cursor_update_position(dev_priv, false, 0, 0); - return; + void *image = vmw_du_cursor_plane_acquire_image(vps); + if (image) + vmw_cursor_update_image(dev_priv, vps, image, + new_state->crtc_w, + new_state->crtc_h, + hotspot_x, hotspot_y); + } + + if (vps->bo) { + if (ttm_kmap_obj_virtual(&vps->bo->map, &dummy)) + atomic_dec(&vps->bo->base_mapped_count); } du->cursor_x = new_state->crtc_x + du->set_gui_x; @@ -778,12 +929,16 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } - if (!vmw_framebuffer_to_vfb(fb)->bo) + if (!vmw_framebuffer_to_vfb(fb)->bo) { surface = vmw_framebuffer_to_vfbs(fb)->surface; - if (surface && !surface->snooper.image) { - DRM_ERROR("surface not suitable for cursor\n"); - return -EINVAL; + WARN_ON(!surface); + + if (!surface || + (!surface->snooper.image && !surface->res.backup)) { + DRM_ERROR("surface not suitable for cursor\n"); + return -EINVAL; + } } return 0; @@ -831,15 +986,6 @@ void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc, void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { - struct drm_pending_vblank_event *event = crtc->state->event; - - if (event) { - crtc->state->event = NULL; - - spin_lock_irq(&crtc->dev->event_lock); - drm_crtc_send_vblank_event(crtc, event); - spin_unlock_irq(&crtc->dev->event_lock); - } } @@ -943,6 +1089,8 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane) vps->pinned = 0; vps->cpp = 0; + memset(&vps->cursor, 0, sizeof(vps->cursor)); + /* Each ref counted resource needs to be acquired again */ if (vps->surf) (void) vmw_surface_reference(vps->surf); @@ -997,7 +1145,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane, { struct vmw_plane_state *vps = vmw_plane_state_to_vps(state); - /* Should have been freed by cleanup_fb */ if (vps->surf) vmw_surface_unreference(&vps->surf); @@ -1664,7 +1811,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, if (IS_ERR(vfb)) { ret = PTR_ERR(vfb); goto err_out; - } + } err_out: /* vmw_user_lookup_handle takes one ref so does new_fb */ @@ -2052,6 +2199,8 @@ int vmw_kms_init(struct vmw_private *dev_priv) dev->mode_config.min_height = 1; dev->mode_config.max_width = dev_priv->texture_max_width; dev->mode_config.max_height = dev_priv->texture_max_height; + dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32; + dev->mode_config.prefer_shadow_fbdev = !dev_priv->has_mob; drm_mode_create_suggested_offset_properties(dev); vmw_kms_create_hotplug_mode_update_property(dev_priv); @@ -2093,7 +2242,6 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, struct drm_crtc *crtc; int ret = 0; - mutex_lock(&dev->mode_config.mutex); if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) { @@ -2155,30 +2303,6 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, dev_priv->max_primary_mem : dev_priv->vram_size); } - -/* - * Function called by DRM code called with vbl_lock held. - */ -u32 vmw_get_vblank_counter(struct drm_crtc *crtc) -{ - return 0; -} - -/* - * Function called by DRM code called with vbl_lock held. - */ -int vmw_enable_vblank(struct drm_crtc *crtc) -{ - return -EINVAL; -} - -/* - * Function called by DRM code called with vbl_lock held. 
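
When the image is unchanged, the swap above hands the already-live mob to the
new plane state while the old state, which is about to be cleaned up, takes
the fresh unused one. A userspace sketch of that ownership swap with made-up
types:

	#include <stdio.h>

	struct demo_cursor {
		void *bo;		/* stands in for the cursor mob */
		int hotspot_x, hotspot_y;
	};

	static void keep_old_cursor(struct demo_cursor *old_c,
				    struct demo_cursor *new_c)
	{
		struct demo_cursor tmp = *old_c;

		*old_c = *new_c;	/* cleanup path frees the unused mob */
		*new_c = tmp;		/* new state keeps the displayed mob */
	}

	int main(void)
	{
		struct demo_cursor old_state = { (void *)0x1, 2, 2 };
		struct demo_cursor new_state = { (void *)0x2, 2, 2 };

		keep_old_cursor(&old_state, &new_state);
		printf("new state owns %p\n", new_state.bo);	/* 0x1 */
		return 0;
	}
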
- */ -void vmw_disable_vblank(struct drm_crtc *crtc) -{ -} - /** * vmw_du_update_layout - Update the display unit with topology from resolution * plugin and generate DRM uevent @@ -2207,7 +2331,7 @@ retry: if (ret == -EDEADLK) { drm_modeset_backoff(&ctx); goto retry; - } + } goto out_fini; } } @@ -2222,8 +2346,8 @@ retry: du->gui_x = rects[du->unit].x1; du->gui_y = rects[du->unit].y1; } else { - du->pref_width = 800; - du->pref_height = 600; + du->pref_width = VMWGFX_MIN_INITIAL_WIDTH; + du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT; du->pref_active = false; du->gui_x = 0; du->gui_y = 0; @@ -2250,13 +2374,13 @@ retry: } con->status = vmw_du_connector_detect(con, true); } - - drm_sysfs_hotplug_event(dev); out_fini: drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); mutex_unlock(&dev->mode_config.mutex); + drm_sysfs_hotplug_event(dev); + return 0; } @@ -2536,10 +2660,9 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, int ret, i; if (!arg->num_outputs) { - struct drm_rect def_rect = {0, 0, 800, 600}; - VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n", - def_rect.x1, def_rect.y1, - def_rect.x2, def_rect.y2); + struct drm_rect def_rect = {0, 0, + VMWGFX_MIN_INITIAL_WIDTH, + VMWGFX_MIN_INITIAL_HEIGHT}; vmw_du_update_layout(dev_priv, 1, &def_rect); return 0; } @@ -2834,68 +2957,6 @@ int vmw_kms_update_proxy(struct vmw_resource *res, return 0; } -int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, - unsigned unit, - u32 max_width, - u32 max_height, - struct drm_connector **p_con, - struct drm_crtc **p_crtc, - struct drm_display_mode **p_mode) -{ - struct drm_connector *con; - struct vmw_display_unit *du; - struct drm_display_mode *mode; - int i = 0; - int ret = 0; - - mutex_lock(&dev_priv->drm.mode_config.mutex); - list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list, - head) { - if (i == unit) - break; - - ++i; - } - - if (&con->head == &dev_priv->drm.mode_config.connector_list) { - DRM_ERROR("Could not find initial display unit.\n"); - ret = -EINVAL; - goto out_unlock; - } - - if (list_empty(&con->modes)) - (void) vmw_du_connector_fill_modes(con, max_width, max_height); - - if (list_empty(&con->modes)) { - DRM_ERROR("Could not find initial display mode.\n"); - ret = -EINVAL; - goto out_unlock; - } - - du = vmw_connector_to_du(con); - *p_con = con; - *p_crtc = &du->crtc; - - list_for_each_entry(mode, &con->modes, head) { - if (mode->type & DRM_MODE_TYPE_PREFERRED) - break; - } - - if (&mode->head == &con->modes) { - WARN_ONCE(true, "Could not find initial preferred mode.\n"); - *p_mode = list_first_entry(&con->modes, - struct drm_display_mode, - head); - } else { - *p_mode = mode; - } - - out_unlock: - mutex_unlock(&dev_priv->drm.mode_config.mutex); - - return ret; -} - /** * vmw_kms_create_implicit_placement_property - Set up the implicit placement * property. 
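
Also note that vmw_du_update_layout() above now calls
drm_sysfs_hotplug_event() only after every modeset lock has been dropped, so
a listener that re-enters KMS paths cannot deadlock against the update. A
minimal pthread sketch of that notify-after-unlock ordering (names made up):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t layout_lock = PTHREAD_MUTEX_INITIALIZER;

	static void notify(void) { printf("hotplug uevent\n"); }

	static void update_layout(void)
	{
		pthread_mutex_lock(&layout_lock);
		/* ... mutate connector/layout state ... */
		pthread_mutex_unlock(&layout_lock);

		/* Fire the event with no locks held. */
		notify();
	}

	int main(void) { update_layout(); return 0; }
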
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 85f86faa3243..4d6e7b555db7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -272,6 +272,14 @@ struct vmw_crtc_state { struct drm_crtc_state base; }; +struct vmw_cursor_plane_state { + struct ttm_buffer_object *bo; + struct ttm_bo_kmap_obj map; + bool mapped; + s32 hotspot_x; + s32 hotspot_y; +}; + /** * Derived class for plane state object * @@ -295,13 +303,8 @@ struct vmw_plane_state { /* For CPU Blit */ unsigned int cpp; - /* CursorMob flipping index; -1 if cursor mobs not used */ - unsigned int cursor_mob_idx; - /* Currently-active CursorMob */ - struct ttm_buffer_object *cm_bo; - /* CursorMob kmap_obj; expected valid at cursor_plane_atomic_update - IFF currently-active CursorMob above is valid */ - struct ttm_bo_kmap_obj cm_map; + bool surf_mapped; + struct vmw_cursor_plane_state cursor; }; @@ -338,11 +341,12 @@ struct vmw_connector_state { * Derived class for cursor plane object * * @base DRM plane object - * @cursor_mob array of two MOBs for CursorMob flipping + * @cursor.cursor_mobs Cursor mobs available for re-use */ struct vmw_cursor_plane { struct drm_plane base; - struct ttm_buffer_object *cursor_mob[2]; + + struct ttm_buffer_object *cursor_mobs[3]; }; /** @@ -458,13 +462,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv, struct vmw_surface *surface, bool only_2d, const struct drm_mode_fb_cmd2 *mode_cmd); -int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, - unsigned unit, - u32 max_width, - u32 max_height, - struct drm_connector **p_con, - struct drm_crtc **p_crtc, - struct drm_display_mode **p_mode); void vmw_guess_mode_timing(struct drm_display_mode *mode); void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv); void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv); @@ -472,8 +469,6 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv); /* Universal Plane Helpers */ void vmw_du_primary_plane_destroy(struct drm_plane *plane); void vmw_du_cursor_plane_destroy(struct drm_plane *plane); -int vmw_du_create_cursor_mob_array(struct vmw_cursor_plane *vcp); -void vmw_du_destroy_cursor_mob_array(struct vmw_cursor_plane *vcp); /* Atomic Helpers */ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index b8761f16dd78..a56e5d0ca3c6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -28,7 +28,6 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> -#include <drm/drm_vblank.h> #include "vmwgfx_kms.h" @@ -235,9 +234,6 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = { .atomic_duplicate_state = vmw_du_crtc_duplicate_state, .atomic_destroy_state = vmw_du_crtc_destroy_state, .set_config = drm_atomic_helper_set_config, - .get_vblank_counter = vmw_get_vblank_counter, - .enable_vblank = vmw_enable_vblank, - .disable_vblank = vmw_disable_vblank, }; @@ -507,10 +503,6 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv) dev_priv->ldu_priv->last_num_active = 0; dev_priv->ldu_priv->fb = NULL; - ret = drm_vblank_init(dev, num_display_units); - if (ret != 0) - goto err_free; - vmw_kms_create_implicit_placement_property(dev_priv); for (i = 0; i < num_display_units; ++i) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h b/drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h index 0509f55f07b4..ede74c7fdbbf 
100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h
@@ -29,6 +29,7 @@
 #define _VMWGFX_MKSSTAT_H_
 
 #include <asm/page.h>
+#include <linux/kconfig.h>
 
 /* Reservation marker for mksstat pid's */
 #define MKSSTAT_PID_RESERVED -1
@@ -41,6 +42,7 @@ typedef enum {
 	MKSSTAT_KERN_EXECBUF, /* vmw_execbuf_ioctl */
+	MKSSTAT_KERN_COTABLE_RESIZE,
 
 	MKSSTAT_KERN_COUNT /* Reserved entry; always last */
 } mksstat_kern_stats_t;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 50fa3df0bc0c..e76976a95a1e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -85,7 +85,14 @@ struct rpc_channel {
 	u32 cookie_low;
 };
 
-
+#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
+/* Kernel mksGuestStats counter names and descriptions; same order as enum mksstat_kern_stats_t */
+static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
+{
+	{ "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
+	{ "vmw_cotable_resize", "vmw_cotable_resize" },
+};
+#endif
 
 /**
  * vmw_open_channel
@@ -695,12 +702,6 @@ static inline void hypervisor_ppn_remove(PPN64 pfn)
 /* Header to the text description of mksGuestStat instance descriptor */
 #define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx"
 
-/* Kernel mksGuestStats counter names and desciptions; same order as enum mksstat_kern_stats_t */
-static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
-{
-	{ "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
-};
-
 /**
  * mksstat_init_record: Initializes an MKSGuestStatCounter-based record
  * for the respective mksGuestStat index.
@@ -786,6 +787,7 @@ static int mksstat_init_kern_id(struct page **ppage)
 	/* Set up all kernel-internal counters and corresponding structures */
 	pstrs_acc = pstrs;
 	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_EXECBUF, pstat, pinfo, pstrs_acc);
+	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_COTABLE_RESIZE, pstat, pinfo, pstrs_acc);
 
 	/* Add new counters above, in their order of appearance in mksstat_kern_stats_t */
 
@@ -1014,8 +1016,6 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
 
 	struct vmw_private *const dev_priv = vmw_priv(dev);
 
-	struct page *page;
-	MKSGuestStatInstanceDescriptor *pdesc;
 	const size_t num_pages_stat = PFN_UP(arg->stat_len);
 	const size_t num_pages_info = PFN_UP(arg->info_len);
 	const size_t num_pages_strs = PFN_UP(arg->strs_len);
@@ -1023,10 +1023,13 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
 	long nr_pinned_stat;
 	long nr_pinned_info;
 	long nr_pinned_strs;
-	struct page *pages_stat[ARRAY_SIZE(pdesc->statPPNs)];
-	struct page *pages_info[ARRAY_SIZE(pdesc->infoPPNs)];
-	struct page *pages_strs[ARRAY_SIZE(pdesc->strsPPNs)];
+	MKSGuestStatInstanceDescriptor *pdesc;
+	struct page *page = NULL;
+	struct page **pages_stat = NULL;
+	struct page **pages_info = NULL;
+	struct page **pages_strs = NULL;
 	size_t i, slot;
+	int ret_err = -ENOMEM;
 
 	arg->id = -1;
 
@@ -1054,13 +1057,23 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
 
 	BUG_ON(dev_priv->mksstat_user_pages[slot]);
 
+	/* Allocate statically-sized temp arrays for pages -- too big to keep in frame */
+	pages_stat = (struct page **)kmalloc_array(
+		ARRAY_SIZE(pdesc->statPPNs) +
+		ARRAY_SIZE(pdesc->infoPPNs) +
+		ARRAY_SIZE(pdesc->strsPPNs), sizeof(*pages_stat), GFP_KERNEL);
+
+	if (!pages_stat)
+		goto err_nomem;
+
+	pages_info = pages_stat + ARRAY_SIZE(pdesc->statPPNs);
+	pages_strs = pages_info + ARRAY_SIZE(pdesc->infoPPNs);
+
 	/* Allocate a page for the instance descriptor */
 	page =
alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) { - atomic_set(&dev_priv->mksstat_user_pids[slot], 0); - return -ENOMEM; - } + if (!page) + goto err_nomem; /* Set up the instance descriptor */ pdesc = page_address(page); @@ -1075,9 +1088,8 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data, ARRAY_SIZE(pdesc->description) - 1); if (desc_len < 0) { - atomic_set(&dev_priv->mksstat_user_pids[slot], 0); - __free_page(page); - return -EFAULT; + ret_err = -EFAULT; + goto err_nomem; } reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs)); @@ -1118,6 +1130,7 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data, DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%zu\n", current->pid, (int)desc_len, pdesc->description, slot); + kfree(pages_stat); return 0; err_pin_strs: @@ -1132,9 +1145,13 @@ err_pin_stat: if (nr_pinned_stat > 0) unpin_user_pages(pages_stat, nr_pinned_stat); +err_nomem: atomic_set(&dev_priv->mksstat_user_pids[slot], 0); - __free_page(page); - return -ENOMEM; + if (page) + __free_page(page); + kfree(pages_stat); + + return ret_err; } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index 7bc99b1279f7..f41f041559f4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -230,7 +230,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) { struct vmw_bo_dirty *dirty = vbo->dirty; - pgoff_t num_pages = vbo->base.resource->num_pages; + pgoff_t num_pages = PFN_UP(vbo->base.resource->size); size_t size; int ret; @@ -395,7 +395,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf) return ret; page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node); - if (unlikely(page_offset >= bo->resource->num_pages)) { + if (unlikely(page_offset >= PFN_UP(bo->resource->size))) { ret = VM_FAULT_SIGBUS; goto out_unlock; } @@ -438,7 +438,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf) page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node); - if (page_offset >= bo->resource->num_pages || + if (page_offset >= PFN_UP(bo->resource->size) || vmw_resources_clean(vbo, page_offset, page_offset + PAGE_SIZE, &allowed_prefault)) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 9c79873f62f0..e1f36a09c59c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -29,7 +29,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> -#include <drm/drm_vblank.h> #include "vmwgfx_kms.h" @@ -320,9 +319,6 @@ static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { .atomic_destroy_state = vmw_du_crtc_destroy_state, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, - .get_vblank_counter = vmw_get_vblank_counter, - .enable_vblank = vmw_enable_vblank, - .disable_vblank = vmw_disable_vblank, }; /* @@ -730,7 +726,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct drm_crtc *crtc = new_state->crtc; - struct drm_pending_vblank_event *event = NULL; struct vmw_fence_obj *fence = NULL; int ret; @@ -754,24 +749,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane, return; } - /* For error case vblank event is send from vmw_du_crtc_atomic_flush 
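
The vmw_mksstat_add_ioctl() rework above replaces three large on-stack page
arrays with a single heap allocation carved into three sub-arrays, and
funnels all failures through one err_nomem label. A userspace sketch of the
single-allocation carve, with made-up sizes:

	#include <stdlib.h>

	#define N_STAT 64	/* illustrative, not the driver's sizes */
	#define N_INFO 32
	#define N_STRS 32

	int main(void)
	{
		void **stat = calloc(N_STAT + N_INFO + N_STRS, sizeof(*stat));
		void **info, **strs;

		if (!stat)
			return 1;
		info = stat + N_STAT;	/* sub-arrays share one allocation, */
		strs = info + N_INFO;	/* so a single free() releases all three */
		free(stat);
		return 0;
	}
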
*/ - event = crtc->state->event; - if (event && fence) { - struct drm_file *file_priv = event->base.file_priv; - - ret = vmw_event_fence_action_queue(file_priv, - fence, - &event->base, - &event->event.vbl.tv_sec, - &event->event.vbl.tv_usec, - true); - - if (unlikely(ret != 0)) - DRM_ERROR("Failed to queue event on fence.\n"); - else - crtc->state->event = NULL; - } - if (fence) vmw_fence_obj_unreference(&fence); } @@ -947,7 +924,7 @@ err_free: int vmw_kms_sou_init_display(struct vmw_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; - int i, ret; + int i; /* Screen objects won't work if GMR's aren't available */ if (!dev_priv->has_gmr) @@ -957,12 +934,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv) return -ENOSYS; } - ret = -ENOMEM; - - ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS); - if (unlikely(ret != 0)) - return ret; - for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) vmw_sou_init(dev_priv, i); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 8650c3aea8f0..0090abe89254 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -29,7 +29,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> -#include <drm/drm_vblank.h> #include "vmwgfx_kms.h" #include "vmw_surface_cache.h" @@ -925,9 +924,6 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = { .atomic_destroy_state = vmw_du_crtc_destroy_state, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, - .get_vblank_counter = vmw_get_vblank_counter, - .enable_vblank = vmw_enable_vblank, - .disable_vblank = vmw_disable_vblank, }; @@ -1591,7 +1587,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); struct drm_crtc *crtc = new_state->crtc; struct vmw_screen_target_display_unit *stdu; - struct drm_pending_vblank_event *event; struct vmw_fence_obj *fence = NULL; struct vmw_private *dev_priv; int ret; @@ -1640,23 +1635,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, return; } - /* In case of error, vblank event is send in vmw_du_crtc_atomic_flush */ - event = crtc->state->event; - if (event && fence) { - struct drm_file *file_priv = event->base.file_priv; - - ret = vmw_event_fence_action_queue(file_priv, - fence, - &event->base, - &event->event.vbl.tv_sec, - &event->event.vbl.tv_usec, - true); - if (ret) - DRM_ERROR("Failed to queue event on fence.\n"); - else - crtc->state->event = NULL; - } - if (fence) vmw_fence_obj_unreference(&fence); } @@ -1883,10 +1861,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) return -ENOSYS; - ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS); - if (unlikely(ret != 0)) - return ret; - dev_priv->active_display_unit = vmw_du_screen_target; for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index ace7ca150b03..3bc63ae768f3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -815,11 +815,15 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, res->backup_size = cur_bo_offset; if (metadata->scanout && metadata->num_sizes == 1 && - metadata->sizes[0].width == 64 && - metadata->sizes[0].height == 64 && - metadata->format == SVGA3D_A8R8G8B8) { - - srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL); 
+ metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH && + metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT && + metadata->format == VMW_CURSOR_SNOOP_FORMAT) { + const struct SVGA3dSurfaceDesc *desc = + vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT); + const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH * + VMW_CURSOR_SNOOP_HEIGHT * + desc->pitchBytesPerBlock; + srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL); if (!srf->snooper.image) { DRM_ERROR("Failed to allocate cursor_image\n"); ret = -ENOMEM; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index f46891012be3..f5c4a40fb16d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2018 - 2022 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -180,11 +180,16 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx, if (!ctx->merge_dups) return NULL; - if (ctx->ht) { + if (ctx->sw_context) { struct vmwgfx_hash_item *hash; + unsigned long key = (unsigned long) vbo; - if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash)) - bo_node = container_of(hash, typeof(*bo_node), hash); + hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) { + if (hash->key == key) { + bo_node = container_of(hash, typeof(*bo_node), hash); + break; + } + } } else { struct vmw_validation_bo_node *entry; @@ -217,11 +222,16 @@ vmw_validation_find_res_dup(struct vmw_validation_context *ctx, if (!ctx->merge_dups) return NULL; - if (ctx->ht) { + if (ctx->sw_context) { struct vmwgfx_hash_item *hash; + unsigned long key = (unsigned long) res; - if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash)) - res_node = container_of(hash, typeof(*res_node), hash); + hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) { + if (hash->key == key) { + res_node = container_of(hash, typeof(*res_node), hash); + break; + } + } } else { struct vmw_validation_res_node *entry; @@ -269,20 +279,15 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx, } } else { struct ttm_validate_buffer *val_buf; - int ret; bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node)); if (!bo_node) return -ENOMEM; - if (ctx->ht) { + if (ctx->sw_context) { bo_node->hash.key = (unsigned long) vbo; - ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash); - if (ret) { - DRM_ERROR("Failed to initialize a buffer " - "validation entry.\n"); - return ret; - } + hash_add_rcu(ctx->sw_context->res_ht, &bo_node->hash.head, + bo_node->hash.key); } val_buf = &bo_node->base; val_buf->bo = ttm_bo_get_unless_zero(&vbo->base); @@ -316,7 +321,6 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx, bool *first_usage) { struct vmw_validation_res_node *node; - int ret; node = vmw_validation_find_res_dup(ctx, res); if (node) { @@ -330,14 +334,9 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx, return -ENOMEM; } - if (ctx->ht) { + if (ctx->sw_context) { node->hash.key = (unsigned long) res; - ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash); - if (ret) { - DRM_ERROR("Failed to initialize a resource validation " - "entry.\n"); - return ret; - } + hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key); } node->res = 
vmw_resource_reference_unless_doomed(res);
 	if (!node->res)
@@ -681,19 +680,19 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
 	struct vmw_validation_bo_node *entry;
 	struct vmw_validation_res_node *val;
 
-	if (!ctx->ht)
+	if (!ctx->sw_context)
 		return;
 
 	list_for_each_entry(entry, &ctx->bo_list, base.head)
-		(void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash);
+		hash_del_rcu(&entry->hash.head);
 
 	list_for_each_entry(val, &ctx->resource_list, head)
-		(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
+		hash_del_rcu(&val->hash.head);
 
 	list_for_each_entry(val, &ctx->resource_ctx_list, head)
-		(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
+		hash_del_rcu(&val->hash.head);
 
-	ctx->ht = NULL;
+	ctx->sw_context = NULL;
 }
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index f21df053882b..ab9ec226f433 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2018 - 2022 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,12 +29,11 @@
 #define _VMWGFX_VALIDATION_H_
 
 #include <linux/list.h>
+#include <linux/hashtable.h>
 #include <linux/ww_mutex.h>
 
 #include <drm/ttm/ttm_execbuf_util.h>
 
-#include "vmwgfx_hashtab.h"
-
 #define VMW_RES_DIRTY_NONE 0
 #define VMW_RES_DIRTY_SET BIT(0)
 #define VMW_RES_DIRTY_CLEAR BIT(1)
@@ -59,7 +58,7 @@
  * @total_mem: Amount of reserved memory.
  */
 struct vmw_validation_context {
-	struct vmwgfx_open_hash *ht;
+	struct vmw_sw_context *sw_context;
 	struct list_head resource_list;
 	struct list_head resource_ctx_list;
 	struct list_head bo_list;
@@ -82,16 +81,16 @@ struct vmw_fence_obj;
 /**
  * DECLARE_VAL_CONTEXT - Declare a validation context with initialization
  * @_name: The name of the variable
- * @_ht: The hash table used to find dups or NULL if none
+ * @_sw_context: Contains the hash table used to find dups or NULL if none
  * @_merge_dups: Whether to merge duplicate buffer object- or resource
  * entries. If set to true, ideally a hash table pointer should be supplied
  * as well unless the number of resources and buffer objects per validation
 * is known to be very small
 */
 #endif
-#define DECLARE_VAL_CONTEXT(_name, _ht, _merge_dups)			\
+#define DECLARE_VAL_CONTEXT(_name, _sw_context, _merge_dups)		\
 	struct vmw_validation_context _name =				\
-	{ .ht = _ht,							\
+	{ .sw_context = _sw_context,					\
 	  .resource_list = LIST_HEAD_INIT((_name).resource_list),	\
 	  .resource_ctx_list = LIST_HEAD_INIT((_name).resource_ctx_list), \
 	  .bo_list = LIST_HEAD_INIT((_name).bo_list),			\
@@ -115,19 +114,6 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
 }
 
 /**
- * vmw_validation_set_ht - Register a hash table for duplicate finding
- * @ctx: The validation context
- * @ht: Pointer to a hash table to use for duplicate finding
- * This function is intended to be used if the hash table wasn't
- * available at validation context declaration time
- */
-static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
-					 struct vmwgfx_open_hash *ht)
-{
-	ctx->ht = ht;
-}
-
-/**
 * vmw_validation_bo_reserve - Reserve buffer objects registered with a
 * validation context
 * @ctx: The validation context
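
DECLARE_VAL_CONTEXT() above illustrates the pattern this header keeps using:
a macro that expands to a designated-initializer definition, so every list
head in the context is valid the moment the variable exists. A compilable
userspace sketch of the same pattern with made-up names:

	#include <stdio.h>

	struct demo_ctx {
		void *sw_context;
		int merge_dups;
	};

	#define DECLARE_DEMO_CTX(_name, _sw_context, _merge_dups)	\
		struct demo_ctx _name = {				\
			.sw_context = (_sw_context),			\
			.merge_dups = (_merge_dups),			\
		}

	int main(void)
	{
		DECLARE_DEMO_CTX(ctx, NULL, 1);

		printf("merge_dups=%d\n", ctx.merge_dups);
		return 0;
	}
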