Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c |  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c     | 94
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h     |  4
4 files changed, 58 insertions, 67 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 427a7c5ba93d..957934926b24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -303,7 +303,8 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 
 	switch (bo->tbo.mem.mem_type) {
 	case TTM_PL_TT:
-		sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+		sgt = drm_prime_pages_to_sg(obj->dev,
+					    bo->tbo.ttm->pages,
 					    bo->tbo.num_pages);
 		if (IS_ERR(sgt))
 			return sgt;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 887a4da5eefc..ac043baac05d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -136,8 +136,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 
 		places[c].fpfn = 0;
 		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-			TTM_PL_FLAG_VRAM;
+		places[c].mem_type = TTM_PL_VRAM;
+		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
 
 		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
 			places[c].lpfn = visible_pfn;
@@ -152,7 +152,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
 		places[c].fpfn = 0;
 		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_TT;
+		places[c].mem_type = TTM_PL_TT;
+		places[c].flags = 0;
 		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 			places[c].flags |= TTM_PL_FLAG_WC |
 				TTM_PL_FLAG_UNCACHED;
@@ -164,7 +165,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
 		places[c].fpfn = 0;
 		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_SYSTEM;
+		places[c].mem_type = TTM_PL_SYSTEM;
+		places[c].flags = 0;
 		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 			places[c].flags |= TTM_PL_FLAG_WC |
 				TTM_PL_FLAG_UNCACHED;
@@ -176,28 +178,32 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
 		places[c].fpfn = 0;
 		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+		places[c].mem_type = AMDGPU_PL_GDS;
+		places[c].flags = TTM_PL_FLAG_UNCACHED;
 		c++;
 	}
 
 	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
 		places[c].fpfn = 0;
 		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+		places[c].mem_type = AMDGPU_PL_GWS;
+		places[c].flags = TTM_PL_FLAG_UNCACHED;
 		c++;
 	}
 
 	if (domain & AMDGPU_GEM_DOMAIN_OA) {
 		places[c].fpfn = 0;
 		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+		places[c].mem_type = AMDGPU_PL_OA;
+		places[c].flags = TTM_PL_FLAG_UNCACHED;
 		c++;
 	}
 
 	if (!c) {
 		places[c].fpfn = 0;
 		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		places[c].mem_type = TTM_PL_SYSTEM;
+		places[c].flags = TTM_PL_MASK_CACHING;
 		c++;
 	}
 
@@ -594,7 +600,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
 	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
-	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+	    bo->tbo.mem.mem_type == TTM_PL_VRAM) {
 		struct dma_fence *fence;
 
 		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a18dc878339a..6fc3af082f6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -88,7 +88,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 	static const struct ttm_place placements = {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+		.mem_type = TTM_PL_SYSTEM,
+		.flags = TTM_PL_MASK_CACHING
 	};
 
 	/* Don't handle scatter gather BOs */
@@ -175,24 +176,6 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 }
 
 /**
- * amdgpu_move_null - Register memory for a buffer object
- *
- * @bo: The bo to assign the memory to
- * @new_mem: The memory to be assigned.
- *
- * Assign the memory from new_mem to the memory of the buffer object bo.
- */
-static void amdgpu_move_null(struct ttm_buffer_object *bo,
-			     struct ttm_resource *new_mem)
-{
-	struct ttm_resource *old_mem = &bo->mem;
-
-	BUG_ON(old_mem->mm_node != NULL);
-	*old_mem = *new_mem;
-	new_mem->mm_node = NULL;
-}
-
-/**
  * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
  *
  * @bo: The bo to assign the memory to.
@@ -551,7 +534,8 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
 	placements.lpfn = 0;
-	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	placements.mem_type = TTM_PL_TT;
+	placements.flags = TTM_PL_MASK_CACHING;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		pr_err("Failed to find GTT space for blit from VRAM\n");
@@ -565,7 +549,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
 	}
 
 	/* Bind the memory to the GTT space */
-	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
+	r = ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -607,7 +591,8 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
 	placements.lpfn = 0;
-	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	placements.mem_type = TTM_PL_TT;
+	placements.flags = TTM_PL_MASK_CACHING;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		pr_err("Failed to find GTT space for blit to VRAM\n");
@@ -676,7 +661,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 	adev = amdgpu_ttm_adev(bo->bdev);
 
 	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
-		amdgpu_move_null(bo, new_mem);
+		ttm_bo_move_null(bo, new_mem);
 		return 0;
 	}
 	if ((old_mem->mem_type == TTM_PL_TT &&
@@ -684,7 +669,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 	    (old_mem->mem_type == TTM_PL_SYSTEM &&
 	     new_mem->mem_type == TTM_PL_TT)) {
 		/* bind is enough */
-		amdgpu_move_null(bo, new_mem);
+		ttm_bo_move_null(bo, new_mem);
 		return 0;
 	}
 	if (old_mem->mem_type == AMDGPU_PL_GDS ||
@@ -694,7 +679,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 	    new_mem->mem_type == AMDGPU_PL_GWS ||
 	    new_mem->mem_type == AMDGPU_PL_OA) {
 		/* Nothing to save here */
-		amdgpu_move_null(bo, new_mem);
+		ttm_bo_move_null(bo, new_mem);
 		return 0;
 	}
 
@@ -773,7 +758,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
 			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
 					mem->bus.offset;
 
-		mem->bus.base = adev->gmc.aper_base;
+		mem->bus.offset += adev->gmc.aper_base;
 		mem->bus.is_iomem = true;
 		break;
 	default:
@@ -785,12 +770,13 @@
 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
 					   unsigned long page_offset)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	uint64_t offset = (page_offset << PAGE_SHIFT);
 	struct drm_mm_node *mm;
 
 	mm = amdgpu_find_mm_node(&bo->mem, &offset);
-	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
-	       (offset >> PAGE_SHIFT);
+	offset += adev->gmc.aper_base;
+	return mm->start + (offset >> PAGE_SHIFT);
 }
 
 /**
@@ -991,9 +977,10 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
  *
  * Called by amdgpu_ttm_backend_bind()
  **/
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
+				     struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	int r;
 
@@ -1027,9 +1014,10 @@ release_sg:
 /**
  * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
  */
-static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+					struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
 
@@ -1110,16 +1098,17 @@ gart_bind_fail:
  * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
  * This handles binding GTT memory to the device address space.
  */
-static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
+static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+				   struct ttm_tt *ttm,
 				   struct ttm_resource *bo_mem)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void*)ttm;
 	uint64_t flags;
 	int r = 0;
 
 	if (gtt->userptr) {
-		r = amdgpu_ttm_tt_pin_userptr(ttm);
+		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
 		if (r) {
 			DRM_ERROR("failed to pin userptr\n");
 			return r;
@@ -1185,8 +1174,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
 	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
-	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
-		TTM_PL_FLAG_TT;
+	placements.mem_type = TTM_PL_TT;
+	placements.flags = bo->mem.placement;
 
 	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
 	if (unlikely(r))
@@ -1237,15 +1226,16 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
  * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
  * ttm_tt_destroy().
  */
-static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+				      struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	int r;
 
 	/* if the pages have userptr pinning then clear that first */
 	if (gtt->userptr)
-		amdgpu_ttm_tt_unpin_userptr(ttm);
+		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
 
 	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
 		return;
@@ -1257,7 +1247,8 @@ static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 			  gtt->ttm.ttm.num_pages, gtt->offset);
 }
 
-static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
+				       struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
@@ -1268,12 +1259,6 @@ static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
 	kfree(gtt);
 }
 
-static struct ttm_backend_func amdgpu_backend_func = {
-	.bind = &amdgpu_ttm_backend_bind,
-	.unbind = &amdgpu_ttm_backend_unbind,
-	.destroy = &amdgpu_ttm_backend_destroy,
-};
-
 /**
  * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
  *
@@ -1290,7 +1275,6 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 	if (gtt == NULL) {
 		return NULL;
 	}
-	gtt->ttm.ttm.func = &amdgpu_backend_func;
 	gtt->gobj = &bo->base;
 
 	/* allocate space for the uninitialized page entries */
@@ -1307,10 +1291,11 @@
  * Map the pages of a ttm_tt object to an address space visible
  * to the underlying device.
  */
-static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
-			struct ttm_operation_ctx *ctx)
+static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
+				  struct ttm_tt *ttm,
+				  struct ttm_operation_ctx *ctx)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
 	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
@@ -1361,7 +1346,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
  * Unmaps pages of a ttm_tt object from the device address space and
  * unpopulates the page array backing it.
 */
-static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	struct amdgpu_device *adev;
@@ -1385,7 +1370,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
 	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
 		return;
 
-	adev = amdgpu_ttm_adev(ttm->bdev);
+	adev = amdgpu_ttm_adev(bdev);
 
 #ifdef CONFIG_SWIOTLB
 	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
@@ -1691,6 +1676,9 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
 	.ttm_tt_create = &amdgpu_ttm_tt_create,
 	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
 	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
+	.ttm_tt_bind = &amdgpu_ttm_backend_bind,
+	.ttm_tt_unbind = &amdgpu_ttm_backend_unbind,
+	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
 	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
 	.evict_flags = &amdgpu_evict_flags,
 	.move = &amdgpu_bo_move,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 68c5e3662b87..a87951b2f06d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -32,10 +32,6 @@
 #define AMDGPU_PL_GWS		(TTM_PL_PRIV + 1)
 #define AMDGPU_PL_OA		(TTM_PL_PRIV + 2)
 
-#define AMDGPU_PL_FLAG_GDS		(TTM_PL_FLAG_PRIV << 0)
-#define AMDGPU_PL_FLAG_GWS		(TTM_PL_FLAG_PRIV << 1)
-#define AMDGPU_PL_FLAG_OA		(TTM_PL_FLAG_PRIV << 2)
-
 #define AMDGPU_GTT_MAX_TRANSFER_SIZE	512
 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2
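
Note (illustration, not part of the patch): the hunks above reduce to two TTM interface moves. A ttm_place now names its memory pool through the explicit mem_type field instead of encoding it in flags, which keep only the caching/write-combine bits; and the per-ttm_tt backend_func (bind/unbind/destroy) is replaced by ttm_bo_driver callbacks that receive the ttm_bo_device directly. The sketch below restates those two shapes outside of amdgpu; the foo_* identifiers are hypothetical placeholders and the stub bodies only mark where a real driver would program its GART and free its container.

/* Illustrative sketch only -- not taken from the patch above. */
#include <linux/slab.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* Old style: the pool (SYSTEM/TT/VRAM) was a bit inside .flags. */
static const struct ttm_place place_old = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
};

/* New style: the pool is the explicit .mem_type, .flags keeps caching bits. */
static const struct ttm_place place_new = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = TTM_PL_MASK_CACHING,
};

/* Hypothetical driver hooks using the new bdev-taking signatures. */
static int foo_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			   struct ttm_resource *bo_mem)
{
	/* A real driver looks up its device from bdev (as amdgpu does with
	 * amdgpu_ttm_adev(bdev)) and programs its GART for bo_mem here. */
	return 0;
}

static void foo_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	/* Tear down the GART mapping set up in foo_ttm_tt_bind(). */
}

static void foo_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	/* A real driver finalizes the embedded ttm_tt and frees its own
	 * container (amdgpu: ttm_dma_tt_fini() + kfree(gtt)); kfree() of the
	 * bare ttm_tt stands in for that here. */
	kfree(ttm);
}

/* The three hooks now live in ttm_bo_driver instead of a ttm_backend_func
 * attached to each ttm_tt, mirroring the amdgpu_bo_driver hunk above. */
static struct ttm_bo_driver foo_bo_driver = {
	.ttm_tt_bind = &foo_ttm_tt_bind,
	.ttm_tt_unbind = &foo_ttm_tt_unbind,
	.ttm_tt_destroy = &foo_ttm_tt_destroy,
	/* .ttm_tt_create, .move, .evict_flags, ... as before */
};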