author     Maarten Lankhorst <maarten.lankhorst@linux.intel.com>   2020-02-12 14:08:59 +0100
committer  Maarten Lankhorst <maarten.lankhorst@linux.intel.com>   2020-02-12 14:08:59 +0100
commit     74c12ee02af109adcde36ec184fa59c0afb0edaa
tree       dfe4615e0a5e3f8583e685aacdd9a0908e9ffbe3 /drivers/gpu/drm/nouveau/nvkm/subdev
parent     drm/bridge: ti-tfp410: Update drm_connector_init_with_ddc() error message
parent     Linux 5.6-rc1
Merge v5.6-rc1 into drm-misc-fixes
We're based on v5.6, need v5.6-rc1 at least. :)
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev')
86 files changed, 3197 insertions(+), 4703 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index 4e136f3d7c28..fb4fff1222af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: MIT
+include $(src)/nvkm/subdev/acr/Kbuild
 include $(src)/nvkm/subdev/bar/Kbuild
 include $(src)/nvkm/subdev/bios/Kbuild
 include $(src)/nvkm/subdev/bus/Kbuild
@@ -19,7 +20,6 @@ include $(src)/nvkm/subdev/mmu/Kbuild
 include $(src)/nvkm/subdev/mxm/Kbuild
 include $(src)/nvkm/subdev/pci/Kbuild
 include $(src)/nvkm/subdev/pmu/Kbuild
-include $(src)/nvkm/subdev/secboot/Kbuild
 include $(src)/nvkm/subdev/therm/Kbuild
 include $(src)/nvkm/subdev/timer/Kbuild
 include $(src)/nvkm/subdev/top/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild
new file mode 100644
index 000000000000..5b9f64a8957f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/acr/base.o
+nvkm-y += nvkm/subdev/acr/hsfw.o
+nvkm-y += nvkm/subdev/acr/lsfw.o
+nvkm-y += nvkm/subdev/acr/gm200.o
+nvkm-y += nvkm/subdev/acr/gm20b.o
+nvkm-y += nvkm/subdev/acr/gp102.o
+nvkm-y += nvkm/subdev/acr/gp108.o
+nvkm-y += nvkm/subdev/acr/gp10b.o
+nvkm-y += nvkm/subdev/acr/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
new file mode 100644
index 000000000000..8eb2a930a9b5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */ +#include "priv.h" + +#include <core/firmware.h> +#include <core/memory.h> +#include <subdev/mmu.h> + +static struct nvkm_acr_hsf * +nvkm_acr_hsf_find(struct nvkm_acr *acr, const char *name) +{ + struct nvkm_acr_hsf *hsf; + list_for_each_entry(hsf, &acr->hsf, head) { + if (!strcmp(hsf->name, name)) + return hsf; + } + return NULL; +} + +int +nvkm_acr_hsf_boot(struct nvkm_acr *acr, const char *name) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_acr_hsf *hsf; + int ret; + + hsf = nvkm_acr_hsf_find(acr, name); + if (!hsf) + return -EINVAL; + + nvkm_debug(subdev, "executing %s binary\n", hsf->name); + ret = nvkm_falcon_get(hsf->falcon, subdev); + if (ret) + return ret; + + ret = hsf->func->boot(acr, hsf); + nvkm_falcon_put(hsf->falcon, subdev); + if (ret) { + nvkm_error(subdev, "%s binary failed\n", hsf->name); + return ret; + } + + nvkm_debug(subdev, "%s binary completed successfully\n", hsf->name); + return 0; +} + +static void +nvkm_acr_unload(struct nvkm_acr *acr) +{ + if (acr->done) { + nvkm_acr_hsf_boot(acr, "unload"); + acr->done = false; + } +} + +static int +nvkm_acr_load(struct nvkm_acr *acr) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_acr_lsf *lsf; + u64 start, limit; + int ret; + + if (list_empty(&acr->lsf)) { + nvkm_debug(subdev, "No LSF(s) present.\n"); + return 0; + } + + ret = acr->func->init(acr); + if (ret) + return ret; + + acr->func->wpr_check(acr, &start, &limit); + + if (start != acr->wpr_start || limit != acr->wpr_end) { + nvkm_error(subdev, "WPR not configured as expected: " + "%016llx-%016llx vs %016llx-%016llx\n", + acr->wpr_start, acr->wpr_end, start, limit); + return -EIO; + } + + acr->done = true; + + list_for_each_entry(lsf, &acr->lsf, head) { + if (lsf->func->boot) { + ret = lsf->func->boot(lsf->falcon); + if (ret) + break; + } + } + + return ret; +} + +static int +nvkm_acr_reload(struct nvkm_acr *acr) +{ + nvkm_acr_unload(acr); + return nvkm_acr_load(acr); +} + +static struct nvkm_acr_lsf * +nvkm_acr_falcon(struct nvkm_device *device) +{ + struct nvkm_acr *acr = device->acr; + struct nvkm_acr_lsf *lsf; + + if (acr) { + list_for_each_entry(lsf, &acr->lsf, head) { + if (lsf->func->bootstrap_falcon) + return lsf; + } + } + + return NULL; +} + +int +nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask) +{ + struct nvkm_acr_lsf *acrflcn = nvkm_acr_falcon(device); + struct nvkm_acr *acr = device->acr; + unsigned long id; + + if (!acrflcn) { + int ret = nvkm_acr_reload(acr); + if (ret) + return ret; + + return acr->done ? 
0 : -EINVAL; + } + + if (acrflcn->func->bootstrap_multiple_falcons) { + return acrflcn->func-> + bootstrap_multiple_falcons(acrflcn->falcon, mask); + } + + for_each_set_bit(id, &mask, NVKM_ACR_LSF_NUM) { + int ret = acrflcn->func->bootstrap_falcon(acrflcn->falcon, id); + if (ret) + return ret; + } + + return 0; +} + +bool +nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id) +{ + struct nvkm_acr *acr = device->acr; + struct nvkm_acr_lsf *lsf; + + if (acr) { + list_for_each_entry(lsf, &acr->lsf, head) { + if (lsf->id == id) + return true; + } + } + + return false; +} + +static int +nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend) +{ + nvkm_acr_unload(nvkm_acr(subdev)); + return 0; +} + +static int +nvkm_acr_init(struct nvkm_subdev *subdev) +{ + if (!nvkm_acr_falcon(subdev->device)) + return 0; + + return nvkm_acr_load(nvkm_acr(subdev)); +} + +static void +nvkm_acr_cleanup(struct nvkm_acr *acr) +{ + nvkm_acr_lsfw_del_all(acr); + nvkm_acr_hsfw_del_all(acr); + nvkm_firmware_put(acr->wpr_fw); + acr->wpr_fw = NULL; +} + +static int +nvkm_acr_oneinit(struct nvkm_subdev *subdev) +{ + struct nvkm_device *device = subdev->device; + struct nvkm_acr *acr = nvkm_acr(subdev); + struct nvkm_acr_hsfw *hsfw; + struct nvkm_acr_lsfw *lsfw, *lsft; + struct nvkm_acr_lsf *lsf; + u32 wpr_size = 0; + int ret, i; + + if (list_empty(&acr->hsfw)) { + nvkm_debug(subdev, "No HSFW(s)\n"); + nvkm_acr_cleanup(acr); + return 0; + } + + /* Determine layout/size of WPR image up-front, as we need to know + * it to allocate memory before we begin constructing it. + */ + list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) { + /* Cull unknown falcons that are present in WPR image. */ + if (acr->wpr_fw) { + if (!lsfw->func) { + nvkm_acr_lsfw_del(lsfw); + continue; + } + + wpr_size = acr->wpr_fw->size; + } + + /* Ensure we've fetched falcon configuration. */ + ret = nvkm_falcon_get(lsfw->falcon, subdev); + if (ret) + return ret; + + nvkm_falcon_put(lsfw->falcon, subdev); + + if (!(lsf = kmalloc(sizeof(*lsf), GFP_KERNEL))) + return -ENOMEM; + lsf->func = lsfw->func; + lsf->falcon = lsfw->falcon; + lsf->id = lsfw->id; + list_add_tail(&lsf->head, &acr->lsf); + } + + if (!acr->wpr_fw || acr->wpr_comp) + wpr_size = acr->func->wpr_layout(acr); + + /* Allocate/Locate WPR + fill ucode blob pointer. + * + * dGPU: allocate WPR + shadow blob + * Tegra: locate WPR with regs, ensure size is sufficient, + * allocate ucode blob. + */ + ret = acr->func->wpr_alloc(acr, wpr_size); + if (ret) + return ret; + + nvkm_debug(subdev, "WPR region is from 0x%llx-0x%llx (shadow 0x%llx)\n", + acr->wpr_start, acr->wpr_end, acr->shadow_start); + + /* Write WPR to ucode blob. */ + nvkm_kmap(acr->wpr); + if (acr->wpr_fw && !acr->wpr_comp) + nvkm_wobj(acr->wpr, 0, acr->wpr_fw->data, acr->wpr_fw->size); + + if (!acr->wpr_fw || acr->wpr_comp) + acr->func->wpr_build(acr, nvkm_acr_falcon(device)); + acr->func->wpr_patch(acr, (s64)acr->wpr_start - acr->wpr_prev); + + if (acr->wpr_fw && acr->wpr_comp) { + nvkm_kmap(acr->wpr); + for (i = 0; i < acr->wpr_fw->size; i += 4) { + u32 us = nvkm_ro32(acr->wpr, i); + u32 fw = ((u32 *)acr->wpr_fw->data)[i/4]; + if (fw != us) { + nvkm_warn(subdev, "%08x: %08x %08x\n", + i, us, fw); + } + } + return -EINVAL; + } + nvkm_done(acr->wpr); + + /* Allocate instance block for ACR-related stuff. 
*/ + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true, + &acr->inst); + if (ret) + return ret; + + ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "acr", &acr->vmm); + if (ret) + return ret; + + acr->vmm->debug = acr->subdev.debug; + + ret = nvkm_vmm_join(acr->vmm, acr->inst); + if (ret) + return ret; + + /* Load HS firmware blobs into ACR VMM. */ + list_for_each_entry(hsfw, &acr->hsfw, head) { + nvkm_debug(subdev, "loading %s fw\n", hsfw->name); + ret = hsfw->func->load(acr, hsfw); + if (ret) + return ret; + } + + /* Kill temporary data. */ + nvkm_acr_cleanup(acr); + return 0; +} + +static void * +nvkm_acr_dtor(struct nvkm_subdev *subdev) +{ + struct nvkm_acr *acr = nvkm_acr(subdev); + struct nvkm_acr_hsf *hsf, *hst; + struct nvkm_acr_lsf *lsf, *lst; + + list_for_each_entry_safe(hsf, hst, &acr->hsf, head) { + nvkm_vmm_put(acr->vmm, &hsf->vma); + nvkm_memory_unref(&hsf->ucode); + kfree(hsf->imem); + list_del(&hsf->head); + kfree(hsf); + } + + nvkm_vmm_part(acr->vmm, acr->inst); + nvkm_vmm_unref(&acr->vmm); + nvkm_memory_unref(&acr->inst); + + nvkm_memory_unref(&acr->wpr); + + list_for_each_entry_safe(lsf, lst, &acr->lsf, head) { + list_del(&lsf->head); + kfree(lsf); + } + + nvkm_acr_cleanup(acr); + return acr; +} + +static const struct nvkm_subdev_func +nvkm_acr = { + .dtor = nvkm_acr_dtor, + .oneinit = nvkm_acr_oneinit, + .init = nvkm_acr_init, + .fini = nvkm_acr_fini, +}; + +static int +nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_device *device = subdev->device; + int ret; + + ret = nvkm_firmware_get(subdev, "acr/wpr", ver, &acr->wpr_fw); + if (ret < 0) + return ret; + + /* Pre-add LSFs in the order they appear in the FW WPR image so that + * we're able to do a binary comparison with our own generator. + */ + ret = acr->func->wpr_parse(acr); + if (ret) + return ret; + + acr->wpr_comp = nvkm_boolopt(device->cfgopt, "NvAcrWprCompare", false); + acr->wpr_prev = nvkm_longopt(device->cfgopt, "NvAcrWprPrevAddr", 0); + return 0; +} + +int +nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device, + int index, struct nvkm_acr **pacr) +{ + struct nvkm_acr *acr; + long wprfw; + + if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL))) + return -ENOMEM; + nvkm_subdev_ctor(&nvkm_acr, device, index, &acr->subdev); + INIT_LIST_HEAD(&acr->hsfw); + INIT_LIST_HEAD(&acr->lsfw); + INIT_LIST_HEAD(&acr->hsf); + INIT_LIST_HEAD(&acr->lsf); + + fwif = nvkm_firmware_load(&acr->subdev, fwif, "Acr", acr); + if (IS_ERR(fwif)) + return PTR_ERR(fwif); + + acr->func = fwif->func; + + wprfw = nvkm_longopt(device->cfgopt, "NvAcrWpr", -1); + if (wprfw >= 0) { + int ret = nvkm_acr_ctor_wpr(acr, wprfw); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c new file mode 100644 index 000000000000..9a6394085cf0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c @@ -0,0 +1,470 @@ +/* + * Copyright 2019 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <core/falcon.h> +#include <core/firmware.h> +#include <core/memory.h> +#include <subdev/mc.h> +#include <subdev/mmu.h> +#include <subdev/pmu.h> +#include <subdev/timer.h> + +#include <nvfw/acr.h> +#include <nvfw/flcn.h> + +int +gm200_acr_init(struct nvkm_acr *acr) +{ + return nvkm_acr_hsf_boot(acr, "load"); +} + +void +gm200_acr_wpr_check(struct nvkm_acr *acr, u64 *start, u64 *limit) +{ + struct nvkm_device *device = acr->subdev.device; + + nvkm_wr32(device, 0x100cd4, 2); + *start = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8; + nvkm_wr32(device, 0x100cd4, 3); + *limit = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8; + *limit = *limit + 0x20000; +} + +void +gm200_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct wpr_header hdr; + struct lsb_header lsb; + struct nvkm_acr_lsf *lsfw; + u32 offset = 0; + + do { + nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr)); + wpr_header_dump(subdev, &hdr); + + list_for_each_entry(lsfw, &acr->lsfw, head) { + if (lsfw->id != hdr.falcon_id) + continue; + + nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb)); + lsb_header_dump(subdev, &lsb); + + lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust); + break; + } + offset += sizeof(hdr); + } while (hdr.falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID); +} + +void +gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *lsfw, + struct lsb_header_tail *hdr) +{ + hdr->ucode_off = lsfw->offset.img; + hdr->ucode_size = lsfw->ucode_size; + hdr->data_size = lsfw->data_size; + hdr->bl_code_size = lsfw->bootloader_size; + hdr->bl_imem_off = lsfw->bootloader_imem_offset; + hdr->bl_data_off = lsfw->offset.bld; + hdr->bl_data_size = lsfw->bl_data_size; + hdr->app_code_off = lsfw->app_start_offset + + lsfw->app_resident_code_offset; + hdr->app_code_size = lsfw->app_resident_code_size; + hdr->app_data_off = lsfw->app_start_offset + + lsfw->app_resident_data_offset; + hdr->app_data_size = lsfw->app_resident_data_size; + hdr->flags = lsfw->func->flags; +} + +static int +gm200_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw) +{ + struct lsb_header hdr; + + if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature))) + return -EINVAL; + + memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size); + gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail); + + nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr)); + return 0; +} + +int 
+gm200_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos) +{ + struct nvkm_acr_lsfw *lsfw; + u32 offset = 0; + int ret; + + /* Fill per-LSF structures. */ + list_for_each_entry(lsfw, &acr->lsfw, head) { + struct wpr_header hdr = { + .falcon_id = lsfw->id, + .lsb_offset = lsfw->offset.lsb, + .bootstrap_owner = NVKM_ACR_LSF_PMU, + .lazy_bootstrap = rtos && lsfw->id != rtos->id, + .status = WPR_HEADER_V0_STATUS_COPY, + }; + + /* Write WPR header. */ + nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr)); + offset += sizeof(hdr); + + /* Write LSB header. */ + ret = gm200_acr_wpr_build_lsb(acr, lsfw); + if (ret) + return ret; + + /* Write ucode image. */ + nvkm_wobj(acr->wpr, lsfw->offset.img, + lsfw->img.data, + lsfw->img.size); + + /* Write bootloader data. */ + lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw); + } + + /* Finalise WPR. */ + nvkm_wo32(acr->wpr, offset, WPR_HEADER_V0_FALCON_ID_INVALID); + return 0; +} + +static int +gm200_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size) +{ + int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST, + ALIGN(wpr_size, 0x40000), 0x40000, true, + &acr->wpr); + if (ret) + return ret; + + acr->wpr_start = nvkm_memory_addr(acr->wpr); + acr->wpr_end = acr->wpr_start + nvkm_memory_size(acr->wpr); + return 0; +} + +u32 +gm200_acr_wpr_layout(struct nvkm_acr *acr) +{ + struct nvkm_acr_lsfw *lsfw; + u32 wpr = 0; + + wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header); + + list_for_each_entry(lsfw, &acr->lsfw, head) { + wpr = ALIGN(wpr, 256); + lsfw->offset.lsb = wpr; + wpr += sizeof(struct lsb_header); + + wpr = ALIGN(wpr, 4096); + lsfw->offset.img = wpr; + wpr += lsfw->img.size; + + wpr = ALIGN(wpr, 256); + lsfw->offset.bld = wpr; + lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256); + wpr += lsfw->bl_data_size; + } + + return wpr; +} + +int +gm200_acr_wpr_parse(struct nvkm_acr *acr) +{ + const struct wpr_header *hdr = (void *)acr->wpr_fw->data; + + while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) { + wpr_header_dump(&acr->subdev, hdr); + if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) + return -ENOMEM; + } + + return 0; +} + +void +gm200_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + struct flcn_bl_dmem_desc_v1 hsdesc = { + .ctx_dma = FALCON_DMAIDX_VIRT, + .code_dma_base = hsf->vma->addr, + .non_sec_code_off = hsf->non_sec_addr, + .non_sec_code_size = hsf->non_sec_size, + .sec_code_off = hsf->sec_addr, + .sec_code_size = hsf->sec_size, + .code_entry_point = 0, + .data_dma_base = hsf->vma->addr + hsf->data_addr, + .data_size = hsf->data_size, + }; + + flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hsdesc); + + nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0); +} + +int +gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf, + u32 intr_clear, u32 mbox0_ok) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_falcon *falcon = hsf->falcon; + u32 mbox0, mbox1; + int ret; + + /* Reset falcon. */ + nvkm_falcon_reset(falcon); + nvkm_falcon_bind_context(falcon, acr->inst); + + /* Load bootloader into IMEM. */ + nvkm_falcon_load_imem(falcon, hsf->imem, + falcon->code.limit - hsf->imem_size, + hsf->imem_size, + hsf->imem_tag, + 0, false); + + /* Load bootloader data into DMEM. */ + hsf->func->bld(acr, hsf); + + /* Boot the falcon. 
*/ + nvkm_mc_intr_mask(device, falcon->owner->index, false); + + nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5); + nvkm_falcon_set_start_addr(falcon, hsf->imem_tag << 8); + nvkm_falcon_start(falcon); + ret = nvkm_falcon_wait_for_halt(falcon, 100); + if (ret) + return ret; + + /* Check for successful completion. */ + mbox0 = nvkm_falcon_rd32(falcon, 0x040); + mbox1 = nvkm_falcon_rd32(falcon, 0x044); + nvkm_debug(subdev, "mailbox %08x %08x\n", mbox0, mbox1); + if (mbox0 && mbox0 != mbox0_ok) + return -EIO; + + nvkm_falcon_clear_interrupt(falcon, intr_clear); + nvkm_mc_intr_mask(device, falcon->owner->index, true); + return ret; +} + +int +gm200_acr_hsfw_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw, + struct nvkm_falcon *falcon) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_acr_hsf *hsf; + int ret; + + /* Patch the appropriate signature (production/debug) into the FW + * image, as determined by the mode the falcon is in. + */ + ret = nvkm_falcon_get(falcon, subdev); + if (ret) + return ret; + + if (hsfw->sig.patch_loc) { + if (!falcon->debug) { + nvkm_debug(subdev, "patching production signature\n"); + memcpy(hsfw->image + hsfw->sig.patch_loc, + hsfw->sig.prod.data, + hsfw->sig.prod.size); + } else { + nvkm_debug(subdev, "patching debug signature\n"); + memcpy(hsfw->image + hsfw->sig.patch_loc, + hsfw->sig.dbg.data, + hsfw->sig.dbg.size); + } + } + + nvkm_falcon_put(falcon, subdev); + + if (!(hsf = kzalloc(sizeof(*hsf), GFP_KERNEL))) + return -ENOMEM; + hsf->func = hsfw->func; + hsf->name = hsfw->name; + list_add_tail(&hsf->head, &acr->hsf); + + hsf->imem_size = hsfw->imem_size; + hsf->imem_tag = hsfw->imem_tag; + hsf->imem = kmemdup(hsfw->imem, hsfw->imem_size, GFP_KERNEL); + if (!hsf->imem) + return -ENOMEM; + + hsf->non_sec_addr = hsfw->non_sec_addr; + hsf->non_sec_size = hsfw->non_sec_size; + hsf->sec_addr = hsfw->sec_addr; + hsf->sec_size = hsfw->sec_size; + hsf->data_addr = hsfw->data_addr; + hsf->data_size = hsfw->data_size; + + /* Make the FW image accessible to the HS bootloader. 
*/ + ret = nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST, + hsfw->image_size, 0x1000, false, &hsf->ucode); + if (ret) + return ret; + + nvkm_kmap(hsf->ucode); + nvkm_wobj(hsf->ucode, 0, hsfw->image, hsfw->image_size); + nvkm_done(hsf->ucode); + + ret = nvkm_vmm_get(acr->vmm, 12, nvkm_memory_size(hsf->ucode), + &hsf->vma); + if (ret) + return ret; + + ret = nvkm_memory_map(hsf->ucode, 0, acr->vmm, hsf->vma, NULL, 0); + if (ret) + return ret; + + hsf->falcon = falcon; + return 0; +} + +int +gm200_acr_unload_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + return gm200_acr_hsfw_boot(acr, hsf, 0, 0x1d); +} + +int +gm200_acr_unload_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon); +} + +const struct nvkm_acr_hsf_func +gm200_acr_unload_0 = { + .load = gm200_acr_unload_load, + .boot = gm200_acr_unload_boot, + .bld = gm200_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin"); +MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin"); +MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin"); +MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin"); + +static const struct nvkm_acr_hsf_fwif +gm200_acr_unload_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 }, + {} +}; + +int +gm200_acr_load_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + return gm200_acr_hsfw_boot(acr, hsf, 0x10, 0); +} + +static int +gm200_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr]; + + desc->wpr_region_id = 1; + desc->regions.no_regions = 2; + desc->regions.region_props[0].start_addr = acr->wpr_start >> 8; + desc->regions.region_props[0].end_addr = acr->wpr_end >> 8; + desc->regions.region_props[0].region_id = 1; + desc->regions.region_props[0].read_mask = 0xf; + desc->regions.region_props[0].write_mask = 0xc; + desc->regions.region_props[0].client_mask = 0x2; + flcn_acr_desc_dump(&acr->subdev, desc); + + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon); +} + +static const struct nvkm_acr_hsf_func +gm200_acr_load_0 = { + .load = gm200_acr_load_load, + .boot = gm200_acr_load_boot, + .bld = gm200_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin"); + +static const struct nvkm_acr_hsf_fwif +gm200_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm200_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gm200_acr = { + .load = gm200_acr_load_fwif, + .unload = gm200_acr_unload_fwif, + .wpr_parse = gm200_acr_wpr_parse, + .wpr_layout = gm200_acr_wpr_layout, + .wpr_alloc = gm200_acr_wpr_alloc, + .wpr_build = gm200_acr_wpr_build, + .wpr_patch = gm200_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +static int +gm200_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct nvkm_acr_hsf_fwif *hsfwif; + + hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad", + acr, "acr/bl", "acr/ucode_load", "load"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, 
"AcrUnload", + acr, "acr/bl", "acr/ucode_unload", + "unload"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + return 0; +} + +static const struct nvkm_acr_fwif +gm200_acr_fwif[] = { + { 0, gm200_acr_load, &gm200_acr }, + {} +}; + +int +gm200_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gm200_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c new file mode 100644 index 000000000000..034a6ede70c7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c @@ -0,0 +1,134 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <core/firmware.h> +#include <core/memory.h> +#include <subdev/mmu.h> +#include <subdev/pmu.h> + +#include <nvfw/acr.h> +#include <nvfw/flcn.h> + +int +gm20b_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size) +{ + struct nvkm_subdev *subdev = &acr->subdev; + + acr->func->wpr_check(acr, &acr->wpr_start, &acr->wpr_end); + + if ((acr->wpr_end - acr->wpr_start) < wpr_size) { + nvkm_error(subdev, "WPR image too big for WPR!\n"); + return -ENOSPC; + } + + return nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST, + wpr_size, 0, true, &acr->wpr); +} + +static void +gm20b_acr_load_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + struct flcn_bl_dmem_desc hsdesc = { + .ctx_dma = FALCON_DMAIDX_VIRT, + .code_dma_base = hsf->vma->addr >> 8, + .non_sec_code_off = hsf->non_sec_addr, + .non_sec_code_size = hsf->non_sec_size, + .sec_code_off = hsf->sec_addr, + .sec_code_size = hsf->sec_size, + .code_entry_point = 0, + .data_dma_base = (hsf->vma->addr + hsf->data_addr) >> 8, + .data_size = hsf->data_size, + }; + + flcn_bl_dmem_desc_dump(&acr->subdev, &hsdesc); + + nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0); +} + +static int +gm20b_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr]; + + desc->ucode_blob_base = nvkm_memory_addr(acr->wpr); + desc->ucode_blob_size = nvkm_memory_size(acr->wpr); + flcn_acr_desc_dump(&acr->subdev, desc); + + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon); +} + +const struct nvkm_acr_hsf_func +gm20b_acr_load_0 = { + .load = gm20b_acr_load_load, + .boot = gm200_acr_load_boot, + .bld = gm20b_acr_load_bld, +}; + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) 
+MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin"); +#endif + +static const struct nvkm_acr_hsf_fwif +gm20b_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gm20b_acr = { + .load = gm20b_acr_load_fwif, + .wpr_parse = gm200_acr_wpr_parse, + .wpr_layout = gm200_acr_wpr_layout, + .wpr_alloc = gm20b_acr_wpr_alloc, + .wpr_build = gm200_acr_wpr_build, + .wpr_patch = gm200_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +int +gm20b_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct nvkm_acr_hsf_fwif *hsfwif; + + hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad", + acr, "acr/bl", "acr/ucode_load", "load"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + return 0; +} + +static const struct nvkm_acr_fwif +gm20b_acr_fwif[] = { + { 0, gm20b_acr_load, &gm20b_acr }, + {} +}; + +int +gm20b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gm20b_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c new file mode 100644 index 000000000000..49e11c46d525 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c @@ -0,0 +1,281 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +#include <core/firmware.h> +#include <core/memory.h> +#include <subdev/mmu.h> +#include <engine/sec2.h> + +#include <nvfw/acr.h> +#include <nvfw/flcn.h> + +void +gp102_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust) +{ + struct wpr_header_v1 hdr; + struct lsb_header_v1 lsb; + struct nvkm_acr_lsfw *lsfw; + u32 offset = 0; + + do { + nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr)); + wpr_header_v1_dump(&acr->subdev, &hdr); + + list_for_each_entry(lsfw, &acr->lsfw, head) { + if (lsfw->id != hdr.falcon_id) + continue; + + nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb)); + lsb_header_v1_dump(&acr->subdev, &lsb); + + lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust); + break; + } + + offset += sizeof(hdr); + } while (hdr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID); +} + +int +gp102_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw) +{ + struct lsb_header_v1 hdr; + + if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature))) + return -EINVAL; + + memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size); + gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail); + + nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr)); + return 0; +} + +int +gp102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos) +{ + struct nvkm_acr_lsfw *lsfw; + u32 offset = 0; + int ret; + + /* Fill per-LSF structures. */ + list_for_each_entry(lsfw, &acr->lsfw, head) { + struct lsf_signature_v1 *sig = (void *)lsfw->sig->data; + struct wpr_header_v1 hdr = { + .falcon_id = lsfw->id, + .lsb_offset = lsfw->offset.lsb, + .bootstrap_owner = NVKM_ACR_LSF_SEC2, + .lazy_bootstrap = rtos && lsfw->id != rtos->id, + .bin_version = sig->version, + .status = WPR_HEADER_V1_STATUS_COPY, + }; + + /* Write WPR header. */ + nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr)); + offset += sizeof(hdr); + + /* Write LSB header. */ + ret = gp102_acr_wpr_build_lsb(acr, lsfw); + if (ret) + return ret; + + /* Write ucode image. */ + nvkm_wobj(acr->wpr, lsfw->offset.img, + lsfw->img.data, + lsfw->img.size); + + /* Write bootloader data. */ + lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw); + } + + /* Finalise WPR. */ + nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID); + return 0; +} + +int +gp102_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size) +{ + int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST, + ALIGN(wpr_size, 0x40000) << 1, 0x40000, true, + &acr->wpr); + if (ret) + return ret; + + acr->shadow_start = nvkm_memory_addr(acr->wpr); + acr->wpr_start = acr->shadow_start + (nvkm_memory_size(acr->wpr) >> 1); + acr->wpr_end = acr->wpr_start + (nvkm_memory_size(acr->wpr) >> 1); + return 0; +} + +u32 +gp102_acr_wpr_layout(struct nvkm_acr *acr) +{ + struct nvkm_acr_lsfw *lsfw; + u32 wpr = 0; + + wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header_v1); + wpr = ALIGN(wpr, 256); + + wpr += 0x100; /* Shared sub-WPR headers. 
*/ + + list_for_each_entry(lsfw, &acr->lsfw, head) { + wpr = ALIGN(wpr, 256); + lsfw->offset.lsb = wpr; + wpr += sizeof(struct lsb_header_v1); + + wpr = ALIGN(wpr, 4096); + lsfw->offset.img = wpr; + wpr += lsfw->img.size; + + wpr = ALIGN(wpr, 256); + lsfw->offset.bld = wpr; + lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256); + wpr += lsfw->bl_data_size; + } + + return wpr; +} + +int +gp102_acr_wpr_parse(struct nvkm_acr *acr) +{ + const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data; + + while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) { + wpr_header_v1_dump(&acr->subdev, hdr); + if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) + return -ENOMEM; + } + + return 0; +} + +MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin"); + +static const struct nvkm_acr_hsf_fwif +gp102_acr_unload_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 }, + {} +}; + +int +gp102_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + struct flcn_acr_desc_v1 *desc = (void *)&hsfw->image[hsfw->data_addr]; + + desc->wpr_region_id = 1; + desc->regions.no_regions = 2; + desc->regions.region_props[0].start_addr = acr->wpr_start >> 8; + desc->regions.region_props[0].end_addr = acr->wpr_end >> 8; + desc->regions.region_props[0].region_id = 1; + desc->regions.region_props[0].read_mask = 0xf; + desc->regions.region_props[0].write_mask = 0xc; + desc->regions.region_props[0].client_mask = 0x2; + desc->regions.region_props[0].shadow_mem_start_addr = + acr->shadow_start >> 8; + flcn_acr_desc_v1_dump(&acr->subdev, desc); + + return gm200_acr_hsfw_load(acr, hsfw, + &acr->subdev.device->sec2->falcon); +} + +static const struct nvkm_acr_hsf_func +gp102_acr_load_0 = { + .load = gp102_acr_load_load, + .boot = gm200_acr_load_boot, + .bld = gm200_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin"); + +static const struct nvkm_acr_hsf_fwif +gp102_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gp102_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gp102_acr = { + .load = gp102_acr_load_fwif, + .unload = gp102_acr_unload_fwif, + .wpr_parse = gp102_acr_wpr_parse, + .wpr_layout = gp102_acr_wpr_layout, + .wpr_alloc = gp102_acr_wpr_alloc, + .wpr_build = gp102_acr_wpr_build, + .wpr_patch = gp102_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +int +gp102_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct nvkm_acr_hsf_fwif *hsfwif; + + hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad", + acr, "acr/bl", "acr/ucode_load", "load"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload", + acr, "acr/unload_bl", "acr/ucode_unload", + "unload"); + if 
(IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + return 0; +} + +static const struct nvkm_acr_fwif +gp102_acr_fwif[] = { + { 0, gp102_acr_load, &gp102_acr }, + {} +}; + +int +gp102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gp102_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c new file mode 100644 index 000000000000..f10dc9112678 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c @@ -0,0 +1,111 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <subdev/mmu.h> + +#include <nvfw/flcn.h> + +void +gp108_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + struct flcn_bl_dmem_desc_v2 hsdesc = { + .ctx_dma = FALCON_DMAIDX_VIRT, + .code_dma_base = hsf->vma->addr, + .non_sec_code_off = hsf->non_sec_addr, + .non_sec_code_size = hsf->non_sec_size, + .sec_code_off = hsf->sec_addr, + .sec_code_size = hsf->sec_size, + .code_entry_point = 0, + .data_dma_base = hsf->vma->addr + hsf->data_addr, + .data_size = hsf->data_size, + .argc = 0, + .argv = 0, + }; + + flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hsdesc); + + nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0); +} + +const struct nvkm_acr_hsf_func +gp108_acr_unload_0 = { + .load = gm200_acr_unload_load, + .boot = gm200_acr_unload_boot, + .bld = gp108_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin"); + +static const struct nvkm_acr_hsf_fwif +gp108_acr_unload_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 }, + {} +}; + +static const struct nvkm_acr_hsf_func +gp108_acr_load_0 = { + .load = gp102_acr_load_load, + .boot = gm200_acr_load_boot, + .bld = gp108_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin"); + +static const struct nvkm_acr_hsf_fwif +gp108_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gp108_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gp108_acr = { + .load = gp108_acr_load_fwif, + .unload = gp108_acr_unload_fwif, + .wpr_parse = gp102_acr_wpr_parse, + .wpr_layout = 
gp102_acr_wpr_layout, + .wpr_alloc = gp102_acr_wpr_alloc, + .wpr_build = gp102_acr_wpr_build, + .wpr_patch = gp102_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +static const struct nvkm_acr_fwif +gp108_acr_fwif[] = { + { 0, gp102_acr_load, &gp108_acr }, + {} +}; + +int +gp108_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gp108_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c new file mode 100644 index 000000000000..39de64292a41 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c @@ -0,0 +1,57 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) +MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin"); +#endif + +static const struct nvkm_acr_hsf_fwif +gp10b_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gp10b_acr = { + .load = gp10b_acr_load_fwif, + .wpr_parse = gm200_acr_wpr_parse, + .wpr_layout = gm200_acr_wpr_layout, + .wpr_alloc = gm20b_acr_wpr_alloc, + .wpr_build = gm200_acr_wpr_build, + .wpr_patch = gm200_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +static const struct nvkm_acr_fwif +gp10b_acr_fwif[] = { + { 0, gm20b_acr_load, &gp10b_acr }, + {} +}; + +int +gp10b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gp10b_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c new file mode 100644 index 000000000000..aecce2dac558 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c @@ -0,0 +1,180 @@ +/* + * Copyright 2019 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <core/firmware.h> + +#include <nvfw/fw.h> +#include <nvfw/hs.h> + +static void +nvkm_acr_hsfw_del(struct nvkm_acr_hsfw *hsfw) +{ + list_del(&hsfw->head); + kfree(hsfw->imem); + kfree(hsfw->image); + kfree(hsfw->sig.prod.data); + kfree(hsfw->sig.dbg.data); + kfree(hsfw); +} + +void +nvkm_acr_hsfw_del_all(struct nvkm_acr *acr) +{ + struct nvkm_acr_hsfw *hsfw, *hsft; + list_for_each_entry_safe(hsfw, hsft, &acr->hsfw, head) { + nvkm_acr_hsfw_del(hsfw); + } +} + +static int +nvkm_acr_hsfw_load_image(struct nvkm_acr *acr, const char *name, int ver, + struct nvkm_acr_hsfw *hsfw) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct firmware *fw; + const struct nvfw_bin_hdr *hdr; + const struct nvfw_hs_header *fwhdr; + const struct nvfw_hs_load_header *lhdr; + u32 loc, sig; + int ret; + + ret = nvkm_firmware_get(subdev, name, ver, &fw); + if (ret < 0) + return ret; + + hdr = nvfw_bin_hdr(subdev, fw->data); + fwhdr = nvfw_hs_header(subdev, fw->data + hdr->header_offset); + + /* Earlier FW releases by NVIDIA for Nouveau's use aren't in NVIDIA's + * standard format, and don't have the indirection seen in the 0x10de + * case. 
+ */ + switch (hdr->bin_magic) { + case 0x000010de: + loc = *(u32 *)(fw->data + fwhdr->patch_loc); + sig = *(u32 *)(fw->data + fwhdr->patch_sig); + break; + case 0x3b1d14f0: + loc = fwhdr->patch_loc; + sig = fwhdr->patch_sig; + break; + default: + ret = -EINVAL; + goto done; + } + + lhdr = nvfw_hs_load_header(subdev, fw->data + fwhdr->hdr_offset); + + if (!(hsfw->image = kmalloc(hdr->data_size, GFP_KERNEL))) { + ret = -ENOMEM; + goto done; + } + + memcpy(hsfw->image, fw->data + hdr->data_offset, hdr->data_size); + hsfw->image_size = hdr->data_size; + hsfw->non_sec_addr = lhdr->non_sec_code_off; + hsfw->non_sec_size = lhdr->non_sec_code_size; + hsfw->sec_addr = lhdr->apps[0]; + hsfw->sec_size = lhdr->apps[lhdr->num_apps]; + hsfw->data_addr = lhdr->data_dma_base; + hsfw->data_size = lhdr->data_size; + + hsfw->sig.prod.size = fwhdr->sig_prod_size; + hsfw->sig.prod.data = kmalloc(hsfw->sig.prod.size, GFP_KERNEL); + if (!hsfw->sig.prod.data) { + ret = -ENOMEM; + goto done; + } + + memcpy(hsfw->sig.prod.data, fw->data + fwhdr->sig_prod_offset + sig, + hsfw->sig.prod.size); + + hsfw->sig.dbg.size = fwhdr->sig_dbg_size; + hsfw->sig.dbg.data = kmalloc(hsfw->sig.dbg.size, GFP_KERNEL); + if (!hsfw->sig.dbg.data) { + ret = -ENOMEM; + goto done; + } + + memcpy(hsfw->sig.dbg.data, fw->data + fwhdr->sig_dbg_offset + sig, + hsfw->sig.dbg.size); + + hsfw->sig.patch_loc = loc; +done: + nvkm_firmware_put(fw); + return ret; +} + +static int +nvkm_acr_hsfw_load_bl(struct nvkm_acr *acr, const char *name, int ver, + struct nvkm_acr_hsfw *hsfw) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct nvfw_bin_hdr *hdr; + const struct nvfw_bl_desc *desc; + const struct firmware *fw; + u8 *data; + int ret; + + ret = nvkm_firmware_get(subdev, name, ver, &fw); + if (ret) + return ret; + + hdr = nvfw_bin_hdr(subdev, fw->data); + desc = nvfw_bl_desc(subdev, fw->data + hdr->header_offset); + data = (void *)fw->data + hdr->data_offset; + + hsfw->imem_size = desc->code_size; + hsfw->imem_tag = desc->start_tag; + hsfw->imem = kmalloc(desc->code_size, GFP_KERNEL); + memcpy(hsfw->imem, data + desc->code_off, desc->code_size); + + nvkm_firmware_put(fw); + return 0; +} + +int +nvkm_acr_hsfw_load(struct nvkm_acr *acr, const char *bl, const char *fw, + const char *name, int version, + const struct nvkm_acr_hsf_fwif *fwif) +{ + struct nvkm_acr_hsfw *hsfw; + int ret; + + if (!(hsfw = kzalloc(sizeof(*hsfw), GFP_KERNEL))) + return -ENOMEM; + + hsfw->func = fwif->func; + hsfw->name = name; + list_add_tail(&hsfw->head, &acr->hsfw); + + ret = nvkm_acr_hsfw_load_bl(acr, bl, version, hsfw); + if (ret) + goto done; + + ret = nvkm_acr_hsfw_load_image(acr, fw, version, hsfw); +done: + if (ret) + nvkm_acr_hsfw_del(hsfw); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c new file mode 100644 index 000000000000..07d1830126ab --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c @@ -0,0 +1,253 @@ +/* + * Copyright 2019 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" +#include <core/falcon.h> +#include <core/firmware.h> +#include <nvfw/fw.h> +#include <nvfw/ls.h> + +void +nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *lsfw) +{ + nvkm_blob_dtor(&lsfw->img); + nvkm_firmware_put(lsfw->sig); + list_del(&lsfw->head); + kfree(lsfw); +} + +void +nvkm_acr_lsfw_del_all(struct nvkm_acr *acr) +{ + struct nvkm_acr_lsfw *lsfw, *lsft; + list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) { + nvkm_acr_lsfw_del(lsfw); + } +} + +static struct nvkm_acr_lsfw * +nvkm_acr_lsfw_get(struct nvkm_acr *acr, enum nvkm_acr_lsf_id id) +{ + struct nvkm_acr_lsfw *lsfw; + list_for_each_entry(lsfw, &acr->lsfw, head) { + if (lsfw->id == id) + return lsfw; + } + return NULL; +} + +struct nvkm_acr_lsfw * +nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *func, struct nvkm_acr *acr, + struct nvkm_falcon *falcon, enum nvkm_acr_lsf_id id) +{ + struct nvkm_acr_lsfw *lsfw; + + if (!acr) + return ERR_PTR(-ENOSYS); + + lsfw = nvkm_acr_lsfw_get(acr, id); + if (lsfw && lsfw->func) { + nvkm_error(&acr->subdev, "LSFW %d redefined\n", id); + return ERR_PTR(-EEXIST); + } + + if (!lsfw) { + if (!(lsfw = kzalloc(sizeof(*lsfw), GFP_KERNEL))) + return ERR_PTR(-ENOMEM); + + lsfw->id = id; + list_add_tail(&lsfw->head, &acr->lsfw); + } + + lsfw->func = func; + lsfw->falcon = falcon; + return lsfw; +} + +static struct nvkm_acr_lsfw * +nvkm_acr_lsfw_load_sig_image_desc_(struct nvkm_subdev *subdev, + struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id, + const char *path, int ver, + const struct nvkm_acr_lsf_func *func, + const struct firmware **pdesc) +{ + struct nvkm_acr *acr = subdev->device->acr; + struct nvkm_acr_lsfw *lsfw; + int ret; + + if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id)))) + return lsfw; + + ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig); + if (ret) + goto done; + + ret = nvkm_firmware_load_blob(subdev, path, "image", ver, &lsfw->img); + if (ret) + goto done; + + ret = nvkm_firmware_load_name(subdev, path, "desc", ver, pdesc); +done: + if (ret) { + nvkm_acr_lsfw_del(lsfw); + return ERR_PTR(ret); + } + + return lsfw; +} + +static void +nvkm_acr_lsfw_from_desc(const struct nvfw_ls_desc_head *desc, + struct nvkm_acr_lsfw *lsfw) +{ + lsfw->bootloader_size = ALIGN(desc->bootloader_size, 256); + lsfw->bootloader_imem_offset = desc->bootloader_imem_offset; + + lsfw->app_size = ALIGN(desc->app_size, 256); + lsfw->app_start_offset = desc->app_start_offset; + 
lsfw->app_imem_entry = desc->app_imem_entry; + lsfw->app_resident_code_offset = desc->app_resident_code_offset; + lsfw->app_resident_code_size = desc->app_resident_code_size; + lsfw->app_resident_data_offset = desc->app_resident_data_offset; + lsfw->app_resident_data_size = desc->app_resident_data_size; + + lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) + + lsfw->bootloader_size; + lsfw->data_size = lsfw->app_size + lsfw->bootloader_size - + lsfw->ucode_size; +} + +int +nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *subdev, + struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id, + const char *path, int ver, + const struct nvkm_acr_lsf_func *func) +{ + const struct firmware *fw; + struct nvkm_acr_lsfw *lsfw; + + lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver, + func, &fw); + if (IS_ERR(lsfw)) + return PTR_ERR(lsfw); + + nvkm_acr_lsfw_from_desc(&nvfw_ls_desc(subdev, fw->data)->head, lsfw); + nvkm_firmware_put(fw); + return 0; +} + +int +nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *subdev, + struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id, + const char *path, int ver, + const struct nvkm_acr_lsf_func *func) +{ + const struct firmware *fw; + struct nvkm_acr_lsfw *lsfw; + + lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver, + func, &fw); + if (IS_ERR(lsfw)) + return PTR_ERR(lsfw); + + nvkm_acr_lsfw_from_desc(&nvfw_ls_desc_v1(subdev, fw->data)->head, lsfw); + nvkm_firmware_put(fw); + return 0; +} + +int +nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *subdev, + struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id, + const char *path, int ver, + const struct nvkm_acr_lsf_func *func) +{ + struct nvkm_acr *acr = subdev->device->acr; + struct nvkm_acr_lsfw *lsfw; + const struct firmware *bl = NULL, *inst = NULL, *data = NULL; + const struct nvfw_bin_hdr *hdr; + const struct nvfw_bl_desc *desc; + u32 *bldata; + int ret; + + if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id)))) + return PTR_ERR(lsfw); + + ret = nvkm_firmware_load_name(subdev, path, "bl", ver, &bl); + if (ret) + goto done; + + hdr = nvfw_bin_hdr(subdev, bl->data); + desc = nvfw_bl_desc(subdev, bl->data + hdr->header_offset); + bldata = (void *)(bl->data + hdr->data_offset); + + ret = nvkm_firmware_load_name(subdev, path, "inst", ver, &inst); + if (ret) + goto done; + + ret = nvkm_firmware_load_name(subdev, path, "data", ver, &data); + if (ret) + goto done; + + ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig); + if (ret) + goto done; + + lsfw->bootloader_size = ALIGN(desc->code_size, 256); + lsfw->bootloader_imem_offset = desc->start_tag << 8; + + lsfw->app_start_offset = lsfw->bootloader_size; + lsfw->app_imem_entry = 0; + lsfw->app_resident_code_offset = 0; + lsfw->app_resident_code_size = ALIGN(inst->size, 256); + lsfw->app_resident_data_offset = lsfw->app_resident_code_size; + lsfw->app_resident_data_size = ALIGN(data->size, 256); + lsfw->app_size = lsfw->app_resident_code_size + + lsfw->app_resident_data_size; + + lsfw->img.size = lsfw->bootloader_size + lsfw->app_size; + if (!(lsfw->img.data = kzalloc(lsfw->img.size, GFP_KERNEL))) { + ret = -ENOMEM; + goto done; + } + + memcpy(lsfw->img.data, bldata, lsfw->bootloader_size); + memcpy(lsfw->img.data + lsfw->app_start_offset + + lsfw->app_resident_code_offset, inst->data, inst->size); + memcpy(lsfw->img.data + lsfw->app_start_offset + + lsfw->app_resident_data_offset, data->data, data->size); + + lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) + + 
lsfw->bootloader_size; + lsfw->data_size = lsfw->app_size + lsfw->bootloader_size - + lsfw->ucode_size; + +done: + if (ret) + nvkm_acr_lsfw_del(lsfw); + nvkm_firmware_put(data); + nvkm_firmware_put(inst); + nvkm_firmware_put(bl); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h new file mode 100644 index 000000000000..d8ba72806d39 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h @@ -0,0 +1,151 @@ +#ifndef __NVKM_ACR_PRIV_H__ +#define __NVKM_ACR_PRIV_H__ +#include <subdev/acr.h> +struct lsb_header_tail; + +struct nvkm_acr_fwif { + int version; + int (*load)(struct nvkm_acr *, int version, + const struct nvkm_acr_fwif *); + const struct nvkm_acr_func *func; +}; + +int gm20b_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *); +int gp102_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *); + +struct nvkm_acr_lsf; +struct nvkm_acr_func { + const struct nvkm_acr_hsf_fwif *load; + const struct nvkm_acr_hsf_fwif *ahesasc; + const struct nvkm_acr_hsf_fwif *asb; + const struct nvkm_acr_hsf_fwif *unload; + int (*wpr_parse)(struct nvkm_acr *); + u32 (*wpr_layout)(struct nvkm_acr *); + int (*wpr_alloc)(struct nvkm_acr *, u32 wpr_size); + int (*wpr_build)(struct nvkm_acr *, struct nvkm_acr_lsf *rtos); + void (*wpr_patch)(struct nvkm_acr *, s64 adjust); + void (*wpr_check)(struct nvkm_acr *, u64 *start, u64 *limit); + int (*init)(struct nvkm_acr *); + void (*fini)(struct nvkm_acr *); +}; + +int gm200_acr_wpr_parse(struct nvkm_acr *); +u32 gm200_acr_wpr_layout(struct nvkm_acr *); +int gm200_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *); +void gm200_acr_wpr_patch(struct nvkm_acr *, s64); +void gm200_acr_wpr_check(struct nvkm_acr *, u64 *, u64 *); +void gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *, + struct lsb_header_tail *); +int gm200_acr_init(struct nvkm_acr *); + +int gm20b_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size); + +int gp102_acr_wpr_parse(struct nvkm_acr *); +u32 gp102_acr_wpr_layout(struct nvkm_acr *); +int gp102_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size); +int gp102_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *); +int gp102_acr_wpr_build_lsb(struct nvkm_acr *, struct nvkm_acr_lsfw *); +void gp102_acr_wpr_patch(struct nvkm_acr *, s64); + +struct nvkm_acr_hsfw { + const struct nvkm_acr_hsf_func *func; + const char *name; + struct list_head head; + + u32 imem_size; + u32 imem_tag; + u32 *imem; + + u8 *image; + u32 image_size; + u32 non_sec_addr; + u32 non_sec_size; + u32 sec_addr; + u32 sec_size; + u32 data_addr; + u32 data_size; + + struct { + struct { + void *data; + u32 size; + } prod, dbg; + u32 patch_loc; + } sig; +}; + +struct nvkm_acr_hsf_fwif { + int version; + int (*load)(struct nvkm_acr *, const char *bl, const char *fw, + const char *name, int version, + const struct nvkm_acr_hsf_fwif *); + const struct nvkm_acr_hsf_func *func; +}; + +int nvkm_acr_hsfw_load(struct nvkm_acr *, const char *, const char *, + const char *, int, const struct nvkm_acr_hsf_fwif *); +void nvkm_acr_hsfw_del_all(struct nvkm_acr *); + +struct nvkm_acr_hsf { + const struct nvkm_acr_hsf_func *func; + const char *name; + struct list_head head; + + u32 imem_size; + u32 imem_tag; + u32 *imem; + + u32 non_sec_addr; + u32 non_sec_size; + u32 sec_addr; + u32 sec_size; + u32 data_addr; + u32 data_size; + + struct nvkm_memory *ucode; + struct nvkm_vma *vma; + struct nvkm_falcon *falcon; +}; + +struct nvkm_acr_hsf_func { + int (*load)(struct nvkm_acr *, struct nvkm_acr_hsfw 
*); + int (*boot)(struct nvkm_acr *, struct nvkm_acr_hsf *); + void (*bld)(struct nvkm_acr *, struct nvkm_acr_hsf *); +}; + +int gm200_acr_hsfw_load(struct nvkm_acr *, struct nvkm_acr_hsfw *, + struct nvkm_falcon *); +int gm200_acr_hsfw_boot(struct nvkm_acr *, struct nvkm_acr_hsf *, + u32 clear_intr, u32 mbox0_ok); + +int gm200_acr_load_boot(struct nvkm_acr *, struct nvkm_acr_hsf *); + +extern const struct nvkm_acr_hsf_func gm200_acr_unload_0; +int gm200_acr_unload_load(struct nvkm_acr *, struct nvkm_acr_hsfw *); +int gm200_acr_unload_boot(struct nvkm_acr *, struct nvkm_acr_hsf *); +void gm200_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *); + +extern const struct nvkm_acr_hsf_func gm20b_acr_load_0; + +int gp102_acr_load_load(struct nvkm_acr *, struct nvkm_acr_hsfw *); + +extern const struct nvkm_acr_hsf_func gp108_acr_unload_0; +void gp108_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *); + +int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, int, + struct nvkm_acr **); +int nvkm_acr_hsf_boot(struct nvkm_acr *, const char *name); + +struct nvkm_acr_lsf { + const struct nvkm_acr_lsf_func *func; + struct nvkm_falcon *falcon; + enum nvkm_acr_lsf_id id; + struct list_head head; +}; + +struct nvkm_acr_lsfw *nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *, + struct nvkm_acr *, struct nvkm_falcon *, + enum nvkm_acr_lsf_id); +void nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *); +void nvkm_acr_lsfw_del_all(struct nvkm_acr *); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c new file mode 100644 index 000000000000..7f4b89d82d32 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c @@ -0,0 +1,215 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <core/firmware.h> +#include <core/memory.h> +#include <subdev/gsp.h> +#include <subdev/pmu.h> +#include <engine/sec2.h> + +#include <nvfw/acr.h> + +static int +tu102_acr_init(struct nvkm_acr *acr) +{ + int ret = nvkm_acr_hsf_boot(acr, "AHESASC"); + if (ret) + return ret; + + return nvkm_acr_hsf_boot(acr, "ASB"); +} + +static int +tu102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos) +{ + struct nvkm_acr_lsfw *lsfw; + u32 offset = 0; + int ret; + + /*XXX: shared sub-WPR headers, fill terminator for now. */ + nvkm_wo32(acr->wpr, 0x200, 0xffffffff); + + /* Fill per-LSF structures. 
*/ + list_for_each_entry(lsfw, &acr->lsfw, head) { + struct lsf_signature_v1 *sig = (void *)lsfw->sig->data; + struct wpr_header_v1 hdr = { + .falcon_id = lsfw->id, + .lsb_offset = lsfw->offset.lsb, + .bootstrap_owner = NVKM_ACR_LSF_GSPLITE, + .lazy_bootstrap = 1, + .bin_version = sig->version, + .status = WPR_HEADER_V1_STATUS_COPY, + }; + + /* Write WPR header. */ + nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr)); + offset += sizeof(hdr); + + /* Write LSB header. */ + ret = gp102_acr_wpr_build_lsb(acr, lsfw); + if (ret) + return ret; + + /* Write ucode image. */ + nvkm_wobj(acr->wpr, lsfw->offset.img, + lsfw->img.data, + lsfw->img.size); + + /* Write bootloader data. */ + lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw); + } + + /* Finalise WPR. */ + nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID); + return 0; +} + +static int +tu102_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + return gm200_acr_hsfw_boot(acr, hsf, 0, 0); +} + +static int +tu102_acr_hsfw_nofw(struct nvkm_acr *acr, const char *bl, const char *fw, + const char *name, int version, + const struct nvkm_acr_hsf_fwif *fwif) +{ + return 0; +} + +MODULE_FIRMWARE("nvidia/tu102/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/tu102/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/tu104/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/tu104/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/tu106/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/tu106/acr/ucode_unload.bin"); + +static const struct nvkm_acr_hsf_fwif +tu102_acr_unload_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 }, + { -1, tu102_acr_hsfw_nofw }, + {} +}; + +static int +tu102_acr_asb_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->gsp->falcon); +} + +static const struct nvkm_acr_hsf_func +tu102_acr_asb_0 = { + .load = tu102_acr_asb_load, + .boot = tu102_acr_hsfw_boot, + .bld = gp108_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/tu102/acr/ucode_asb.bin"); +MODULE_FIRMWARE("nvidia/tu104/acr/ucode_asb.bin"); +MODULE_FIRMWARE("nvidia/tu106/acr/ucode_asb.bin"); + +static const struct nvkm_acr_hsf_fwif +tu102_acr_asb_fwif[] = { + { 0, nvkm_acr_hsfw_load, &tu102_acr_asb_0 }, + { -1, tu102_acr_hsfw_nofw }, + {} +}; + +static const struct nvkm_acr_hsf_func +tu102_acr_ahesasc_0 = { + .load = gp102_acr_load_load, + .boot = tu102_acr_hsfw_boot, + .bld = gp108_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/tu102/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/tu102/acr/ucode_ahesasc.bin"); + +MODULE_FIRMWARE("nvidia/tu104/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/tu104/acr/ucode_ahesasc.bin"); + +MODULE_FIRMWARE("nvidia/tu106/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/tu106/acr/ucode_ahesasc.bin"); + +static const struct nvkm_acr_hsf_fwif +tu102_acr_ahesasc_fwif[] = { + { 0, nvkm_acr_hsfw_load, &tu102_acr_ahesasc_0 }, + { -1, tu102_acr_hsfw_nofw }, + {} +}; + +static const struct nvkm_acr_func +tu102_acr = { + .ahesasc = tu102_acr_ahesasc_fwif, + .asb = tu102_acr_asb_fwif, + .unload = tu102_acr_unload_fwif, + .wpr_parse = gp102_acr_wpr_parse, + .wpr_layout = gp102_acr_wpr_layout, + .wpr_alloc = gp102_acr_wpr_alloc, + .wpr_patch = gp102_acr_wpr_patch, + .wpr_build = tu102_acr_wpr_build, + .wpr_check = gm200_acr_wpr_check, + .init = tu102_acr_init, +}; + +static int +tu102_acr_load(struct nvkm_acr *acr, int version, + const struct nvkm_acr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct nvkm_acr_hsf_fwif *hsfwif; + + hsfwif = nvkm_firmware_load(subdev, 
fwif->func->ahesasc, "AcrAHESASC", + acr, "acr/bl", "acr/ucode_ahesasc", + "AHESASC"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + hsfwif = nvkm_firmware_load(subdev, fwif->func->asb, "AcrASB", + acr, "acr/bl", "acr/ucode_asb", "ASB"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload", + acr, "acr/unload_bl", "acr/ucode_unload", + "unload"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + return 0; +} + +static const struct nvkm_acr_fwif +tu102_acr_fwif[] = { + { 0, tu102_acr_load, &tu102_acr }, + {} +}; + +int +tu102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(tu102_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild index 53b9d638f2c8..d65ec719f153 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild @@ -2,5 +2,6 @@ nvkm-y += nvkm/subdev/fault/base.o nvkm-y += nvkm/subdev/fault/user.o nvkm-y += nvkm/subdev/fault/gp100.o +nvkm-y += nvkm/subdev/fault/gp10b.o nvkm-y += nvkm/subdev/fault/gv100.o nvkm-y += nvkm/subdev/fault/tu102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c index ca251560d3e0..f6dca97140d6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c @@ -108,7 +108,7 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id) return ret; /* Pin fault buffer in BAR2. */ - buffer->addr = nvkm_memory_bar2(buffer->mem); + buffer->addr = fault->func->buffer.pin(buffer); if (buffer->addr == ~0ULL) return -EFAULT; @@ -146,6 +146,7 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev) struct nvkm_fault *fault = nvkm_fault(subdev); int i; + nvkm_notify_fini(&fault->nrpfb); nvkm_event_fini(&fault->event); for (i = 0; i < fault->buffer_nr; i++) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c index 4f3c4e091117..f6b189cc4330 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c @@ -21,25 +21,26 @@ */ #include "priv.h" +#include <core/memory.h> #include <subdev/mc.h> #include <nvif/class.h> -static void +void gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable) { struct nvkm_device *device = buffer->fault->subdev.device; nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable); } -static void +void gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer) { struct nvkm_device *device = buffer->fault->subdev.device; nvkm_mask(device, 0x002a70, 0x00000001, 0x00000000); } -static void +void gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer) { struct nvkm_device *device = buffer->fault->subdev.device; @@ -48,7 +49,12 @@ gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer) nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001); } -static void +u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *buffer) +{ + return nvkm_memory_bar2(buffer->mem); +} + +void gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer) { buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78); @@ -56,7 +62,7 @@ gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer) buffer->put = 0x002a80; } -static void +void gp100_fault_intr(struct nvkm_fault *fault) { nvkm_event_send(&fault->event, 1, 0, NULL, 0); @@ -68,6 +74,7 @@ gp100_fault = { 
.buffer.nr = 1, .buffer.entry_size = 32, .buffer.info = gp100_fault_buffer_info, + .buffer.pin = gp100_fault_buffer_pin, .buffer.init = gp100_fault_buffer_init, .buffer.fini = gp100_fault_buffer_fini, .buffer.intr = gp100_fault_buffer_intr, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c new file mode 100644 index 000000000000..9e66d1f7654d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2019 NVIDIA Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "priv.h" + +#include <core/memory.h> + +#include <nvif/class.h> + +u64 +gp10b_fault_buffer_pin(struct nvkm_fault_buffer *buffer) +{ + return nvkm_memory_addr(buffer->mem); +} + +static const struct nvkm_fault_func +gp10b_fault = { + .intr = gp100_fault_intr, + .buffer.nr = 1, + .buffer.entry_size = 32, + .buffer.info = gp100_fault_buffer_info, + .buffer.pin = gp10b_fault_buffer_pin, + .buffer.init = gp100_fault_buffer_init, + .buffer.fini = gp100_fault_buffer_fini, + .buffer.intr = gp100_fault_buffer_intr, + .user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 }, +}; + +int +gp10b_fault_new(struct nvkm_device *device, int index, + struct nvkm_fault **pfault) +{ + return nvkm_fault_new_(&gp10b_fault, device, index, pfault); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c index 6747f09c2dc3..2707be4ffabc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c @@ -214,6 +214,7 @@ gv100_fault = { .buffer.nr = 2, .buffer.entry_size = 32, .buffer.info = gv100_fault_buffer_info, + .buffer.pin = gp100_fault_buffer_pin, .buffer.init = gv100_fault_buffer_init, .buffer.fini = gv100_fault_buffer_fini, .buffer.intr = gv100_fault_buffer_intr, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h index 975e66ac6344..f6f1dd7eee1f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h @@ -30,6 +30,7 @@ struct nvkm_fault_func { int nr; u32 entry_size; void (*info)(struct nvkm_fault_buffer *); + u64 (*pin)(struct nvkm_fault_buffer *); void (*init)(struct nvkm_fault_buffer *); void (*fini)(struct nvkm_fault_buffer *); void (*intr)(struct nvkm_fault_buffer *, bool enable); @@ -40,6 +41,15 @@ struct nvkm_fault_func { } user; }; +void 
gp100_fault_buffer_intr(struct nvkm_fault_buffer *, bool enable); +void gp100_fault_buffer_fini(struct nvkm_fault_buffer *); +void gp100_fault_buffer_init(struct nvkm_fault_buffer *); +u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *); +void gp100_fault_buffer_info(struct nvkm_fault_buffer *); +void gp100_fault_intr(struct nvkm_fault *); + +u64 gp10b_fault_buffer_pin(struct nvkm_fault_buffer *); + int gv100_fault_oneinit(struct nvkm_fault *); int nvkm_ufault_new(struct nvkm_device *, const struct nvkm_oclass *, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c index fa1dfe5692b0..45a6a68b9f48 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c @@ -154,6 +154,7 @@ tu102_fault = { .buffer.nr = 2, .buffer.entry_size = 32, .buffer.info = tu102_fault_buffer_info, + .buffer.pin = gp100_fault_buffer_pin, .buffer.init = tu102_fault_buffer_init, .buffer.fini = tu102_fault_buffer_fini, .buffer.intr = tu102_fault_buffer_intr, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c index b2bb5a3ccb02..5940e0dea2f8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c @@ -126,6 +126,34 @@ nvkm_fb_oneinit(struct nvkm_subdev *subdev) } static int +nvkm_fb_init_scrub_vpr(struct nvkm_fb *fb) +{ + struct nvkm_subdev *subdev = &fb->subdev; + int ret; + + nvkm_debug(subdev, "VPR locked, running scrubber binary\n"); + + if (!fb->vpr_scrubber.size) { + nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n"); + return 0; + } + + ret = fb->func->vpr.scrub(fb); + if (ret) { + nvkm_error(subdev, "VPR scrubber binary failed\n"); + return ret; + } + + if (fb->func->vpr.scrub_required(fb)) { + nvkm_error(subdev, "VPR still locked after scrub!\n"); + return -EIO; + } + + nvkm_debug(subdev, "VPR scrubber binary successful\n"); + return 0; +} + +static int nvkm_fb_init(struct nvkm_subdev *subdev) { struct nvkm_fb *fb = nvkm_fb(subdev); @@ -154,6 +182,14 @@ nvkm_fb_init(struct nvkm_subdev *subdev) if (fb->func->init_unkn) fb->func->init_unkn(fb); + + if (fb->func->vpr.scrub_required && + fb->func->vpr.scrub_required(fb)) { + ret = nvkm_fb_init_scrub_vpr(fb); + if (ret) + return ret; + } + return 0; } @@ -172,6 +208,8 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev) nvkm_mm_fini(&fb->tags); nvkm_ram_del(&fb->ram); + nvkm_blob_dtor(&fb->vpr_scrubber); + if (fb->func->dtor) return fb->func->dtor(fb); return fb; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c index b4d74e815674..fc8c93aa3da5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c @@ -24,7 +24,81 @@ #include "gf100.h" #include "ram.h" +#include <core/firmware.h> #include <core/memory.h> +#include <nvfw/fw.h> +#include <nvfw/hs.h> +#include <engine/nvdec.h> + +int +gp102_fb_vpr_scrub(struct nvkm_fb *fb) +{ + struct nvkm_subdev *subdev = &fb->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_falcon *falcon = &device->nvdec[0]->falcon; + struct nvkm_blob *blob = &fb->vpr_scrubber; + const struct nvfw_bin_hdr *hsbin_hdr; + const struct nvfw_hs_header *fw_hdr; + const struct nvfw_hs_load_header *lhdr; + void *scrub_data; + u32 patch_loc, patch_sig; + int ret; + + nvkm_falcon_get(falcon, subdev); + + hsbin_hdr = nvfw_bin_hdr(subdev, blob->data); + fw_hdr = nvfw_hs_header(subdev, blob->data + 
hsbin_hdr->header_offset); + lhdr = nvfw_hs_load_header(subdev, blob->data + fw_hdr->hdr_offset); + scrub_data = blob->data + hsbin_hdr->data_offset; + + patch_loc = *(u32 *)(blob->data + fw_hdr->patch_loc); + patch_sig = *(u32 *)(blob->data + fw_hdr->patch_sig); + if (falcon->debug) { + memcpy(scrub_data + patch_loc, + blob->data + fw_hdr->sig_dbg_offset + patch_sig, + fw_hdr->sig_dbg_size); + } else { + memcpy(scrub_data + patch_loc, + blob->data + fw_hdr->sig_prod_offset + patch_sig, + fw_hdr->sig_prod_size); + } + + nvkm_falcon_reset(falcon); + nvkm_falcon_bind_context(falcon, NULL); + + nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off, + lhdr->non_sec_code_size, + lhdr->non_sec_code_off >> 8, 0, false); + nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0], + ALIGN(lhdr->apps[0], 0x100), + lhdr->apps[1], + lhdr->apps[0] >> 8, 0, true); + nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0, + lhdr->data_size, 0); + + nvkm_falcon_set_start_addr(falcon, 0x0); + nvkm_falcon_start(falcon); + + ret = nvkm_falcon_wait_for_halt(falcon, 500); + if (ret < 0) { + ret = -ETIMEDOUT; + goto end; + } + + /* put nvdec in clean state - without reset it will remain in HS mode */ + nvkm_falcon_reset(falcon); +end: + nvkm_falcon_put(falcon, subdev); + return ret; +} + +bool +gp102_fb_vpr_scrub_required(struct nvkm_fb *fb) +{ + struct nvkm_device *device = fb->subdev.device; + nvkm_wr32(device, 0x100cd0, 0x2); + return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0; +} static const struct nvkm_fb_func gp102_fb = { @@ -33,11 +107,32 @@ gp102_fb = { .init = gp100_fb_init, .init_remapper = gp100_fb_init_remapper, .init_page = gm200_fb_init_page, + .vpr.scrub_required = gp102_fb_vpr_scrub_required, + .vpr.scrub = gp102_fb_vpr_scrub, .ram_new = gp100_ram_new, }; int +gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device, + int index, struct nvkm_fb **pfb) +{ + int ret = gf100_fb_new_(func, device, index, pfb); + if (ret) + return ret; + + nvkm_firmware_load_blob(&(*pfb)->subdev, "nvdec/scrubber", "", 0, + &(*pfb)->vpr_scrubber); + return 0; +} + +int gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gp102_fb, device, index, pfb); + return gp102_fb_new_(&gp102_fb, device, index, pfb); } + +MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c index 3c5e02e9794a..389bad312bf2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c @@ -35,6 +35,8 @@ gv100_fb = { .init = gp100_fb_init, .init_page = gv100_fb_init_page, .init_unkn = gp100_fb_init_unkn, + .vpr.scrub_required = gp102_fb_vpr_scrub_required, + .vpr.scrub = gp102_fb_vpr_scrub, .ram_new = gp100_ram_new, .default_bigpage = 16, }; @@ -42,5 +44,10 @@ gv100_fb = { int gv100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gv100_fb, device, index, pfb); + return gp102_fb_new_(&gv100_fb, device, index, pfb); } + +MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin"); diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h index c4e9f55af283..5be9c563350d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h @@ -17,6 +17,11 @@ struct nvkm_fb_func { void (*intr)(struct nvkm_fb *); struct { + bool (*scrub_required)(struct nvkm_fb *); + int (*scrub)(struct nvkm_fb *); + } vpr; + + struct { int regions; void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nvkm_fb_tile *); @@ -72,4 +77,9 @@ int gm200_fb_init_page(struct nvkm_fb *); void gp100_fb_init_remapper(struct nvkm_fb *); void gp100_fb_init_unkn(struct nvkm_fb *); + +int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int, + struct nvkm_fb **); +bool gp102_fb_vpr_scrub_required(struct nvkm_fb *); +int gp102_fb_vpr_scrub(struct nvkm_fb *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c index ac87a3b6b7c9..ba43fe158b22 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c @@ -655,7 +655,7 @@ gf100_ram_new_(const struct nvkm_ram_func *func, static const struct nvkm_ram_func gf100_ram = { - .upper = 0x0200000000, + .upper = 0x0200000000ULL, .probe_fbp = gf100_ram_probe_fbp, .probe_fbp_amount = gf100_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c index 70a06e3cd55a..d97fa43efb91 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c @@ -43,7 +43,7 @@ gf108_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao, static const struct nvkm_ram_func gf108_ram = { - .upper = 0x0200000000, + .upper = 0x0200000000ULL, .probe_fbp = gf100_ram_probe_fbp, .probe_fbp_amount = gf108_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c index 456aed1f2a02..d350d92852d2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c @@ -1698,7 +1698,7 @@ gk104_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb, static const struct nvkm_ram_func gk104_ram = { - .upper = 0x0200000000, + .upper = 0x0200000000ULL, .probe_fbp = gf100_ram_probe_fbp, .probe_fbp_amount = gf108_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c index 27c68e3f9772..be91da854dca 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c @@ -33,7 +33,7 @@ gm107_ram_probe_fbp(const struct nvkm_ram_func *func, static const struct nvkm_ram_func gm107_ram = { - .upper = 0x1000000000, + .upper = 0x1000000000ULL, .probe_fbp = gm107_ram_probe_fbp, .probe_fbp_amount = gf108_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c index 6b0cac1fe7b4..8f91ea91ee25 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c @@ -48,7 +48,7 @@ gm200_ram_probe_fbp_amount(const struct nvkm_ram_func 
*func, u32 fbpao, static const struct nvkm_ram_func gm200_ram = { - .upper = 0x1000000000, + .upper = 0x1000000000ULL, .probe_fbp = gm107_ram_probe_fbp, .probe_fbp_amount = gm200_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c index adb62a6beb63..378f6fb70990 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c @@ -79,7 +79,7 @@ gp100_ram_probe_fbpa(struct nvkm_device *device, int fbpa) static const struct nvkm_ram_func gp100_ram = { - .upper = 0x1000000000, + .upper = 0x1000000000ULL, .probe_fbp = gm107_ram_probe_fbp, .probe_fbp_amount = gm200_ram_probe_fbp_amount, .probe_fbpa_amount = gp100_ram_probe_fbpa, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild index e7c4f068936e..67cc3b320169 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild @@ -1,2 +1,3 @@ # SPDX-License-Identifier: MIT +nvkm-y += nvkm/subdev/gsp/base.o nvkm-y += nvkm/subdev/gsp/gv100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c new file mode 100644 index 000000000000..5a32df0f9992 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -0,0 +1,59 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" +#include <core/falcon.h> +#include <core/firmware.h> +#include <subdev/acr.h> +#include <subdev/top.h> + +static void * +nvkm_gsp_dtor(struct nvkm_subdev *subdev) +{ + struct nvkm_gsp *gsp = nvkm_gsp(subdev); + nvkm_falcon_dtor(&gsp->falcon); + return gsp; +} + +static const struct nvkm_subdev_func +nvkm_gsp = { + .dtor = nvkm_gsp_dtor, +}; + +int +nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device, + int index, struct nvkm_gsp **pgsp) +{ + struct nvkm_gsp *gsp; + + if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL))) + return -ENOMEM; + + nvkm_subdev_ctor(&nvkm_gsp, device, index, &gsp->subdev); + + fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp); + if (IS_ERR(fwif)) + return PTR_ERR(fwif); + + return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev, + nvkm_subdev_name[gsp->subdev.index], 0, + &gsp->falcon); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c index dccfaf1162e2..2114f9b00a28 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c @@ -19,44 +19,37 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ -#include <subdev/gsp.h> -#include <subdev/top.h> -#include <engine/falcon.h> +#include "priv.h" + +static const struct nvkm_falcon_func +gv100_gsp_flcn = { + .fbif = 0x600, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = gp102_sec2_flcn_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = gp102_sec2_flcn_enable, + .disable = nvkm_falcon_v1_disable, +}; static int -gv100_gsp_oneinit(struct nvkm_subdev *subdev) -{ - struct nvkm_gsp *gsp = nvkm_gsp(subdev); - - gsp->addr = nvkm_top_addr(subdev->device, subdev->index); - if (!gsp->addr) - return -EINVAL; - - return nvkm_falcon_v1_new(subdev, "GSP", gsp->addr, &gsp->falcon); -} - -static void * -gv100_gsp_dtor(struct nvkm_subdev *subdev) +gv100_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) { - struct nvkm_gsp *gsp = nvkm_gsp(subdev); - nvkm_falcon_del(&gsp->falcon); - return gsp; + return 0; } -static const struct nvkm_subdev_func -gv100_gsp = { - .dtor = gv100_gsp_dtor, - .oneinit = gv100_gsp_oneinit, +struct nvkm_gsp_fwif +gv100_gsp[] = { + { -1, gv100_gsp_nofw, &gv100_gsp_flcn }, + {} }; int gv100_gsp_new(struct nvkm_device *device, int index, struct nvkm_gsp **pgsp) { - struct nvkm_gsp *gsp; - - if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL))) - return -ENOMEM; - - nvkm_subdev_ctor(&gv100_gsp, device, index, &gsp->subdev); - return 0; + return nvkm_gsp_new_(gv100_gsp, device, index, pgsp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h new file mode 100644 index 000000000000..92820fb997c1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_GSP_PRIV_H__ +#define __NVKM_GSP_PRIV_H__ +#include <subdev/gsp.h> +enum nvkm_acr_lsf_id; + +struct nvkm_gsp_fwif { + int version; + int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *); + const struct nvkm_falcon_func *flcn; +}; + +int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, int, + struct 
nvkm_gsp **); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild index 2b6d36ea7067..728d75010847 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild @@ -6,3 +6,4 @@ nvkm-y += nvkm/subdev/ltc/gm107.o nvkm-y += nvkm/subdev/ltc/gm200.o nvkm-y += nvkm/subdev/ltc/gp100.o nvkm-y += nvkm/subdev/ltc/gp102.o +nvkm-y += nvkm/subdev/ltc/gp10b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c new file mode 100644 index 000000000000..c0063c7caa50 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2019 NVIDIA Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Thierry Reding + */ + +#include "priv.h" + +static void +gp10b_ltc_init(struct nvkm_ltc *ltc) +{ + struct nvkm_device *device = ltc->subdev.device; + struct iommu_fwspec *spec; + + nvkm_wr32(device, 0x17e27c, ltc->ltc_nr); + nvkm_wr32(device, 0x17e000, ltc->ltc_nr); + nvkm_wr32(device, 0x100800, ltc->ltc_nr); + + spec = dev_iommu_fwspec_get(device->dev); + if (spec) { + u32 sid = spec->ids[0] & 0xffff; + + /* stream ID */ + nvkm_wr32(device, 0x160000, sid << 2); + } +} + +static const struct nvkm_ltc_func +gp10b_ltc = { + .oneinit = gp100_ltc_oneinit, + .init = gp10b_ltc_init, + .intr = gp100_ltc_intr, + .cbc_clear = gm107_ltc_cbc_clear, + .cbc_wait = gm107_ltc_cbc_wait, + .zbc = 16, + .zbc_clear_color = gm107_ltc_zbc_clear_color, + .zbc_clear_depth = gm107_ltc_zbc_clear_depth, + .zbc_clear_stencil = gp102_ltc_zbc_clear_stencil, + .invalidate = gf100_ltc_invalidate, + .flush = gf100_ltc_flush, +}; + +int +gp10b_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) +{ + return nvkm_ltc_new_(&gp10b_ltc, device, index, pltc); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h index 2fcf18e46ce3..eca5a711b1b8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h @@ -46,4 +46,6 @@ void gm107_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32); int gp100_ltc_oneinit(struct nvkm_ltc *); void gp100_ltc_init(struct nvkm_ltc *); void gp100_ltc_intr(struct nvkm_ltc *); + +void gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *, int, const u32); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c index 2d075246dc46..2cd5ec81c0d0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c @@ -30,7 +30,7 @@ * The value 0xff represents an invalid storage type. 
*/ const u8 * -gf100_mmu_kind(struct nvkm_mmu *mmu, int *count) +gf100_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid) { static const u8 kind[256] = { @@ -69,6 +69,7 @@ gf100_mmu_kind(struct nvkm_mmu *mmu, int *count) }; *count = ARRAY_SIZE(kind); + *invalid = 0xff; return kind; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c index dbf644ebac97..83990c83f9f8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c @@ -27,7 +27,7 @@ #include <nvif/class.h> const u8 * -gm200_mmu_kind(struct nvkm_mmu *mmu, int *count) +gm200_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid) { static const u8 kind[256] = { @@ -65,6 +65,7 @@ gm200_mmu_kind(struct nvkm_mmu *mmu, int *count) 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff }; *count = ARRAY_SIZE(kind); + *invalid = 0xff; return kind; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c index db3dfbbb2aa0..c0083ddda65a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c @@ -27,7 +27,7 @@ #include <nvif/class.h> const u8 * -nv50_mmu_kind(struct nvkm_mmu *base, int *count) +nv50_mmu_kind(struct nvkm_mmu *base, int *count, u8 *invalid) { /* 0x01: no bank swizzle * 0x02: bank swizzled @@ -57,6 +57,7 @@ nv50_mmu_kind(struct nvkm_mmu *base, int *count) 0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x7f, 0x7f }; *count = ARRAY_SIZE(kind); + *invalid = 0x7f; return kind; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h index 07f2fcd18f3d..479b02344271 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h @@ -35,17 +35,17 @@ struct nvkm_mmu_func { u32 pd_offset; } vmm; - const u8 *(*kind)(struct nvkm_mmu *, int *count); + const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid); bool kind_sys; }; extern const struct nvkm_mmu_func nv04_mmu; -const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count); +const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid); -const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count); +const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid); -const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *); +const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *); struct nvkm_mmu_pt { union { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c index c0db0ce10cba..b21e82eb0916 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c @@ -1,5 +1,6 @@ /* * Copyright 2018 Red Hat Inc. + * Copyright 2019 NVIDIA Corporation. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -26,13 +27,26 @@ #include <nvif/class.h> +const u8 * +tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid) +{ + static const u8 + kind[16] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00 */ + 0x06, 0x06, 0x02, 0x01, 0x03, 0x04, 0x05, 0x07, + }; + *count = ARRAY_SIZE(kind); + *invalid = 0x07; + return kind; +} + static const struct nvkm_mmu_func tu102_mmu = { .dma_bits = 47, .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}}, .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map }, .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, tu102_vmm_new }, - .kind = gm200_mmu_kind, + .kind = tu102_mmu_kind, .kind_sys = true, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c index 353f10f92b77..0e4b8941da37 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c @@ -111,15 +111,17 @@ nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc) } *args = argv; const u8 *kind = NULL; int ret = -ENOSYS, count = 0; + u8 kind_inv = 0; if (mmu->func->kind) - kind = mmu->func->kind(mmu, &count); + kind = mmu->func->kind(mmu, &count, &kind_inv); if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) { if (argc != args->v0.count * sizeof(*args->v0.data)) return -EINVAL; if (args->v0.count > count) return -EINVAL; + args->v0.kind_inv = kind_inv; memcpy(args->v0.data, kind, args->v0.count); } else return ret; @@ -157,9 +159,10 @@ nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass, struct nvkm_mmu *mmu = device->mmu; struct nvkm_ummu *ummu; int ret = -ENOSYS, kinds = 0; + u8 unused = 0; if (mmu->func->kind) - mmu->func->kind(mmu, &kinds); + mmu->func->kind(mmu, &kinds, &unused); if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) { args->v0.dmabits = mmu->dma_bits; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c index ab6424faf84c..6a2d9eb8e1ea 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c @@ -247,7 +247,7 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, } *args = argv; struct nvkm_device *device = vmm->mmu->subdev.device; struct nvkm_memory *memory = map->memory; - u8 kind, priv, ro, vol; + u8 kind, kind_inv, priv, ro, vol; int kindn, aper, ret = -ENOSYS; const u8 *kindm; @@ -274,8 +274,8 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, if (WARN_ON(aper < 0)) return aper; - kindm = vmm->mmu->func->kind(vmm->mmu, &kindn); - if (kind >= kindn || kindm[kind] == 0xff) { + kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); + if (kind >= kindn || kindm[kind] == kind_inv) { VMM_DEBUG(vmm, "kind %02x", kind); return -EINVAL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c index b4f519768d5e..d86287565542 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c @@ -320,7 +320,7 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, } *args = argv; struct nvkm_device *device = vmm->mmu->subdev.device; struct nvkm_memory *memory = map->memory; - u8 kind, priv, ro, vol; + u8 kind, kind_inv, priv, ro, vol; int kindn, aper, ret = -ENOSYS; const u8 *kindm; @@ -347,8 
+347,8 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, if (WARN_ON(aper < 0)) return aper; - kindm = vmm->mmu->func->kind(vmm->mmu, &kindn); - if (kind >= kindn || kindm[kind] == 0xff) { + kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); + if (kind >= kindn || kindm[kind] == kind_inv) { VMM_DEBUG(vmm, "kind %02x", kind); return -EINVAL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c index c98afe3134ee..2d89e27e8e9e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c @@ -235,7 +235,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, struct nvkm_device *device = vmm->mmu->subdev.device; struct nvkm_ram *ram = device->fb->ram; struct nvkm_memory *memory = map->memory; - u8 aper, kind, comp, priv, ro; + u8 aper, kind, kind_inv, comp, priv, ro; int kindn, ret = -ENOSYS; const u8 *kindm; @@ -278,8 +278,8 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, return -EINVAL; } - kindm = vmm->mmu->func->kind(vmm->mmu, &kindn); - if (kind >= kindn || kindm[kind] == 0x7f) { + kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); + if (kind >= kindn || kindm[kind] == kind_inv) { VMM_DEBUG(vmm, "kind %02x", kind); return -EINVAL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild index e37b6e45eaa2..a76c2a7bd696 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild @@ -12,3 +12,4 @@ nvkm-y += nvkm/subdev/pmu/gm107.o nvkm-y += nvkm/subdev/pmu/gm20b.o nvkm-y += nvkm/subdev/pmu/gp100.o nvkm-y += nvkm/subdev/pmu/gp102.o +nvkm-y += nvkm/subdev/pmu/gp10b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c index ea2e11771bca..a0fe607c9c07 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c @@ -23,7 +23,7 @@ */ #include "priv.h" -#include <core/msgqueue.h> +#include <core/firmware.h> #include <subdev/timer.h> bool @@ -85,6 +85,12 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend) pmu->func->fini(pmu); flush_work(&pmu->recv.work); + + reinit_completion(&pmu->wpr_ready); + + nvkm_falcon_cmdq_fini(pmu->lpq); + nvkm_falcon_cmdq_fini(pmu->hpq); + pmu->initmsg_received = false; return 0; } @@ -133,19 +139,15 @@ nvkm_pmu_init(struct nvkm_subdev *subdev) return ret; } -static int -nvkm_pmu_oneinit(struct nvkm_subdev *subdev) -{ - struct nvkm_pmu *pmu = nvkm_pmu(subdev); - return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon); -} - static void * nvkm_pmu_dtor(struct nvkm_subdev *subdev) { struct nvkm_pmu *pmu = nvkm_pmu(subdev); - nvkm_msgqueue_del(&pmu->queue); - nvkm_falcon_del(&pmu->falcon); + nvkm_falcon_msgq_del(&pmu->msgq); + nvkm_falcon_cmdq_del(&pmu->lpq); + nvkm_falcon_cmdq_del(&pmu->hpq); + nvkm_falcon_qmgr_del(&pmu->qmgr); + nvkm_falcon_dtor(&pmu->falcon); return nvkm_pmu(subdev); } @@ -153,29 +155,50 @@ static const struct nvkm_subdev_func nvkm_pmu = { .dtor = nvkm_pmu_dtor, .preinit = nvkm_pmu_preinit, - .oneinit = nvkm_pmu_oneinit, .init = nvkm_pmu_init, .fini = nvkm_pmu_fini, .intr = nvkm_pmu_intr, }; int -nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device, +nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device, int index, struct nvkm_pmu *pmu) { + int ret; + nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev); - 
pmu->func = func; + INIT_WORK(&pmu->recv.work, nvkm_pmu_recv); init_waitqueue_head(&pmu->recv.wait); + + fwif = nvkm_firmware_load(&pmu->subdev, fwif, "Pmu", pmu); + if (IS_ERR(fwif)) + return PTR_ERR(fwif); + + pmu->func = fwif->func; + + ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev, + nvkm_subdev_name[pmu->subdev.index], 0x10a000, + &pmu->falcon); + if (ret) + return ret; + + if ((ret = nvkm_falcon_qmgr_new(&pmu->falcon, &pmu->qmgr)) || + (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "hpq", &pmu->hpq)) || + (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "lpq", &pmu->lpq)) || + (ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq))) + return ret; + + init_completion(&pmu->wpr_ready); return 0; } int -nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device, +nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { struct nvkm_pmu *pmu; if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) return -ENOMEM; - return nvkm_pmu_ctor(func, device, index, *ppmu); + return nvkm_pmu_ctor(fwif, device, index, *ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c index 0b458656e870..3ecb3d9cbcf2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c @@ -42,6 +42,7 @@ gf100_pmu_enabled(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gf100_pmu = { + .flcn = >215_pmu_flcn, .code.data = gf100_pmu_code, .code.size = sizeof(gf100_pmu_code), .data.data = gf100_pmu_data, @@ -56,7 +57,19 @@ gf100_pmu = { }; int +gf100_pmu_nofw(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif) +{ + return 0; +} + +static const struct nvkm_pmu_fwif +gf100_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gf100_pmu }, + {} +}; + +int gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gf100_pmu, device, index, ppmu); + return nvkm_pmu_new_(gf100_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c index 3dfa79d4fb13..8dd0271aaaee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c @@ -26,6 +26,7 @@ static const struct nvkm_pmu_func gf119_pmu = { + .flcn = >215_pmu_flcn, .code.data = gf119_pmu_code, .code.size = sizeof(gf119_pmu_code), .data.data = gf119_pmu_data, @@ -39,8 +40,14 @@ gf119_pmu = { .recv = gt215_pmu_recv, }; +static const struct nvkm_pmu_fwif +gf119_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gf119_pmu }, + {} +}; + int gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gf119_pmu, device, index, ppmu); + return nvkm_pmu_new_(gf119_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c index 8f7ec10fd2a4..8b70cc17a634 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c @@ -105,6 +105,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable) static const struct nvkm_pmu_func gk104_pmu = { + .flcn = >215_pmu_flcn, .code.data = gk104_pmu_code, .code.size = sizeof(gk104_pmu_code), .data.data = gk104_pmu_data, @@ -119,8 +120,14 @@ gk104_pmu = { .pgob = gk104_pmu_pgob, }; +static const struct nvkm_pmu_fwif +gk104_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gk104_pmu }, + {} +}; + int gk104_pmu_new(struct nvkm_device 
*device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gk104_pmu, device, index, ppmu); + return nvkm_pmu_new_(gk104_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c index 345741d55a56..0081f2141b10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c @@ -84,6 +84,7 @@ gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable) static const struct nvkm_pmu_func gk110_pmu = { + .flcn = >215_pmu_flcn, .code.data = gk110_pmu_code, .code.size = sizeof(gk110_pmu_code), .data.data = gk110_pmu_data, @@ -98,8 +99,14 @@ gk110_pmu = { .pgob = gk110_pmu_pgob, }; +static const struct nvkm_pmu_fwif +gk110_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gk110_pmu }, + {} +}; + int gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gk110_pmu, device, index, ppmu); + return nvkm_pmu_new_(gk110_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c index e4acf7876ea1..b227c701a5e7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c @@ -26,6 +26,7 @@ static const struct nvkm_pmu_func gk208_pmu = { + .flcn = >215_pmu_flcn, .code.data = gk208_pmu_code, .code.size = sizeof(gk208_pmu_code), .data.data = gk208_pmu_data, @@ -40,8 +41,14 @@ gk208_pmu = { .pgob = gk110_pmu_pgob, }; +static const struct nvkm_pmu_fwif +gk208_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gk208_pmu }, + {} +}; + int gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gk208_pmu, device, index, ppmu); + return nvkm_pmu_new_(gk208_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c index 05e81855c367..26c1adf8f44c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c @@ -95,7 +95,7 @@ static void gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu, struct gk20a_pmu_dvfs_dev_status *status) { - struct nvkm_falcon *falcon = pmu->base.falcon; + struct nvkm_falcon *falcon = &pmu->base.falcon; status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10)); status->total= nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10)); @@ -104,7 +104,7 @@ gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu, static void gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu) { - struct nvkm_falcon *falcon = pmu->base.falcon; + struct nvkm_falcon *falcon = &pmu->base.falcon; nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000); nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000); @@ -160,7 +160,7 @@ gk20a_pmu_fini(struct nvkm_pmu *pmu) struct gk20a_pmu *gpmu = gk20a_pmu(pmu); nvkm_timer_alarm(pmu->subdev.device->timer, 0, &gpmu->alarm); - nvkm_falcon_put(pmu->falcon, &pmu->subdev); + nvkm_falcon_put(&pmu->falcon, &pmu->subdev); } static int @@ -169,7 +169,7 @@ gk20a_pmu_init(struct nvkm_pmu *pmu) struct gk20a_pmu *gpmu = gk20a_pmu(pmu); struct nvkm_subdev *subdev = &pmu->subdev; struct nvkm_device *device = pmu->subdev.device; - struct nvkm_falcon *falcon = pmu->falcon; + struct nvkm_falcon *falcon = &pmu->falcon; int ret; ret = nvkm_falcon_get(falcon, subdev); @@ -196,25 +196,34 @@ gk20a_dvfs_data= { static const struct nvkm_pmu_func gk20a_pmu = { + .flcn = >215_pmu_flcn, .enabled 
= gf100_pmu_enabled, .init = gk20a_pmu_init, .fini = gk20a_pmu_fini, .reset = gf100_pmu_reset, }; +static const struct nvkm_pmu_fwif +gk20a_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gk20a_pmu }, + {} +}; + int gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { struct gk20a_pmu *pmu; + int ret; if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) return -ENOMEM; *ppmu = &pmu->base; - nvkm_pmu_ctor(&gk20a_pmu, device, index, &pmu->base); + ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, index, &pmu->base); + if (ret) + return ret; pmu->data = &gk20a_dvfs_data; nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work); - return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c index 459df1ef9e70..5afb55e58b51 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c @@ -28,6 +28,7 @@ static const struct nvkm_pmu_func gm107_pmu = { + .flcn = >215_pmu_flcn, .code.data = gm107_pmu_code, .code.size = sizeof(gm107_pmu_code), .data.data = gm107_pmu_data, @@ -41,8 +42,14 @@ gm107_pmu = { .recv = gt215_pmu_recv, }; +static const struct nvkm_pmu_fwif +gm107_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gm107_pmu }, + {} +}; + int gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gm107_pmu, device, index, ppmu); + return nvkm_pmu_new_(gm107_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c index 31c843145c7a..82571032a07d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c @@ -19,38 +19,224 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ - -#include <engine/falcon.h> -#include <core/msgqueue.h> #include "priv.h" -static void +#include <core/memory.h> +#include <subdev/acr.h> + +#include <nvfw/flcn.h> +#include <nvfw/pmu.h> + +static int +gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nv_falcon_msg *hdr) +{ + struct nv_pmu_acr_bootstrap_falcon_msg *msg = + container_of(hdr, typeof(*msg), msg.hdr); + return msg->falcon_id; +} + +int +gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id) +{ + struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); + struct nv_pmu_acr_bootstrap_falcon_cmd cmd = { + .cmd.hdr.unit_id = NV_PMU_UNIT_ACR, + .cmd.hdr.size = sizeof(cmd), + .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON, + .flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES, + .falcon_id = id, + }; + int ret; + + ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, + gm20b_pmu_acr_bootstrap_falcon_cb, + &pmu->subdev, msecs_to_jiffies(1000)); + if (ret >= 0) { + if (ret != cmd.falcon_id) + ret = -EIO; + else + ret = 0; + } + + return ret; +} + +int +gm20b_pmu_acr_boot(struct nvkm_falcon *falcon) +{ + struct nv_pmu_args args = { .secure_mode = true }; + const u32 addr_args = falcon->data.limit - sizeof(struct nv_pmu_args); + nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0); + nvkm_falcon_start(falcon); + return 0; +} + +void +gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) +{ + struct loader_config hdr; + u64 addr; + + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); + addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8); + hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8); + hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8); + addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8); + hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8); + hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8); + addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8); + hdr.overlay_dma_base = lower_32_bits((addr + adjust) << 8); + hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) << 8); + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); + + loader_config_dump(&acr->subdev, &hdr); +} + +void +gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld, + struct nvkm_acr_lsfw *lsfw) +{ + const u64 base = lsfw->offset.img + lsfw->app_start_offset; + const u64 code = (base + lsfw->app_resident_code_offset) >> 8; + const u64 data = (base + lsfw->app_resident_data_offset) >> 8; + const struct loader_config hdr = { + .dma_idx = FALCON_DMAIDX_UCODE, + .code_dma_base = lower_32_bits(code), + .code_size_total = lsfw->app_size, + .code_size_to_load = lsfw->app_resident_code_size, + .code_entry_point = lsfw->app_imem_entry, + .data_dma_base = lower_32_bits(data), + .data_size = lsfw->app_resident_data_size, + .overlay_dma_base = lower_32_bits(code), + .argc = 1, + .argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args), + .code_dma_base1 = upper_32_bits(code), + .data_dma_base1 = upper_32_bits(data), + .overlay_dma_base1 = upper_32_bits(code), + }; + + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); +} + +static const struct nvkm_acr_lsf_func +gm20b_pmu_acr = { + .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX, + .bld_size = sizeof(struct loader_config), + .bld_write = gm20b_pmu_acr_bld_write, + .bld_patch = gm20b_pmu_acr_bld_patch, + .boot = gm20b_pmu_acr_boot, + .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon, +}; + +static int +gm20b_pmu_acr_init_wpr_callback(void *priv, struct nv_falcon_msg *hdr) +{ + struct 
nv_pmu_acr_init_wpr_region_msg *msg = + container_of(hdr, typeof(*msg), msg.hdr); + struct nvkm_pmu *pmu = priv; + struct nvkm_subdev *subdev = &pmu->subdev; + + if (msg->error_code) { + nvkm_error(subdev, "ACR WPR init failure: %d\n", + msg->error_code); + return -EINVAL; + } + + nvkm_debug(subdev, "ACR WPR init complete\n"); + complete_all(&pmu->wpr_ready); + return 0; +} + +static int +gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu) +{ + struct nv_pmu_acr_init_wpr_region_cmd cmd = { + .cmd.hdr.unit_id = NV_PMU_UNIT_ACR, + .cmd.hdr.size = sizeof(cmd), + .cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION, + .region_id = 1, + .wpr_offset = 0, + }; + + return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, + gm20b_pmu_acr_init_wpr_callback, pmu, 0); +} + +int +gm20b_pmu_initmsg(struct nvkm_pmu *pmu) +{ + struct nv_pmu_init_msg msg; + int ret; + + ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg)); + if (ret) + return ret; + + if (msg.hdr.unit_id != NV_PMU_UNIT_INIT || + msg.msg_type != NV_PMU_INIT_MSG_INIT) + return -EINVAL; + + nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index, + msg.queue_info[0].offset, + msg.queue_info[0].size); + nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index, + msg.queue_info[1].offset, + msg.queue_info[1].size); + nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index, + msg.queue_info[4].offset, + msg.queue_info[4].size); + return gm20b_pmu_acr_init_wpr(pmu); +} + +void gm20b_pmu_recv(struct nvkm_pmu *pmu) { - if (!pmu->queue) { - nvkm_warn(&pmu->subdev, - "recv function called while no firmware set!\n"); - return; + if (!pmu->initmsg_received) { + int ret = pmu->func->initmsg(pmu); + if (ret) { + nvkm_error(&pmu->subdev, + "error parsing init message: %d\n", ret); + return; + } + + pmu->initmsg_received = true; } - nvkm_msgqueue_recv(pmu->queue); + nvkm_falcon_msgq_recv(pmu->msgq); } static const struct nvkm_pmu_func gm20b_pmu = { + .flcn = &gt215_pmu_flcn, .enabled = gf100_pmu_enabled, .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, + .initmsg = gm20b_pmu_initmsg, }; +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin"); +MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin"); +MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin"); +#endif + int -gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif) { - int ret; + return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon, + NVKM_ACR_LSF_PMU, "pmu/", + ver, fwif->acr); +} - ret = nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu); - if (ret) - return ret; +static const struct nvkm_pmu_fwif +gm20b_pmu_fwif[] = { + { 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr }, + {} +}; - return 0; +int +gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +{ + return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c index e210cd6af816..09e05db21ff5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c @@ -25,12 +25,19 @@ static const struct nvkm_pmu_func gp100_pmu = { + .flcn = &gt215_pmu_flcn, .enabled = gf100_pmu_enabled, .reset = gf100_pmu_reset, }; +static const struct nvkm_pmu_fwif +gp100_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gp100_pmu }, + {} +}; + int gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gp100_pmu, device, index, 
ppmu); + return nvkm_pmu_new_(gp100_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c index 98c7a2a8afc4..262b8a3dd507 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c @@ -39,12 +39,19 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gp102_pmu = { + .flcn = &gt215_pmu_flcn, .enabled = gp102_pmu_enabled, .reset = gp102_pmu_reset, }; +static const struct nvkm_pmu_fwif +gp102_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gp102_pmu }, + {} +}; + int gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gp102_pmu, device, index, ppmu); + return nvkm_pmu_new_(gp102_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c new file mode 100644 index 000000000000..5b81c7320479 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +#include <subdev/acr.h> + +#include <nvfw/flcn.h> +#include <nvfw/pmu.h> + +static int +gp10b_pmu_acr_bootstrap_multiple_falcons_cb(void *priv, + struct nv_falcon_msg *hdr) +{ + struct nv_pmu_acr_bootstrap_multiple_falcons_msg *msg = + container_of(hdr, typeof(*msg), msg.hdr); + return msg->falcon_mask; +} +static int +gp10b_pmu_acr_bootstrap_multiple_falcons(struct nvkm_falcon *falcon, u32 mask) +{ + struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); + struct nv_pmu_acr_bootstrap_multiple_falcons_cmd cmd = { + .cmd.hdr.unit_id = NV_PMU_UNIT_ACR, + .cmd.hdr.size = sizeof(cmd), + .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS, + .flags = NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES, + .falcon_mask = mask, + .wpr_lo = 0, /*XXX*/ + .wpr_hi = 0, /*XXX*/ + }; + int ret; + + ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, + gp10b_pmu_acr_bootstrap_multiple_falcons_cb, + &pmu->subdev, msecs_to_jiffies(1000)); + if (ret >= 0) { + if (ret != cmd.falcon_mask) + ret = -EIO; + else + ret = 0; + } + + return ret; +} + +static const struct nvkm_acr_lsf_func +gp10b_pmu_acr = { + .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX, + .bld_size = sizeof(struct loader_config), + .bld_write = gm20b_pmu_acr_bld_write, + .bld_patch = gm20b_pmu_acr_bld_patch, + .boot = gm20b_pmu_acr_boot, + .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon, + .bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons, +}; + +static const struct nvkm_pmu_func +gp10b_pmu = { + .flcn = &gt215_pmu_flcn, + .enabled = gf100_pmu_enabled, + .intr = gt215_pmu_intr, + .recv = gm20b_pmu_recv, + .initmsg = gm20b_pmu_initmsg, +}; + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin"); +MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin"); +MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin"); +#endif + +static const struct nvkm_pmu_fwif +gp10b_pmu_fwif[] = { + { 0, gm20b_pmu_load, &gp10b_pmu, &gp10b_pmu_acr }, + {} +}; + +int +gp10b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +{ + return nvkm_pmu_new_(gp10b_pmu_fwif, device, index, ppmu); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c index e04216daea58..88b909913ff9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c @@ -241,8 +241,27 @@ gt215_pmu_init(struct nvkm_pmu *pmu) return 0; } +const struct nvkm_falcon_func +gt215_pmu_flcn = { + .debug = 0xc08, + .fbif = 0xe00, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = nvkm_falcon_v1_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, + .cmdq = { 0x4a0, 0x4b0, 4 }, + .msgq = { 0x4c8, 0x4cc, 0 }, +}; + static const struct nvkm_pmu_func gt215_pmu = { + .flcn = &gt215_pmu_flcn, .code.data = gt215_pmu_code, .code.size = sizeof(gt215_pmu_code), .data.data = gt215_pmu_data, @@ -256,8 +275,14 @@ gt215_pmu = { .recv = gt215_pmu_recv, }; +static const struct nvkm_pmu_fwif +gt215_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gt215_pmu }, + {} +}; + int gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gt215_pmu, device, index, ppmu); + return 
nvkm_pmu_new_(gt215_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h index 26d73f9cd6d3..f470859244de 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h @@ -4,13 +4,12 @@ #define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev) #include <subdev/pmu.h> #include <subdev/pmu/fuc/os.h> - -int nvkm_pmu_ctor(const struct nvkm_pmu_func *, struct nvkm_device *, - int index, struct nvkm_pmu *); -int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *, - int index, struct nvkm_pmu **); +enum nvkm_acr_lsf_id; +struct nvkm_acr_lsfw; struct nvkm_pmu_func { + const struct nvkm_falcon_func *flcn; + struct { u32 *data; u32 size; @@ -29,9 +28,11 @@ struct nvkm_pmu_func { int (*send)(struct nvkm_pmu *, u32 reply[2], u32 process, u32 message, u32 data0, u32 data1); void (*recv)(struct nvkm_pmu *); + int (*initmsg)(struct nvkm_pmu *); void (*pgob)(struct nvkm_pmu *, bool); }; +extern const struct nvkm_falcon_func gt215_pmu_flcn; int gt215_pmu_init(struct nvkm_pmu *); void gt215_pmu_fini(struct nvkm_pmu *); void gt215_pmu_intr(struct nvkm_pmu *); @@ -42,4 +43,26 @@ bool gf100_pmu_enabled(struct nvkm_pmu *); void gf100_pmu_reset(struct nvkm_pmu *); void gk110_pmu_pgob(struct nvkm_pmu *, bool); + +void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64); +void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *); +int gm20b_pmu_acr_boot(struct nvkm_falcon *); +int gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *, enum nvkm_acr_lsf_id); +void gm20b_pmu_recv(struct nvkm_pmu *); +int gm20b_pmu_initmsg(struct nvkm_pmu *); + +struct nvkm_pmu_fwif { + int version; + int (*load)(struct nvkm_pmu *, int ver, const struct nvkm_pmu_fwif *); + const struct nvkm_pmu_func *func; + const struct nvkm_acr_lsf_func *acr; +}; + +int gf100_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *); +int gm20b_pmu_load(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *); + +int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *, + int index, struct nvkm_pmu *); +int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *, + int index, struct nvkm_pmu **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild deleted file mode 100644 index f3dee2693c79..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild +++ /dev/null @@ -1,17 +0,0 @@ -# SPDX-License-Identifier: MIT -nvkm-y += nvkm/subdev/secboot/base.o -nvkm-y += nvkm/subdev/secboot/hs_ucode.o -nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o -nvkm-y += nvkm/subdev/secboot/ls_ucode_msgqueue.o -nvkm-y += nvkm/subdev/secboot/acr.o -nvkm-y += nvkm/subdev/secboot/acr_r352.o -nvkm-y += nvkm/subdev/secboot/acr_r361.o -nvkm-y += nvkm/subdev/secboot/acr_r364.o -nvkm-y += nvkm/subdev/secboot/acr_r367.o -nvkm-y += nvkm/subdev/secboot/acr_r370.o -nvkm-y += nvkm/subdev/secboot/acr_r375.o -nvkm-y += nvkm/subdev/secboot/gm200.o -nvkm-y += nvkm/subdev/secboot/gm20b.o -nvkm-y += nvkm/subdev/secboot/gp102.o -nvkm-y += nvkm/subdev/secboot/gp108.o -nvkm-y += nvkm/subdev/secboot/gp10b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c deleted file mode 100644 index dc80985cf093..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. 
All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr.h" - -#include <core/firmware.h> - -/** - * Convenience function to duplicate a firmware file in memory and check that - * it has the required minimum size. - */ -void * -nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name, - size_t min_size) -{ - const struct firmware *fw; - void *blob; - int ret; - - ret = nvkm_firmware_get(subdev, name, &fw); - if (ret) - return ERR_PTR(ret); - if (fw->size < min_size) { - nvkm_error(subdev, "%s is smaller than expected size %zu\n", - name, min_size); - nvkm_firmware_put(fw); - return ERR_PTR(-EINVAL); - } - blob = kmemdup(fw->data, fw->size, GFP_KERNEL); - nvkm_firmware_put(fw); - if (!blob) - return ERR_PTR(-ENOMEM); - - return blob; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h deleted file mode 100644 index 73a2ac81ac69..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ -#ifndef __NVKM_SECBOOT_ACR_H__ -#define __NVKM_SECBOOT_ACR_H__ - -#include "priv.h" - -struct nvkm_acr; - -/** - * struct nvkm_acr_func - properties and functions specific to an ACR - * - * @load: make the ACR ready to run on the given secboot device - * @reset: reset the specified falcon - * @start: start the specified falcon (assumed to have been reset) - */ -struct nvkm_acr_func { - void (*dtor)(struct nvkm_acr *); - int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *); - int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool); - int (*load)(struct nvkm_acr *, struct nvkm_falcon *, - struct nvkm_gpuobj *, u64); - int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, unsigned long); -}; - -/** - * struct nvkm_acr - instance of an ACR - * - * @boot_falcon: ID of the falcon that will perform secure boot - * @managed_falcons: bitfield of falcons managed by this ACR - * @optional_falcons: bitfield of falcons we can live without - */ -struct nvkm_acr { - const struct nvkm_acr_func *func; - const struct nvkm_subdev *subdev; - - enum nvkm_secboot_falcon boot_falcon; - unsigned long managed_falcons; - unsigned long optional_falcons; -}; - -void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t); - -struct nvkm_acr *acr_r352_new(unsigned long); -struct nvkm_acr *acr_r361_new(unsigned long); -struct nvkm_acr *acr_r364_new(unsigned long); -struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long); -struct nvkm_acr *acr_r370_new(enum nvkm_secboot_falcon, unsigned long); -struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c deleted file mode 100644 index 7af971db91bc..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c +++ /dev/null @@ -1,1241 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr_r352.h" -#include "hs_ucode.h" - -#include <core/gpuobj.h> -#include <core/firmware.h> -#include <engine/falcon.h> -#include <subdev/pmu.h> -#include <core/msgqueue.h> -#include <engine/sec2.h> - -/** - * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor - * @signature: 16B signature for secure code. 
0s if no secure code - * @ctx_dma: DMA context to be used by BL while loading code/data - * @code_dma_base: 256B-aligned Physical FB Address where code is located - * (falcon's $xcbase register) - * @non_sec_code_off: offset from code_dma_base where the non-secure code is - * located. The offset must be multiple of 256 to help perf - * @non_sec_code_size: the size of the nonSecure code part. - * @sec_code_off: offset from code_dma_base where the secure code is - * located. The offset must be multiple of 256 to help perf - * @sec_code_size: offset from code_dma_base where the secure code is - * located. The offset must be multiple of 256 to help perf - * @code_entry_point: code entry point which will be invoked by BL after - * code is loaded. - * @data_dma_base: 256B aligned Physical FB Address where data is located. - * (falcon's $xdbase register) - * @data_size: size of data block. Should be multiple of 256B - * - * Structure used by the bootloader to load the rest of the code. This has - * to be filled by host and copied into DMEM at offset provided in the - * hsflcn_bl_desc.bl_desc_dmem_load_off. - */ -struct acr_r352_flcn_bl_desc { - u32 reserved[4]; - u32 signature[4]; - u32 ctx_dma; - u32 code_dma_base; - u32 non_sec_code_off; - u32 non_sec_code_size; - u32 sec_code_off; - u32 sec_code_size; - u32 code_entry_point; - u32 data_dma_base; - u32 data_size; - u32 code_dma_base1; - u32 data_dma_base1; -}; - -/** - * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image - */ -static void -acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - struct acr_r352_flcn_bl_desc *desc = _desc; - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - u64 base, addr_code, addr_data; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = (base + pdesc->app_resident_code_offset) >> 8; - addr_data = (base + pdesc->app_resident_data_offset) >> 8; - - desc->ctx_dma = FALCON_DMAIDX_UCODE; - desc->code_dma_base = lower_32_bits(addr_code); - desc->code_dma_base1 = upper_32_bits(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = lower_32_bits(addr_data); - desc->data_dma_base1 = upper_32_bits(addr_data); - desc->data_size = pdesc->app_resident_data_size; -} - - -/** - * struct hsflcn_acr_desc - data section of the HS firmware - * - * This header is to be copied at the beginning of DMEM by the HS bootloader. 
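/*
 * Minimal, self-contained sketch (plain C, not part of this patch) of the
 * DMA-address packing used by the DMEM bootloader descriptors above: the
 * physical frame-buffer address of the resident code is 256B-aligned,
 * shifted right by 8 bits, and then split across a low 32-bit field and a
 * high field (code_dma_base/code_dma_base1).  All numbers and variable
 * names here are illustrative assumptions, not values from this patch.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t wpr_addr  = 0x1f0000000ULL; /* example WPR base address */
	uint64_t ucode_off = 0x2000;         /* image offset inside WPR */
	uint64_t app_start = 0x100;          /* app_start_offset */
	uint64_t code_off  = 0x0;            /* app_resident_code_offset */

	/* Physical address of the resident code, pre-shifted as the BL expects. */
	uint64_t addr_code = (wpr_addr + ucode_off + app_start + code_off) >> 8;

	uint32_t code_dma_base  = (uint32_t)addr_code;         /* low 32 bits */
	uint32_t code_dma_base1 = (uint32_t)(addr_code >> 32); /* high bits */

	printf("code_dma_base=0x%08x code_dma_base1=0x%08x\n",
	       code_dma_base, code_dma_base1);
	return 0;
}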
- * - * @signature: signature of ACR ucode - * @wpr_region_id: region ID holding the WPR header and its details - * @wpr_offset: offset from the WPR region holding the wpr header - * @regions: region descriptors - * @nonwpr_ucode_blob_size: size of LS blob - * @nonwpr_ucode_blob_start: FB location of LS blob is - */ -struct hsflcn_acr_desc { - union { - u8 reserved_dmem[0x200]; - u32 signatures[4]; - } ucode_reserved_space; - u32 wpr_region_id; - u32 wpr_offset; - u32 mmu_mem_range; -#define FLCN_ACR_MAX_REGIONS 2 - struct { - u32 no_regions; - struct { - u32 start_addr; - u32 end_addr; - u32 region_id; - u32 read_mask; - u32 write_mask; - u32 client_mask; - } region_props[FLCN_ACR_MAX_REGIONS]; - } regions; - u32 ucode_blob_size; - u64 ucode_blob_base __aligned(8); - struct { - u32 vpr_enabled; - u32 vpr_start; - u32 vpr_end; - u32 hdcp_policies; - } vpr_desc; -}; - - -/* - * Low-secure blob creation - */ - -/** - * struct acr_r352_lsf_lsb_header - LS firmware header - * @signature: signature to verify the firmware against - * @ucode_off: offset of the ucode blob in the WPR region. The ucode - * blob contains the bootloader, code and data of the - * LS falcon - * @ucode_size: size of the ucode blob, including bootloader - * @data_size: size of the ucode blob data - * @bl_code_size: size of the bootloader code - * @bl_imem_off: offset in imem of the bootloader - * @bl_data_off: offset of the bootloader data in WPR region - * @bl_data_size: size of the bootloader data - * @app_code_off: offset of the app code relative to ucode_off - * @app_code_size: size of the app code - * @app_data_off: offset of the app data relative to ucode_off - * @app_data_size: size of the app data - * @flags: flags for the secure bootloader - * - * This structure is written into the WPR region for each managed falcon. Each - * instance is referenced by the lsb_offset member of the corresponding - * lsf_wpr_header. - */ -struct acr_r352_lsf_lsb_header { - /** - * LS falcon signatures - * @prd_keys: signature to use in production mode - * @dgb_keys: signature to use in debug mode - * @b_prd_present: whether the production key is present - * @b_dgb_present: whether the debug key is present - * @falcon_id: ID of the falcon the ucode applies to - */ - struct { - u8 prd_keys[2][16]; - u8 dbg_keys[2][16]; - u32 b_prd_present; - u32 b_dbg_present; - u32 falcon_id; - } signature; - u32 ucode_off; - u32 ucode_size; - u32 data_size; - u32 bl_code_size; - u32 bl_imem_off; - u32 bl_data_off; - u32 bl_data_size; - u32 app_code_off; - u32 app_code_size; - u32 app_data_off; - u32 app_data_size; - u32 flags; -}; - -/** - * struct acr_r352_lsf_wpr_header - LS blob WPR Header - * @falcon_id: LS falcon ID - * @lsb_offset: offset of the lsb_lsf_header in the WPR region - * @bootstrap_owner: secure falcon reponsible for bootstrapping the LS falcon - * @lazy_bootstrap: skip bootstrapping by ACR - * @status: bootstrapping status - * - * An array of these is written at the beginning of the WPR region, one for - * each managed falcon. The array is terminated by an instance which falcon_id - * is LSF_FALCON_ID_INVALID. 
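/*
 * Sketch (plain C, not part of this patch) of how the wpr_header array
 * described above is consumed: entries are packed from the start of the
 * WPR region and scanning stops at the first entry whose falcon_id is the
 * "invalid" marker.  The struct below mirrors acr_r352_lsf_wpr_header; the
 * 0xffffffff terminator value is an assumption made for the example.
 */
#include <stddef.h>
#include <stdint.h>

struct example_wpr_header {
	uint32_t falcon_id;
	uint32_t lsb_offset;
	uint32_t bootstrap_owner;
	uint32_t lazy_bootstrap;
	uint32_t status;
};

static size_t
example_count_wpr_entries(const struct example_wpr_header *whdr)
{
	size_t n = 0;

	while (whdr[n].falcon_id != 0xffffffffu) /* terminating entry */
		n++;
	return n;
}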
- */ -struct acr_r352_lsf_wpr_header { - u32 falcon_id; - u32 lsb_offset; - u32 bootstrap_owner; - u32 lazy_bootstrap; - u32 status; -#define LSF_IMAGE_STATUS_NONE 0 -#define LSF_IMAGE_STATUS_COPY 1 -#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2 -#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3 -#define LSF_IMAGE_STATUS_VALIDATION_DONE 4 -#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5 -#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6 -}; - -/** - * struct ls_ucode_img_r352 - ucode image augmented with r352 headers - */ -struct ls_ucode_img_r352 { - struct ls_ucode_img base; - - const struct acr_r352_lsf_func *func; - - struct acr_r352_lsf_wpr_header wpr_header; - struct acr_r352_lsf_lsb_header lsb_header; -}; -#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base) - -/** - * ls_ucode_img_load() - create a lsf_ucode_img and load it - */ -struct ls_ucode_img * -acr_r352_ls_ucode_img_load(const struct acr_r352 *acr, - const struct nvkm_secboot *sb, - enum nvkm_secboot_falcon falcon_id) -{ - const struct nvkm_subdev *subdev = acr->base.subdev; - const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id]; - struct ls_ucode_img_r352 *img; - int ret; - - img = kzalloc(sizeof(*img), GFP_KERNEL); - if (!img) - return ERR_PTR(-ENOMEM); - - img->base.falcon_id = falcon_id; - - ret = func->load(sb, func->version_max, &img->base); - if (ret < 0) { - kfree(img->base.ucode_data); - kfree(img->base.sig); - kfree(img); - return ERR_PTR(ret); - } - - img->func = func->version[ret]; - - /* Check that the signature size matches our expectations... */ - if (img->base.sig_size != sizeof(img->lsb_header.signature)) { - nvkm_error(subdev, "invalid signature size for %s falcon!\n", - nvkm_secboot_falcon_name[falcon_id]); - return ERR_PTR(-EINVAL); - } - - /* Copy signature to the right place */ - memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size); - - /* not needed? the signature should already have the right value */ - img->lsb_header.signature.falcon_id = falcon_id; - - return &img->base; -} - -#define LSF_LSB_HEADER_ALIGN 256 -#define LSF_BL_DATA_ALIGN 256 -#define LSF_BL_DATA_SIZE_ALIGN 256 -#define LSF_BL_CODE_SIZE_ALIGN 256 -#define LSF_UCODE_DATA_ALIGN 4096 - -/** - * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image - * @acr: ACR to use - * @img: image to generate for - * @offset: offset in the WPR region where this image starts - * - * Allocate space in the WPR area from offset and write the WPR and LSB headers - * accordingly. - * - * Return: offset at the end of this image. 
- */ -static u32 -acr_r352_ls_img_fill_headers(struct acr_r352 *acr, - struct ls_ucode_img_r352 *img, u32 offset) -{ - struct ls_ucode_img *_img = &img->base; - struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header; - struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header; - struct ls_ucode_img_desc *desc = &_img->ucode_desc; - const struct acr_r352_lsf_func *func = img->func; - - /* Fill WPR header */ - whdr->falcon_id = _img->falcon_id; - whdr->bootstrap_owner = acr->base.boot_falcon; - whdr->status = LSF_IMAGE_STATUS_COPY; - - /* Skip bootstrapping falcons started by someone else than ACR */ - if (acr->lazy_bootstrap & BIT(_img->falcon_id)) - whdr->lazy_bootstrap = 1; - - /* Align, save off, and include an LSB header size */ - offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN); - whdr->lsb_offset = offset; - offset += sizeof(*lhdr); - - /* - * Align, save off, and include the original (static) ucode - * image size - */ - offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN); - _img->ucode_off = lhdr->ucode_off = offset; - offset += _img->ucode_size; - - /* - * For falcons that use a boot loader (BL), we append a loader - * desc structure on the end of the ucode image and consider - * this the boot loader data. The host will then copy the loader - * desc args to this space within the WPR region (before locking - * down) and the HS bin will then copy them to DMEM 0 for the - * loader. - */ - lhdr->bl_code_size = ALIGN(desc->bootloader_size, - LSF_BL_CODE_SIZE_ALIGN); - lhdr->ucode_size = ALIGN(desc->app_resident_data_offset, - LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size; - lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) + - lhdr->bl_code_size - lhdr->ucode_size; - /* - * Though the BL is located at 0th offset of the image, the VA - * is different to make sure that it doesn't collide the actual - * OS VA range - */ - lhdr->bl_imem_off = desc->bootloader_imem_offset; - lhdr->app_code_off = desc->app_start_offset + - desc->app_resident_code_offset; - lhdr->app_code_size = desc->app_resident_code_size; - lhdr->app_data_off = desc->app_start_offset + - desc->app_resident_data_offset; - lhdr->app_data_size = desc->app_resident_data_size; - - lhdr->flags = func->lhdr_flags; - if (_img->falcon_id == acr->base.boot_falcon) - lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX; - - /* Align and save off BL descriptor size */ - lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN); - - /* - * Align, save off, and include the additional BL data - */ - offset = ALIGN(offset, LSF_BL_DATA_ALIGN); - lhdr->bl_data_off = offset; - offset += lhdr->bl_data_size; - - return offset; -} - -/** - * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images - */ -int -acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs) -{ - struct ls_ucode_img_r352 *img; - struct list_head *l; - u32 count = 0; - u32 offset; - - /* Count the number of images to manage */ - list_for_each(l, imgs) - count++; - - /* - * Start with an array of WPR headers at the base of the WPR. - * The expectation here is that the secure falcon will do a single DMA - * read of this array and cache it internally so it's ok to pack these. - * Also, we add 1 to the falcon count to indicate the end of the array. - */ - offset = sizeof(img->wpr_header) * (count + 1); - - /* - * Walk the managed falcons, accounting for the LSB structs - * as well as the ucode images. 
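/*
 * Worked example (plain C, illustrative sizes only, not from this patch)
 * of the offset computation above for two managed falcons: the header
 * array occupies (2 + 1) * 20 = 60 bytes, then each image gets a
 * 256B-aligned LSB header, a 4096B-aligned ucode blob and a 256B-aligned
 * bootloader-descriptor block.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_ALIGN(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

int
main(void)
{
	uint32_t offset = (2 + 1) * 20;	/* WPR headers + terminator entry */
	int i;

	for (i = 0; i < 2; i++) {
		offset = EX_ALIGN(offset, 256);	 /* LSB header */
		offset += 124;			 /* assumed LSB header size */
		offset = EX_ALIGN(offset, 4096); /* ucode image */
		offset += 0x8000;		 /* assumed ucode size */
		offset = EX_ALIGN(offset, 256);	 /* BL descriptor data */
		offset += 256;			 /* assumed bl_data_size */
	}

	printf("end of LS images at 0x%x\n", offset);
	return 0;
}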
- */ - list_for_each_entry(img, imgs, base.node) { - offset = acr_r352_ls_img_fill_headers(acr, img, offset); - } - - return offset; -} - -/** - * acr_r352_ls_write_wpr - write the WPR blob contents - */ -int -acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, - struct nvkm_gpuobj *wpr_blob, u64 wpr_addr) -{ - struct ls_ucode_img *_img; - u32 pos = 0; - u32 max_desc_size = 0; - u8 *gdesc; - - /* Figure out how large we need gdesc to be. */ - list_for_each_entry(_img, imgs, node) { - struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img); - const struct acr_r352_lsf_func *ls_func = img->func; - - max_desc_size = max(max_desc_size, ls_func->bl_desc_size); - } - - gdesc = kmalloc(max_desc_size, GFP_KERNEL); - if (!gdesc) - return -ENOMEM; - - nvkm_kmap(wpr_blob); - - list_for_each_entry(_img, imgs, node) { - struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img); - const struct acr_r352_lsf_func *ls_func = img->func; - - nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header, - sizeof(img->wpr_header)); - - nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset, - &img->lsb_header, sizeof(img->lsb_header)); - - /* Generate and write BL descriptor */ - memset(gdesc, 0, ls_func->bl_desc_size); - ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc); - - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off, - gdesc, ls_func->bl_desc_size); - - /* Copy ucode */ - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off, - _img->ucode_data, _img->ucode_size); - - pos += sizeof(img->wpr_header); - } - - nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID); - - nvkm_done(wpr_blob); - - kfree(gdesc); - - return 0; -} - -/* Both size and address of WPR need to be 256K-aligned */ -#define WPR_ALIGNMENT 0x40000 -/** - * acr_r352_prepare_ls_blob() - prepare the LS blob - * - * For each securely managed falcon, load the FW, signatures and bootloaders and - * prepare a ucode blob. Then, compute the offsets in the WPR region for each - * blob, and finally write the headers and ucode blobs into a GPU object that - * will be copied into the WPR region by the HS firmware. - */ -static int -acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb) -{ - const struct nvkm_subdev *subdev = acr->base.subdev; - struct list_head imgs; - struct ls_ucode_img *img, *t; - unsigned long managed_falcons = acr->base.managed_falcons; - u64 wpr_addr = sb->wpr_addr; - u32 wpr_size = sb->wpr_size; - int managed_count = 0; - u32 image_wpr_size, ls_blob_size; - int falcon_id; - int ret; - - INIT_LIST_HEAD(&imgs); - - /* Load all LS blobs */ - for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) { - struct ls_ucode_img *img; - - img = acr->func->ls_ucode_img_load(acr, sb, falcon_id); - if (IS_ERR(img)) { - if (acr->base.optional_falcons & BIT(falcon_id)) { - managed_falcons &= ~BIT(falcon_id); - nvkm_info(subdev, "skipping %s falcon...\n", - nvkm_secboot_falcon_name[falcon_id]); - continue; - } - ret = PTR_ERR(img); - goto cleanup; - } - - list_add_tail(&img->node, &imgs); - managed_count++; - } - - /* Commit the actual list of falcons we will manage from now on */ - acr->base.managed_falcons = managed_falcons; - - /* - * If the boot falcon has a firmare, let it manage the bootstrap of other - * falcons. 
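/*
 * Sketch (plain C, not from this patch) of the lazy-bootstrap selection
 * described above: when the boot falcon (PMU or SEC2) ships its own LS
 * firmware, every other managed falcon gets its lazy-bootstrap bit set, so
 * the HS ACR leaves those falcons alone and the boot falcon's firmware
 * starts them on demand later.
 */
#include <stdint.h>

static uint32_t
example_lazy_bootstrap_mask(uint32_t managed_falcons, unsigned int boot_falcon)
{
	/* Everything managed except the boot falcon itself. */
	return managed_falcons & ~(1u << boot_falcon);
}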
- */ - if (acr->func->ls_func[acr->base.boot_falcon] && - (managed_falcons & BIT(acr->base.boot_falcon))) { - for_each_set_bit(falcon_id, &managed_falcons, - NVKM_SECBOOT_FALCON_END) { - if (falcon_id == acr->base.boot_falcon) - continue; - - acr->lazy_bootstrap |= BIT(falcon_id); - } - } - - /* - * Fill the WPR and LSF headers with the right offsets and compute - * required WPR size - */ - image_wpr_size = acr->func->ls_fill_headers(acr, &imgs); - image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT); - - ls_blob_size = image_wpr_size; - - /* - * If we need a shadow area, allocate twice the size and use the - * upper half as WPR - */ - if (wpr_size == 0 && acr->func->shadow_blob) - ls_blob_size *= 2; - - /* Allocate GPU object that will contain the WPR region */ - ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT, - false, NULL, &acr->ls_blob); - if (ret) - goto cleanup; - - nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n", - managed_count, image_wpr_size); - - /* If WPR address and size are not fixed, set them to fit the LS blob */ - if (wpr_size == 0) { - wpr_addr = acr->ls_blob->addr; - if (acr->func->shadow_blob) - wpr_addr += acr->ls_blob->size / 2; - - wpr_size = image_wpr_size; - /* - * But if the WPR region is set by the bootloader, it is illegal for - * the HS blob to be larger than this region. - */ - } else if (image_wpr_size > wpr_size) { - nvkm_error(subdev, "WPR region too small for FW blob!\n"); - nvkm_error(subdev, "required: %dB\n", image_wpr_size); - nvkm_error(subdev, "available: %dB\n", wpr_size); - ret = -ENOSPC; - goto cleanup; - } - - /* Write LS blob */ - ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr); - if (ret) - nvkm_gpuobj_del(&acr->ls_blob); - -cleanup: - list_for_each_entry_safe(img, t, &imgs, node) { - kfree(img->ucode_data); - kfree(img->sig); - kfree(img); - } - - return ret; -} - - - - -void -acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb, - void *_desc) -{ - struct hsflcn_acr_desc *desc = _desc; - struct nvkm_gpuobj *ls_blob = acr->ls_blob; - - /* WPR region information if WPR is not fixed */ - if (sb->wpr_size == 0) { - u64 wpr_start = ls_blob->addr; - u64 wpr_end = wpr_start + ls_blob->size; - - desc->wpr_region_id = 1; - desc->regions.no_regions = 2; - desc->regions.region_props[0].start_addr = wpr_start >> 8; - desc->regions.region_props[0].end_addr = wpr_end >> 8; - desc->regions.region_props[0].region_id = 1; - desc->regions.region_props[0].read_mask = 0xf; - desc->regions.region_props[0].write_mask = 0xc; - desc->regions.region_props[0].client_mask = 0x2; - } else { - desc->ucode_blob_base = ls_blob->addr; - desc->ucode_blob_size = ls_blob->size; - } -} - -static void -acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, - u64 offset) -{ - struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc; - u64 addr_code, addr_data; - - addr_code = offset >> 8; - addr_data = (offset + hdr->data_dma_base) >> 8; - - bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; - bl_desc->code_dma_base = lower_32_bits(addr_code); - bl_desc->non_sec_code_off = hdr->non_sec_code_off; - bl_desc->non_sec_code_size = hdr->non_sec_code_size; - bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0); - bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0); - bl_desc->code_entry_point = 0; - bl_desc->data_dma_base = lower_32_bits(addr_data); - bl_desc->data_size = hdr->data_size; -} - -/** - * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor - * - * @sb secure boot 
instance to prepare for - * @fw name of the HS firmware to load - * @blob pointer to gpuobj that will be allocated to receive the HS FW payload - * @bl_desc pointer to the BL descriptor to write for this firmware - * @patch whether we should patch the HS descriptor (only for HS loaders) - */ -static int -acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb, - const char *fw, struct nvkm_gpuobj **blob, - struct hsf_load_header *load_header, bool patch) -{ - struct nvkm_subdev *subdev = &sb->subdev; - void *acr_image; - struct fw_bin_header *hsbin_hdr; - struct hsf_fw_header *fw_hdr; - struct hsf_load_header *load_hdr; - void *acr_data; - int ret; - - acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw); - if (IS_ERR(acr_image)) - return PTR_ERR(acr_image); - - hsbin_hdr = acr_image; - fw_hdr = acr_image + hsbin_hdr->header_offset; - load_hdr = acr_image + fw_hdr->hdr_offset; - acr_data = acr_image + hsbin_hdr->data_offset; - - /* Patch descriptor with WPR information? */ - if (patch) { - struct hsflcn_acr_desc *desc; - - desc = acr_data + load_hdr->data_dma_base; - acr->func->fixup_hs_desc(acr, sb, desc); - } - - if (load_hdr->num_apps > ACR_R352_MAX_APPS) { - nvkm_error(subdev, "more apps (%d) than supported (%d)!", - load_hdr->num_apps, ACR_R352_MAX_APPS); - ret = -EINVAL; - goto cleanup; - } - memcpy(load_header, load_hdr, sizeof(*load_header) + - (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps)); - - /* Create ACR blob and copy HS data to it */ - ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256), - 0x1000, false, NULL, blob); - if (ret) - goto cleanup; - - nvkm_kmap(*blob); - nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size); - nvkm_done(*blob); - -cleanup: - kfree(acr_image); - - return ret; -} - -/** - * acr_r352_load_blobs - load blobs common to all ACR V1 versions. - * - * This includes the LS blob, HS ucode loading blob, and HS bootloader. - * - * The HS ucode unload blob is only used on dGPU if the WPR region is variable. - */ -int -acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb) -{ - struct nvkm_subdev *subdev = &sb->subdev; - int ret; - - /* Firmware already loaded? 
*/ - if (acr->firmware_ok) - return 0; - - /* Load and prepare the managed falcon's firmwares */ - ret = acr_r352_prepare_ls_blob(acr, sb); - if (ret) - return ret; - - /* Load the HS firmware that will load the LS firmwares */ - if (!acr->load_blob) { - ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load", - &acr->load_blob, - &acr->load_bl_header, true); - if (ret) - return ret; - } - - /* If the ACR region is dynamically programmed, we need an unload FW */ - if (sb->wpr_size == 0) { - ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload", - &acr->unload_blob, - &acr->unload_bl_header, false); - if (ret) - return ret; - } - - /* Load the HS firmware bootloader */ - if (!acr->hsbl_blob) { - acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0); - if (IS_ERR(acr->hsbl_blob)) { - ret = PTR_ERR(acr->hsbl_blob); - acr->hsbl_blob = NULL; - return ret; - } - - if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) { - acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev, - "acr/unload_bl", 0); - if (IS_ERR(acr->hsbl_unload_blob)) { - ret = PTR_ERR(acr->hsbl_unload_blob); - acr->hsbl_unload_blob = NULL; - return ret; - } - } else { - acr->hsbl_unload_blob = acr->hsbl_blob; - } - } - - acr->firmware_ok = true; - nvkm_debug(&sb->subdev, "LS blob successfully created\n"); - - return 0; -} - -/** - * acr_r352_load() - prepare HS falcon to run the specified blob, mapped. - * - * Returns the start address to use, or a negative error value. - */ -static int -acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon, - struct nvkm_gpuobj *blob, u64 offset) -{ - struct acr_r352 *acr = acr_r352(_acr); - const u32 bl_desc_size = acr->func->hs_bl_desc_size; - const struct hsf_load_header *load_hdr; - struct fw_bin_header *bl_hdr; - struct fw_bl_desc *hsbl_desc; - void *bl, *blob_data, *hsbl_code, *hsbl_data; - u32 code_size; - u8 *bl_desc; - - bl_desc = kzalloc(bl_desc_size, GFP_KERNEL); - if (!bl_desc) - return -ENOMEM; - - /* Find the bootloader descriptor for our blob and copy it */ - if (blob == acr->load_blob) { - load_hdr = &acr->load_bl_header; - bl = acr->hsbl_blob; - } else if (blob == acr->unload_blob) { - load_hdr = &acr->unload_bl_header; - bl = acr->hsbl_unload_blob; - } else { - nvkm_error(_acr->subdev, "invalid secure boot blob!\n"); - kfree(bl_desc); - return -EINVAL; - } - - bl_hdr = bl; - hsbl_desc = bl + bl_hdr->header_offset; - blob_data = bl + bl_hdr->data_offset; - hsbl_code = blob_data + hsbl_desc->code_off; - hsbl_data = blob_data + hsbl_desc->data_off; - code_size = ALIGN(hsbl_desc->code_size, 256); - - /* - * Copy HS bootloader data - */ - nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0); - - /* Copy HS bootloader code to end of IMEM */ - nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size, - code_size, hsbl_desc->start_tag, 0, false); - - /* Generate the BL header */ - acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset); - - /* - * Copy HS BL header where the HS descriptor expects it to be - */ - nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off, - bl_desc_size, 0); - - kfree(bl_desc); - return hsbl_desc->start_tag << 8; -} - -static int -acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb) -{ - struct nvkm_subdev *subdev = &sb->subdev; - int i; - - /* Run the unload blob to unprotect the WPR region */ - if (acr->unload_blob && sb->wpr_set) { - int ret; - - nvkm_debug(subdev, "running HS unload blob\n"); - ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon); - if (ret < 0) - 
return ret; - /* - * Unload blob will return this error code - it is not an error - * and the expected behavior on RM as well - */ - if (ret && ret != 0x1d) { - nvkm_error(subdev, "HS unload failed, ret 0x%08x\n", ret); - return -EINVAL; - } - nvkm_debug(subdev, "HS unload blob completed\n"); - } - - for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++) - acr->falcon_state[i] = NON_SECURE; - - sb->wpr_set = false; - - return 0; -} - -/** - * Check if the WPR region has been indeed set by the ACR firmware, and - * matches where it should be. - */ -static bool -acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb) -{ - const struct nvkm_subdev *subdev = &sb->subdev; - const struct nvkm_device *device = subdev->device; - u64 wpr_lo, wpr_hi; - u64 wpr_range_lo, wpr_range_hi; - - nvkm_wr32(device, 0x100cd4, 0x2); - wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff); - wpr_lo <<= 8; - nvkm_wr32(device, 0x100cd4, 0x3); - wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff); - wpr_hi <<= 8; - - if (sb->wpr_size != 0) { - wpr_range_lo = sb->wpr_addr; - wpr_range_hi = wpr_range_lo + sb->wpr_size; - } else { - wpr_range_lo = acr->ls_blob->addr; - wpr_range_hi = wpr_range_lo + acr->ls_blob->size; - } - - return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi && - wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi); -} - -static int -acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb) -{ - const struct nvkm_subdev *subdev = &sb->subdev; - unsigned long managed_falcons = acr->base.managed_falcons; - int falcon_id; - int ret; - - if (sb->wpr_set) - return 0; - - /* Make sure all blobs are ready */ - ret = acr_r352_load_blobs(acr, sb); - if (ret) - return ret; - - nvkm_debug(subdev, "running HS load blob\n"); - ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon); - /* clear halt interrupt */ - nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10); - sb->wpr_set = acr_r352_wpr_is_set(acr, sb); - if (ret < 0) { - return ret; - } else if (ret > 0) { - nvkm_error(subdev, "HS load failed, ret 0x%08x\n", ret); - return -EINVAL; - } - nvkm_debug(subdev, "HS load blob completed\n"); - /* WPR must be set at this point */ - if (!sb->wpr_set) { - nvkm_error(subdev, "ACR blob completed but WPR not set!\n"); - return -EINVAL; - } - - /* Run LS firmwares post_run hooks */ - for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) { - const struct acr_r352_ls_func *func = - acr->func->ls_func[falcon_id]; - - if (func->post_run) { - ret = func->post_run(&acr->base, sb); - if (ret) - return ret; - } - } - - return 0; -} - -/** - * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded - * - * Reset is done by re-executing secure boot from scratch, with lazy bootstrap - * disabled. This has the effect of making all managed falcons ready-to-run. - */ -static int -acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb, - unsigned long falcon_mask) -{ - int falcon; - int ret; - - /* - * Perform secure boot each time we are called on FECS. Since only FECS - * and GPCCS are managed and started together, this ought to be safe. 
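/*
 * Sketch (plain C, not from this patch) of the request/acknowledge pattern
 * used both by the msgqueue-based reset path below and by the new
 * gm20b_pmu_acr_bootstrap_falcon()/gp10b_pmu_acr_bootstrap_multiple_falcons()
 * helpers earlier in this diff: the reply from the boot falcon's firmware
 * echoes the falcon id (or mask) that was bootstrapped, and anything else is
 * treated as an I/O error.  send_bootstrap_cmd() is an assumed stand-in for
 * nvkm_falcon_cmdq_send().
 */
#include <errno.h>

static int
example_bootstrap_and_check(int requested_id, int (*send_bootstrap_cmd)(int))
{
	int ret = send_bootstrap_cmd(requested_id); /* returns the echoed id */

	if (ret < 0)
		return ret;		/* transport/queue error */
	if (ret != requested_id)
		return -EIO;		/* firmware acked a different falcon */
	return 0;
}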
- */ - if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS))) - goto end; - - ret = acr_r352_shutdown(acr, sb); - if (ret) - return ret; - - ret = acr_r352_bootstrap(acr, sb); - if (ret) - return ret; - -end: - for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) { - acr->falcon_state[falcon] = RESET; - } - return 0; -} - -/* - * acr_r352_reset() - execute secure boot from the prepared state - * - * Load the HS bootloader and ask the falcon to run it. This will in turn - * load the HS firmware and run it, so once the falcon stops all the managed - * falcons should have their LS firmware loaded and be ready to run. - */ -static int -acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb, - unsigned long falcon_mask) -{ - struct acr_r352 *acr = acr_r352(_acr); - struct nvkm_msgqueue *queue; - int falcon; - bool wpr_already_set = sb->wpr_set; - int ret; - - /* Make sure secure boot is performed */ - ret = acr_r352_bootstrap(acr, sb); - if (ret) - return ret; - - /* No PMU interface? */ - if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) { - /* Redo secure boot entirely if it was already done */ - if (wpr_already_set) - return acr_r352_reset_nopmu(acr, sb, falcon_mask); - /* Else return the result of the initial invokation */ - else - return ret; - } - - switch (_acr->boot_falcon) { - case NVKM_SECBOOT_FALCON_PMU: - queue = sb->subdev.device->pmu->queue; - break; - case NVKM_SECBOOT_FALCON_SEC2: - queue = sb->subdev.device->sec2->queue; - break; - default: - return -EINVAL; - } - - /* Otherwise just ask the LS firmware to reset the falcon */ - for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) - nvkm_debug(&sb->subdev, "resetting %s falcon\n", - nvkm_secboot_falcon_name[falcon]); - ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask); - if (ret) { - nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret); - return ret; - } - nvkm_debug(&sb->subdev, "falcon reset done\n"); - - return 0; -} - -static int -acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend) -{ - struct acr_r352 *acr = acr_r352(_acr); - - return acr_r352_shutdown(acr, sb); -} - -static void -acr_r352_dtor(struct nvkm_acr *_acr) -{ - struct acr_r352 *acr = acr_r352(_acr); - - nvkm_gpuobj_del(&acr->unload_blob); - - if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU) - kfree(acr->hsbl_unload_blob); - kfree(acr->hsbl_blob); - nvkm_gpuobj_del(&acr->load_blob); - nvkm_gpuobj_del(&acr->ls_blob); - - kfree(acr); -} - -static const struct acr_r352_lsf_func -acr_r352_ls_fecs_func_0 = { - .generate_bl_desc = acr_r352_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r352_ls_fecs_func = { - .load = acr_ls_ucode_load_fecs, - .version_max = 0, - .version = { - &acr_r352_ls_fecs_func_0, - } -}; - -static const struct acr_r352_lsf_func -acr_r352_ls_gpccs_func_0 = { - .generate_bl_desc = acr_r352_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), - /* GPCCS will be loaded using PRI */ - .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, -}; - -static const struct acr_r352_ls_func -acr_r352_ls_gpccs_func = { - .load = acr_ls_ucode_load_gpccs, - .version_max = 0, - .version = { - &acr_r352_ls_gpccs_func_0, - } -}; - - - -/** - * struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor - * @dma_idx: DMA context to be used by BL while loading code/data - * @code_dma_base: 256B-aligned Physical FB Address where code is located - * @total_code_size: total size of the code part in the ucode - * 
@code_size_to_load: size of the code part to load in PMU IMEM. - * @code_entry_point: entry point in the code. - * @data_dma_base: Physical FB address where data part of ucode is located - * @data_size: Total size of the data portion. - * @overlay_dma_base: Physical Fb address for resident code present in ucode - * @argc: Total number of args - * @argv: offset where args are copied into PMU's DMEM. - * - * Structure used by the PMU bootloader to load the rest of the code - */ -struct acr_r352_pmu_bl_desc { - u32 dma_idx; - u32 code_dma_base; - u32 code_size_total; - u32 code_size_to_load; - u32 code_entry_point; - u32 data_dma_base; - u32 data_size; - u32 overlay_dma_base; - u32 argc; - u32 argv; - u16 code_dma_base1; - u16 data_dma_base1; - u16 overlay_dma_base1; -}; - -/** - * acr_r352_generate_pmu_bl_desc() - populate a DMEM BL descriptor for PMU LS image - * - */ -static void -acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - const struct nvkm_pmu *pmu = acr->subdev->device->pmu; - struct acr_r352_pmu_bl_desc *desc = _desc; - u64 base; - u64 addr_code; - u64 addr_data; - u32 addr_args; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = (base + pdesc->app_resident_code_offset) >> 8; - addr_data = (base + pdesc->app_resident_data_offset) >> 8; - addr_args = pmu->falcon->data.limit; - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; - - desc->dma_idx = FALCON_DMAIDX_UCODE; - desc->code_dma_base = lower_32_bits(addr_code); - desc->code_dma_base1 = upper_32_bits(addr_code); - desc->code_size_total = pdesc->app_size; - desc->code_size_to_load = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = lower_32_bits(addr_data); - desc->data_dma_base1 = upper_32_bits(addr_data); - desc->data_size = pdesc->app_resident_data_size; - desc->overlay_dma_base = lower_32_bits(addr_code); - desc->overlay_dma_base1 = upper_32_bits(addr_code); - desc->argc = 1; - desc->argv = addr_args; -} - -static const struct acr_r352_lsf_func -acr_r352_ls_pmu_func_0 = { - .generate_bl_desc = acr_r352_generate_pmu_bl_desc, - .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc), -}; - -static const struct acr_r352_ls_func -acr_r352_ls_pmu_func = { - .load = acr_ls_ucode_load_pmu, - .post_run = acr_ls_pmu_post_run, - .version_max = 0, - .version = { - &acr_r352_ls_pmu_func_0, - } -}; - -const struct acr_r352_func -acr_r352_func = { - .fixup_hs_desc = acr_r352_fixup_hs_desc, - .generate_hs_bl_desc = acr_r352_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), - .ls_ucode_img_load = acr_r352_ls_ucode_img_load, - .ls_fill_headers = acr_r352_ls_fill_headers, - .ls_write_wpr = acr_r352_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func, - [NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func, - }, -}; - -static const struct nvkm_acr_func -acr_r352_base_func = { - .dtor = acr_r352_dtor, - .fini = acr_r352_fini, - .load = acr_r352_load, - .reset = acr_r352_reset, -}; - -struct nvkm_acr * -acr_r352_new_(const struct acr_r352_func *func, - enum nvkm_secboot_falcon boot_falcon, - unsigned long managed_falcons) -{ - struct acr_r352 *acr; - int i; - - /* Check that all requested falcons are supported */ - for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) { - if (!func->ls_func[i]) - return ERR_PTR(-ENOTSUPP); 
- } - - acr = kzalloc(sizeof(*acr), GFP_KERNEL); - if (!acr) - return ERR_PTR(-ENOMEM); - - acr->base.boot_falcon = boot_falcon; - acr->base.managed_falcons = managed_falcons; - acr->base.func = &acr_r352_base_func; - acr->func = func; - - return &acr->base; -} - -struct nvkm_acr * -acr_r352_new(unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU, - managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h deleted file mode 100644 index e516cab849dd..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ -#ifndef __NVKM_SECBOOT_ACR_R352_H__ -#define __NVKM_SECBOOT_ACR_R352_H__ - -#include "acr.h" -#include "ls_ucode.h" -#include "hs_ucode.h" - -struct ls_ucode_img; - -#define ACR_R352_MAX_APPS 8 - -#define LSF_FLAG_LOAD_CODE_AT_0 1 -#define LSF_FLAG_DMACTL_REQ_CTX 4 -#define LSF_FLAG_FORCE_PRIV_LOAD 8 - -static inline u32 -hsf_load_header_app_off(const struct hsf_load_header *hdr, u32 app) -{ - return hdr->apps[app]; -} - -static inline u32 -hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app) -{ - return hdr->apps[hdr->num_apps + app]; -} - -/** - * struct acr_r352_lsf_func - manages a specific LS firmware version - * - * @generate_bl_desc: function called on a block of bl_desc_size to generate the - * proper bootloader descriptor for this LS firmware - * @bl_desc_size: size of the bootloader descriptor - * @lhdr_flags: LS flags - */ -struct acr_r352_lsf_func { - void (*generate_bl_desc)(const struct nvkm_acr *, - const struct ls_ucode_img *, u64, void *); - u32 bl_desc_size; - u32 lhdr_flags; -}; - -/** - * struct acr_r352_ls_func - manages a single LS falcon - * - * @load: load the external firmware into a ls_ucode_img - * @post_run: hook called right after the ACR is executed - */ -struct acr_r352_ls_func { - int (*load)(const struct nvkm_secboot *, int maxver, - struct ls_ucode_img *); - int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *); - int version_max; - const struct acr_r352_lsf_func *version[]; -}; - -struct acr_r352; - -/** - * struct acr_r352_func - manages nuances between ACR versions - * - * @generate_hs_bl_desc: function called on a block of bl_desc_size to generate - * the proper HS bootloader descriptor - * @hs_bl_desc_size: size of the HS bootloader descriptor - */ -struct acr_r352_func { - void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *, - u64); - void (*fixup_hs_desc)(struct acr_r352 *, struct nvkm_secboot *, void *); - u32 hs_bl_desc_size; - bool shadow_blob; - - struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *, - const struct nvkm_secboot *, - enum nvkm_secboot_falcon); - int (*ls_fill_headers)(struct acr_r352 *, struct list_head *); - int (*ls_write_wpr)(struct acr_r352 *, struct list_head *, - struct nvkm_gpuobj *, u64); - - const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END]; -}; - -/** - * struct acr_r352 - ACR data for driver release 352 (and beyond) - */ -struct acr_r352 { - struct nvkm_acr base; - const struct acr_r352_func *func; - - /* - * HS FW - lock WPR region (dGPU only) and load LS FWs - * on Tegra the HS FW copies the LS blob into the fixed WPR instead - */ - struct nvkm_gpuobj *load_blob; - struct { - struct hsf_load_header load_bl_header; - u32 __load_apps[ACR_R352_MAX_APPS * 2]; - }; - - /* HS FW - unlock WPR region (dGPU only) */ - struct nvkm_gpuobj *unload_blob; - struct { - struct hsf_load_header unload_bl_header; - u32 __unload_apps[ACR_R352_MAX_APPS * 2]; - }; - - /* HS bootloader */ - void *hsbl_blob; - - /* HS bootloader for unload blob, if using a different falcon */ - void *hsbl_unload_blob; - - /* LS FWs, to be loaded by the HS ACR */ - struct nvkm_gpuobj *ls_blob; - - /* Firmware already loaded? 
*/ - bool firmware_ok; - - /* Falcons to lazy-bootstrap */ - u32 lazy_bootstrap; - - /* To keep track of the state of all managed falcons */ - enum { - /* In non-secure state, no firmware loaded, no privileges*/ - NON_SECURE = 0, - /* In low-secure mode and ready to be started */ - RESET, - /* In low-secure mode and running */ - RUNNING, - } falcon_state[NVKM_SECBOOT_FALCON_END]; -}; -#define acr_r352(acr) container_of(acr, struct acr_r352, base) - -struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *, - enum nvkm_secboot_falcon, unsigned long); - -struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *, - const struct nvkm_secboot *, - enum nvkm_secboot_falcon); -int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *); -int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *, - struct nvkm_gpuobj *, u64); - -void acr_r352_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c deleted file mode 100644 index f6b2d20d7fc3..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
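The hsf_load_header_app_off()/_size() accessors in the header above index a single flat apps[] array: the first num_apps entries are per-application offsets, the next num_apps entries the matching sizes, which is also why struct acr_r352 reserves ACR_R352_MAX_APPS * 2 words after each load header. A rough stand-alone sketch of that layout, assuming a trimmed-down header that keeps only the fields the accessors need:

#include <stdint.h>
#include <stdio.h>

#define ACR_R352_MAX_APPS 8

/* trimmed-down stand-in for struct hsf_load_header; the real header also
 * carries non_sec_code_off/size, data_dma_base and data_size */
struct hsf_load_header_example {
	uint32_t num_apps;
	/* num_apps offsets, then num_apps sizes, exactly as the accessors index it */
	uint32_t apps[ACR_R352_MAX_APPS * 2];
};

static uint32_t
app_off(const struct hsf_load_header_example *hdr, uint32_t app)
{
	return hdr->apps[app];
}

static uint32_t
app_size(const struct hsf_load_header_example *hdr, uint32_t app)
{
	return hdr->apps[hdr->num_apps + app];
}

int
main(void)
{
	struct hsf_load_header_example hdr = {
		.num_apps = 2,
		.apps = { 0x100, 0x900,		/* offsets of app 0 and app 1 */
			  0x800, 0x300 },	/* sizes of app 0 and app 1 */
	};

	printf("app0: off=%#x size=%#x\n",
	       (unsigned)app_off(&hdr, 0), (unsigned)app_size(&hdr, 0));
	return 0;
}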
- */ - -#include "acr_r361.h" - -#include <engine/falcon.h> -#include <core/msgqueue.h> -#include <subdev/pmu.h> -#include <engine/sec2.h> - -static void -acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - struct acr_r361_flcn_bl_desc *desc = _desc; - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - u64 base, addr_code, addr_data; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = base + pdesc->app_resident_code_offset; - addr_data = base + pdesc->app_resident_data_offset; - - desc->ctx_dma = FALCON_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; -} - -void -acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, - u64 offset) -{ - struct acr_r361_flcn_bl_desc *bl_desc = _bl_desc; - - bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; - bl_desc->code_dma_base = u64_to_flcn64(offset); - bl_desc->non_sec_code_off = hdr->non_sec_code_off; - bl_desc->non_sec_code_size = hdr->non_sec_code_size; - bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0); - bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0); - bl_desc->code_entry_point = 0; - bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base); - bl_desc->data_size = hdr->data_size; -} - -static const struct acr_r352_lsf_func -acr_r361_ls_fecs_func_0 = { - .generate_bl_desc = acr_r361_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r361_ls_fecs_func = { - .load = acr_ls_ucode_load_fecs, - .version_max = 0, - .version = { - &acr_r361_ls_fecs_func_0, - } -}; - -static const struct acr_r352_lsf_func -acr_r361_ls_gpccs_func_0 = { - .generate_bl_desc = acr_r361_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), - /* GPCCS will be loaded using PRI */ - .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, -}; - -const struct acr_r352_ls_func -acr_r361_ls_gpccs_func = { - .load = acr_ls_ucode_load_gpccs, - .version_max = 0, - .version = { - &acr_r361_ls_gpccs_func_0, - } -}; - -struct acr_r361_pmu_bl_desc { - u32 reserved; - u32 dma_idx; - struct flcn_u64 code_dma_base; - u32 total_code_size; - u32 code_size_to_load; - u32 code_entry_point; - struct flcn_u64 data_dma_base; - u32 data_size; - struct flcn_u64 overlay_dma_base; - u32 argc; - u32 argv; -}; - -static void -acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - const struct nvkm_pmu *pmu = acr->subdev->device->pmu; - struct acr_r361_pmu_bl_desc *desc = _desc; - u64 base, addr_code, addr_data; - u32 addr_args; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = base + pdesc->app_resident_code_offset; - addr_data = base + pdesc->app_resident_data_offset; - addr_args = pmu->falcon->data.limit; - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; - - desc->dma_idx = FALCON_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->total_code_size = pdesc->app_size; - desc->code_size_to_load = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - 
desc->data_size = pdesc->app_resident_data_size; - desc->overlay_dma_base = u64_to_flcn64(addr_code); - desc->argc = 1; - desc->argv = addr_args; -} - -static const struct acr_r352_lsf_func -acr_r361_ls_pmu_func_0 = { - .generate_bl_desc = acr_r361_generate_pmu_bl_desc, - .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r361_ls_pmu_func = { - .load = acr_ls_ucode_load_pmu, - .post_run = acr_ls_pmu_post_run, - .version_max = 0, - .version = { - &acr_r361_ls_pmu_func_0, - } -}; - -static void -acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - const struct nvkm_sec2 *sec = acr->subdev->device->sec2; - struct acr_r361_pmu_bl_desc *desc = _desc; - u64 base, addr_code, addr_data; - u32 addr_args; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - /* For some reason we should not add app_resident_code_offset here */ - addr_code = base; - addr_data = base + pdesc->app_resident_data_offset; - addr_args = sec->falcon->data.limit; - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; - - desc->dma_idx = FALCON_SEC2_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->total_code_size = pdesc->app_size; - desc->code_size_to_load = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; - desc->overlay_dma_base = u64_to_flcn64(addr_code); - desc->argc = 1; - /* args are stored at the beginning of EMEM */ - desc->argv = 0x01000000; -} - -const struct acr_r352_lsf_func -acr_r361_ls_sec2_func_0 = { - .generate_bl_desc = acr_r361_generate_sec2_bl_desc, - .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc), -}; - -static const struct acr_r352_ls_func -acr_r361_ls_sec2_func = { - .load = acr_ls_ucode_load_sec2, - .post_run = acr_ls_sec2_post_run, - .version_max = 0, - .version = { - &acr_r361_ls_sec2_func_0, - } -}; - - -const struct acr_r352_func -acr_r361_func = { - .fixup_hs_desc = acr_r352_fixup_hs_desc, - .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), - .ls_ucode_img_load = acr_r352_ls_ucode_img_load, - .ls_fill_headers = acr_r352_ls_fill_headers, - .ls_write_wpr = acr_r352_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, - [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func, - [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func, - }, -}; - -struct nvkm_acr * -acr_r361_new(unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r361_func, NVKM_SECBOOT_FALCON_PMU, - managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h deleted file mode 100644 index 38dec93779c8..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
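The bootloader-descriptor generators deleted above (acr_r361_generate_flcn_bl_desc() and the PMU variant) derive their DMA addresses the same way: a base inside the WPR region at wpr_addr + ucode_off + app_start_offset, code and data at the resident offsets from that base, and the PMU command-line arguments carved from the top of the falcon's DMEM. A stand-alone sketch of that arithmetic with made-up example values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* example values only -- real ones come from the LS image descriptor */
	uint64_t wpr_addr  = 0x1f000000;	/* start of the WPR region */
	uint64_t ucode_off = 0x2000;		/* image offset inside the WPR blob */
	uint32_t app_start_offset         = 0x100;
	uint32_t app_resident_code_offset = 0x0;
	uint32_t app_resident_data_offset = 0x4000;
	uint32_t falcon_data_limit = 0x10000;	/* size of the falcon's DMEM */
	uint32_t cmdline_size      = 0x100;	/* NVKM_MSGQUEUE_CMDLINE_SIZE stand-in */

	uint64_t base      = wpr_addr + ucode_off + app_start_offset;
	uint64_t addr_code = base + app_resident_code_offset;
	uint64_t addr_data = base + app_resident_data_offset;
	uint32_t addr_args = falcon_data_limit - cmdline_size;

	printf("code @ %#llx, data @ %#llx, args @ %#x\n",
	       (unsigned long long)addr_code, (unsigned long long)addr_data,
	       (unsigned)addr_args);
	return 0;
}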
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_ACR_R361_H__ -#define __NVKM_SECBOOT_ACR_R361_H__ - -#include "acr_r352.h" - -/** - * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor - * @signature: 16B signature for secure code. 0s if no secure code - * @ctx_dma: DMA context to be used by BL while loading code/data - * @code_dma_base: 256B-aligned Physical FB Address where code is located - * (falcon's $xcbase register) - * @non_sec_code_off: offset from code_dma_base where the non-secure code is - * located. The offset must be multiple of 256 to help perf - * @non_sec_code_size: the size of the nonSecure code part. - * @sec_code_off: offset from code_dma_base where the secure code is - * located. The offset must be multiple of 256 to help perf - * @sec_code_size: offset from code_dma_base where the secure code is - * located. The offset must be multiple of 256 to help perf - * @code_entry_point: code entry point which will be invoked by BL after - * code is loaded. - * @data_dma_base: 256B aligned Physical FB Address where data is located. - * (falcon's $xdbase register) - * @data_size: size of data block. Should be multiple of 256B - * - * Structure used by the bootloader to load the rest of the code. This has - * to be filled by host and copied into DMEM at offset provided in the - * hsflcn_bl_desc.bl_desc_dmem_load_off. - */ -struct acr_r361_flcn_bl_desc { - u32 reserved[4]; - u32 signature[4]; - u32 ctx_dma; - struct flcn_u64 code_dma_base; - u32 non_sec_code_off; - u32 non_sec_code_size; - u32 sec_code_off; - u32 sec_code_size; - u32 code_entry_point; - struct flcn_u64 data_dma_base; - u32 data_size; -}; - -void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64); - -extern const struct acr_r352_ls_func acr_r361_ls_fecs_func; -extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func; -extern const struct acr_r352_ls_func acr_r361_ls_pmu_func; -extern const struct acr_r352_lsf_func acr_r361_ls_sec2_func_0; -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c deleted file mode 100644 index 30cf04109991..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr_r361.h" - -#include <core/gpuobj.h> - -/* - * r364 ACR: hsflcn_desc structure has changed to introduce the shadow_mem - * parameter. - */ - -struct acr_r364_hsflcn_desc { - union { - u8 reserved_dmem[0x200]; - u32 signatures[4]; - } ucode_reserved_space; - u32 wpr_region_id; - u32 wpr_offset; - u32 mmu_memory_range; - struct { - u32 no_regions; - struct { - u32 start_addr; - u32 end_addr; - u32 region_id; - u32 read_mask; - u32 write_mask; - u32 client_mask; - u32 shadow_mem_start_addr; - } region_props[2]; - } regions; - u32 ucode_blob_size; - u64 ucode_blob_base __aligned(8); - struct { - u32 vpr_enabled; - u32 vpr_start; - u32 vpr_end; - u32 hdcp_policies; - } vpr_desc; -}; - -static void -acr_r364_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb, - void *_desc) -{ - struct acr_r364_hsflcn_desc *desc = _desc; - struct nvkm_gpuobj *ls_blob = acr->ls_blob; - - /* WPR region information if WPR is not fixed */ - if (sb->wpr_size == 0) { - u64 wpr_start = ls_blob->addr; - u64 wpr_end = ls_blob->addr + ls_blob->size; - - if (acr->func->shadow_blob) - wpr_start += ls_blob->size / 2; - - desc->wpr_region_id = 1; - desc->regions.no_regions = 2; - desc->regions.region_props[0].start_addr = wpr_start >> 8; - desc->regions.region_props[0].end_addr = wpr_end >> 8; - desc->regions.region_props[0].region_id = 1; - desc->regions.region_props[0].read_mask = 0xf; - desc->regions.region_props[0].write_mask = 0xc; - desc->regions.region_props[0].client_mask = 0x2; - if (acr->func->shadow_blob) - desc->regions.region_props[0].shadow_mem_start_addr = - ls_blob->addr >> 8; - else - desc->regions.region_props[0].shadow_mem_start_addr = 0; - } else { - desc->ucode_blob_base = ls_blob->addr; - desc->ucode_blob_size = ls_blob->size; - } -} - -const struct acr_r352_func -acr_r364_func = { - .fixup_hs_desc = acr_r364_fixup_hs_desc, - .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), - .ls_ucode_img_load = acr_r352_ls_ucode_img_load, - .ls_fill_headers = acr_r352_ls_fill_headers, - .ls_write_wpr = acr_r352_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, - [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func, - }, -}; - - -struct nvkm_acr * -acr_r364_new(unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r364_func, NVKM_SECBOOT_FALCON_PMU, - 
managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c deleted file mode 100644 index 472ced29da7e..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr_r367.h" -#include "acr_r361.h" -#include "acr_r370.h" - -#include <core/gpuobj.h> - -/* - * r367 ACR: new LS signature format requires a rewrite of LS firmware and - * blob creation functions. Also the hsflcn_desc layout has changed slightly. - */ - -#define LSF_LSB_DEPMAP_SIZE 11 - -/** - * struct acr_r367_lsf_lsb_header - LS firmware header - * - * See also struct acr_r352_lsf_lsb_header for documentation. - */ -struct acr_r367_lsf_lsb_header { - /** - * LS falcon signatures - * @prd_keys: signature to use in production mode - * @dgb_keys: signature to use in debug mode - * @b_prd_present: whether the production key is present - * @b_dgb_present: whether the debug key is present - * @falcon_id: ID of the falcon the ucode applies to - */ - struct { - u8 prd_keys[2][16]; - u8 dbg_keys[2][16]; - u32 b_prd_present; - u32 b_dbg_present; - u32 falcon_id; - u32 supports_versioning; - u32 version; - u32 depmap_count; - u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4]; - u8 kdf[16]; - } signature; - u32 ucode_off; - u32 ucode_size; - u32 data_size; - u32 bl_code_size; - u32 bl_imem_off; - u32 bl_data_off; - u32 bl_data_size; - u32 app_code_off; - u32 app_code_size; - u32 app_data_off; - u32 app_data_size; - u32 flags; -}; - -/** - * struct acr_r367_lsf_wpr_header - LS blob WPR Header - * - * See also struct acr_r352_lsf_wpr_header for documentation. 
- */ -struct acr_r367_lsf_wpr_header { - u32 falcon_id; - u32 lsb_offset; - u32 bootstrap_owner; - u32 lazy_bootstrap; - u32 bin_version; - u32 status; -#define LSF_IMAGE_STATUS_NONE 0 -#define LSF_IMAGE_STATUS_COPY 1 -#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2 -#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3 -#define LSF_IMAGE_STATUS_VALIDATION_DONE 4 -#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5 -#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6 -#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED 7 -}; - -/** - * struct ls_ucode_img_r367 - ucode image augmented with r367 headers - */ -struct ls_ucode_img_r367 { - struct ls_ucode_img base; - - const struct acr_r352_lsf_func *func; - - struct acr_r367_lsf_wpr_header wpr_header; - struct acr_r367_lsf_lsb_header lsb_header; -}; -#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base) - -struct ls_ucode_img * -acr_r367_ls_ucode_img_load(const struct acr_r352 *acr, - const struct nvkm_secboot *sb, - enum nvkm_secboot_falcon falcon_id) -{ - const struct nvkm_subdev *subdev = acr->base.subdev; - const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id]; - struct ls_ucode_img_r367 *img; - int ret; - - img = kzalloc(sizeof(*img), GFP_KERNEL); - if (!img) - return ERR_PTR(-ENOMEM); - - img->base.falcon_id = falcon_id; - - ret = func->load(sb, func->version_max, &img->base); - if (ret < 0) { - kfree(img->base.ucode_data); - kfree(img->base.sig); - kfree(img); - return ERR_PTR(ret); - } - - img->func = func->version[ret]; - - /* Check that the signature size matches our expectations... */ - if (img->base.sig_size != sizeof(img->lsb_header.signature)) { - nvkm_error(subdev, "invalid signature size for %s falcon!\n", - nvkm_secboot_falcon_name[falcon_id]); - return ERR_PTR(-EINVAL); - } - - /* Copy signature to the right place */ - memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size); - - /* not needed? the signature should already have the right value */ - img->lsb_header.signature.falcon_id = falcon_id; - - return &img->base; -} - -#define LSF_LSB_HEADER_ALIGN 256 -#define LSF_BL_DATA_ALIGN 256 -#define LSF_BL_DATA_SIZE_ALIGN 256 -#define LSF_BL_CODE_SIZE_ALIGN 256 -#define LSF_UCODE_DATA_ALIGN 4096 - -static u32 -acr_r367_ls_img_fill_headers(struct acr_r352 *acr, - struct ls_ucode_img_r367 *img, u32 offset) -{ - struct ls_ucode_img *_img = &img->base; - struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header; - struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header; - struct ls_ucode_img_desc *desc = &_img->ucode_desc; - const struct acr_r352_lsf_func *func = img->func; - - /* Fill WPR header */ - whdr->falcon_id = _img->falcon_id; - whdr->bootstrap_owner = acr->base.boot_falcon; - whdr->bin_version = lhdr->signature.version; - whdr->status = LSF_IMAGE_STATUS_COPY; - - /* Skip bootstrapping falcons started by someone else than ACR */ - if (acr->lazy_bootstrap & BIT(_img->falcon_id)) - whdr->lazy_bootstrap = 1; - - /* Align, save off, and include an LSB header size */ - offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN); - whdr->lsb_offset = offset; - offset += sizeof(*lhdr); - - /* - * Align, save off, and include the original (static) ucode - * image size - */ - offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN); - _img->ucode_off = lhdr->ucode_off = offset; - offset += _img->ucode_size; - - /* - * For falcons that use a boot loader (BL), we append a loader - * desc structure on the end of the ucode image and consider - * this the boot loader data. 
The host will then copy the loader - * desc args to this space within the WPR region (before locking - * down) and the HS bin will then copy them to DMEM 0 for the - * loader. - */ - lhdr->bl_code_size = ALIGN(desc->bootloader_size, - LSF_BL_CODE_SIZE_ALIGN); - lhdr->ucode_size = ALIGN(desc->app_resident_data_offset, - LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size; - lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) + - lhdr->bl_code_size - lhdr->ucode_size; - /* - * Though the BL is located at 0th offset of the image, the VA - * is different to make sure that it doesn't collide the actual - * OS VA range - */ - lhdr->bl_imem_off = desc->bootloader_imem_offset; - lhdr->app_code_off = desc->app_start_offset + - desc->app_resident_code_offset; - lhdr->app_code_size = desc->app_resident_code_size; - lhdr->app_data_off = desc->app_start_offset + - desc->app_resident_data_offset; - lhdr->app_data_size = desc->app_resident_data_size; - - lhdr->flags = func->lhdr_flags; - if (_img->falcon_id == acr->base.boot_falcon) - lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX; - - /* Align and save off BL descriptor size */ - lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN); - - /* - * Align, save off, and include the additional BL data - */ - offset = ALIGN(offset, LSF_BL_DATA_ALIGN); - lhdr->bl_data_off = offset; - offset += lhdr->bl_data_size; - - return offset; -} - -int -acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs) -{ - struct ls_ucode_img_r367 *img; - struct list_head *l; - u32 count = 0; - u32 offset; - - /* Count the number of images to manage */ - list_for_each(l, imgs) - count++; - - /* - * Start with an array of WPR headers at the base of the WPR. - * The expectation here is that the secure falcon will do a single DMA - * read of this array and cache it internally so it's ok to pack these. - * Also, we add 1 to the falcon count to indicate the end of the array. - */ - offset = sizeof(img->wpr_header) * (count + 1); - - /* - * Walk the managed falcons, accounting for the LSB structs - * as well as the ucode images. 
- */ - list_for_each_entry(img, imgs, base.node) { - offset = acr_r367_ls_img_fill_headers(acr, img, offset); - } - - return offset; -} - -int -acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, - struct nvkm_gpuobj *wpr_blob, u64 wpr_addr) -{ - struct ls_ucode_img *_img; - u32 pos = 0; - u32 max_desc_size = 0; - u8 *gdesc; - - list_for_each_entry(_img, imgs, node) { - struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img); - const struct acr_r352_lsf_func *ls_func = img->func; - - max_desc_size = max(max_desc_size, ls_func->bl_desc_size); - } - - gdesc = kmalloc(max_desc_size, GFP_KERNEL); - if (!gdesc) - return -ENOMEM; - - nvkm_kmap(wpr_blob); - - list_for_each_entry(_img, imgs, node) { - struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img); - const struct acr_r352_lsf_func *ls_func = img->func; - - nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header, - sizeof(img->wpr_header)); - - nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset, - &img->lsb_header, sizeof(img->lsb_header)); - - /* Generate and write BL descriptor */ - memset(gdesc, 0, ls_func->bl_desc_size); - ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc); - - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off, - gdesc, ls_func->bl_desc_size); - - /* Copy ucode */ - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off, - _img->ucode_data, _img->ucode_size); - - pos += sizeof(img->wpr_header); - } - - nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID); - - nvkm_done(wpr_blob); - - kfree(gdesc); - - return 0; -} - -struct acr_r367_hsflcn_desc { - u8 reserved_dmem[0x200]; - u32 signatures[4]; - u32 wpr_region_id; - u32 wpr_offset; - u32 mmu_memory_range; -#define FLCN_ACR_MAX_REGIONS 2 - struct { - u32 no_regions; - struct { - u32 start_addr; - u32 end_addr; - u32 region_id; - u32 read_mask; - u32 write_mask; - u32 client_mask; - u32 shadow_mem_start_addr; - } region_props[FLCN_ACR_MAX_REGIONS]; - } regions; - u32 ucode_blob_size; - u64 ucode_blob_base __aligned(8); - struct { - u32 vpr_enabled; - u32 vpr_start; - u32 vpr_end; - u32 hdcp_policies; - } vpr_desc; -}; - -void -acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb, - void *_desc) -{ - struct acr_r367_hsflcn_desc *desc = _desc; - struct nvkm_gpuobj *ls_blob = acr->ls_blob; - - /* WPR region information if WPR is not fixed */ - if (sb->wpr_size == 0) { - u64 wpr_start = ls_blob->addr; - u64 wpr_end = ls_blob->addr + ls_blob->size; - - if (acr->func->shadow_blob) - wpr_start += ls_blob->size / 2; - - desc->wpr_region_id = 1; - desc->regions.no_regions = 2; - desc->regions.region_props[0].start_addr = wpr_start >> 8; - desc->regions.region_props[0].end_addr = wpr_end >> 8; - desc->regions.region_props[0].region_id = 1; - desc->regions.region_props[0].read_mask = 0xf; - desc->regions.region_props[0].write_mask = 0xc; - desc->regions.region_props[0].client_mask = 0x2; - if (acr->func->shadow_blob) - desc->regions.region_props[0].shadow_mem_start_addr = - ls_blob->addr >> 8; - else - desc->regions.region_props[0].shadow_mem_start_addr = 0; - } else { - desc->ucode_blob_base = ls_blob->addr; - desc->ucode_blob_size = ls_blob->size; - } -} - -static const struct acr_r352_ls_func -acr_r367_ls_sec2_func = { - .load = acr_ls_ucode_load_sec2, - .post_run = acr_ls_sec2_post_run, - .version_max = 1, - .version = { - &acr_r361_ls_sec2_func_0, - &acr_r370_ls_sec2_func_0, - } -}; - -const struct acr_r352_func -acr_r367_func = { - .fixup_hs_desc = acr_r367_fixup_hs_desc, - .generate_hs_bl_desc = 
acr_r361_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), - .shadow_blob = true, - .ls_ucode_img_load = acr_r367_ls_ucode_img_load, - .ls_fill_headers = acr_r367_ls_fill_headers, - .ls_write_wpr = acr_r367_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, - [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func, - [NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func, - }, -}; - -struct nvkm_acr * -acr_r367_new(enum nvkm_secboot_falcon boot_falcon, - unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h deleted file mode 100644 index 8bdfb3e5cd1c..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_ACR_R367_H__ -#define __NVKM_SECBOOT_ACR_R367_H__ - -#include "acr_r352.h" - -void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *); - -struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *, - const struct nvkm_secboot *, - enum nvkm_secboot_falcon); -int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *); -int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *, - struct nvkm_gpuobj *, u64); -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c deleted file mode 100644 index e821d0fd6217..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr_r370.h" -#include "acr_r367.h" - -#include <core/msgqueue.h> -#include <engine/falcon.h> -#include <engine/sec2.h> - -static void -acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - struct acr_r370_flcn_bl_desc *desc = _desc; - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - u64 base, addr_code, addr_data; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = base + pdesc->app_resident_code_offset; - addr_data = base + pdesc->app_resident_data_offset; - - desc->ctx_dma = FALCON_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; -} - -static const struct acr_r352_lsf_func -acr_r370_ls_fecs_func_0 = { - .generate_bl_desc = acr_r370_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r370_ls_fecs_func = { - .load = acr_ls_ucode_load_fecs, - .version_max = 0, - .version = { - &acr_r370_ls_fecs_func_0, - } -}; - -static const struct acr_r352_lsf_func -acr_r370_ls_gpccs_func_0 = { - .generate_bl_desc = acr_r370_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), - /* GPCCS will be loaded using PRI */ - .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, -}; - -const struct acr_r352_ls_func -acr_r370_ls_gpccs_func = { - .load = acr_ls_ucode_load_gpccs, - .version_max = 0, - .version = { - &acr_r370_ls_gpccs_func_0, - } -}; - -static void -acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - const struct nvkm_sec2 *sec = acr->subdev->device->sec2; - struct acr_r370_flcn_bl_desc *desc = _desc; - u64 base, addr_code, addr_data; - u32 addr_args; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - /* For some reason we should not add app_resident_code_offset here */ - addr_code = base; - addr_data = base + pdesc->app_resident_data_offset; - addr_args = sec->falcon->data.limit; - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; - - desc->ctx_dma = FALCON_SEC2_DMAIDX_UCODE; - desc->code_dma_base = 
u64_to_flcn64(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; - desc->argc = 1; - /* args are stored at the beginning of EMEM */ - desc->argv = 0x01000000; -} - -const struct acr_r352_lsf_func -acr_r370_ls_sec2_func_0 = { - .generate_bl_desc = acr_r370_generate_sec2_bl_desc, - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r370_ls_sec2_func = { - .load = acr_ls_ucode_load_sec2, - .post_run = acr_ls_sec2_post_run, - .version_max = 0, - .version = { - &acr_r370_ls_sec2_func_0, - } -}; - -void -acr_r370_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, - u64 offset) -{ - struct acr_r370_flcn_bl_desc *bl_desc = _bl_desc; - - bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; - bl_desc->non_sec_code_off = hdr->non_sec_code_off; - bl_desc->non_sec_code_size = hdr->non_sec_code_size; - bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0); - bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0); - bl_desc->code_entry_point = 0; - bl_desc->code_dma_base = u64_to_flcn64(offset); - bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base); - bl_desc->data_size = hdr->data_size; -} - -const struct acr_r352_func -acr_r370_func = { - .fixup_hs_desc = acr_r367_fixup_hs_desc, - .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), - .shadow_blob = true, - .ls_ucode_img_load = acr_r367_ls_ucode_img_load, - .ls_fill_headers = acr_r367_ls_fill_headers, - .ls_write_wpr = acr_r367_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_SEC2] = &acr_r370_ls_sec2_func, - [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func, - }, -}; - -struct nvkm_acr * -acr_r370_new(enum nvkm_secboot_falcon boot_falcon, - unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r370_func, boot_falcon, managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h deleted file mode 100644 index 2efed6f995ad..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_ACR_R370_H__ -#define __NVKM_SECBOOT_ACR_R370_H__ - -#include "priv.h" -struct hsf_load_header; - -/* Same as acr_r361_flcn_bl_desc, plus argc/argv */ -struct acr_r370_flcn_bl_desc { - u32 reserved[4]; - u32 signature[4]; - u32 ctx_dma; - struct flcn_u64 code_dma_base; - u32 non_sec_code_off; - u32 non_sec_code_size; - u32 sec_code_off; - u32 sec_code_size; - u32 code_entry_point; - struct flcn_u64 data_dma_base; - u32 data_size; - u32 argc; - u32 argv; -}; - -void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64); -extern const struct acr_r352_ls_func acr_r370_ls_fecs_func; -extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func; -extern const struct acr_r352_lsf_func acr_r370_ls_sec2_func_0; -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c deleted file mode 100644 index 8f0647766038..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
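Every descriptor above stores its 64-bit DMA addresses through u64_to_flcn64(), presumably because the descriptor fields are laid out as two 32-bit words. A minimal stand-alone sketch of that split; the lo/hi field names are an assumption of this example rather than the driver's actual struct flcn_u64 definition:

#include <stdint.h>
#include <stdio.h>

struct flcn_u64_example {
	uint32_t lo;
	uint32_t hi;
};

static struct flcn_u64_example
u64_to_flcn64_example(uint64_t addr)
{
	struct flcn_u64_example ret = {
		.lo = (uint32_t)addr,		/* low 32 bits */
		.hi = (uint32_t)(addr >> 32),	/* high 32 bits */
	};
	return ret;
}

int
main(void)
{
	struct flcn_u64_example v = u64_to_flcn64_example(0x1234abcd5678ULL);

	printf("lo=%#x hi=%#x\n", (unsigned)v.lo, (unsigned)v.hi);
	return 0;
}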
- */ - -#include "acr_r370.h" -#include "acr_r367.h" - -#include <core/msgqueue.h> -#include <subdev/pmu.h> - -static void -acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - const struct nvkm_pmu *pmu = acr->subdev->device->pmu; - struct acr_r370_flcn_bl_desc *desc = _desc; - u64 base, addr_code, addr_data; - u32 addr_args; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = base + pdesc->app_resident_code_offset; - addr_data = base + pdesc->app_resident_data_offset; - addr_args = pmu->falcon->data.limit; - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; - - desc->ctx_dma = FALCON_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; - desc->argc = 1; - desc->argv = addr_args; -} - -static const struct acr_r352_lsf_func -acr_r375_ls_pmu_func_0 = { - .generate_bl_desc = acr_r375_generate_pmu_bl_desc, - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r375_ls_pmu_func = { - .load = acr_ls_ucode_load_pmu, - .post_run = acr_ls_pmu_post_run, - .version_max = 0, - .version = { - &acr_r375_ls_pmu_func_0, - } -}; - -const struct acr_r352_func -acr_r375_func = { - .fixup_hs_desc = acr_r367_fixup_hs_desc, - .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), - .shadow_blob = true, - .ls_ucode_img_load = acr_r367_ls_ucode_img_load, - .ls_fill_headers = acr_r367_ls_fill_headers, - .ls_write_wpr = acr_r367_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func, - [NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func, - }, -}; - -struct nvkm_acr * -acr_r375_new(enum nvkm_secboot_falcon boot_falcon, - unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r375_func, boot_falcon, managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c deleted file mode 100644 index ee29c6c11afd..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -/* - * Secure boot is the process by which NVIDIA-signed firmware is loaded into - * some of the falcons of a GPU. For production devices this is the only way - * for the firmware to access useful (but sensitive) registers. - * - * A Falcon microprocessor supporting advanced security modes can run in one of - * three modes: - * - * - Non-secure (NS). In this mode, functionality is similar to Falcon - * architectures before security modes were introduced (pre-Maxwell), but - * capability is restricted. In particular, certain registers may be - * inaccessible for reads and/or writes, and physical memory access may be - * disabled (on certain Falcon instances). This is the only possible mode that - * can be used if you don't have microcode cryptographically signed by NVIDIA. - * - * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's - * not possible to read or write any Falcon internal state or Falcon registers - * from outside the Falcon (for example, from the host system). The only way - * to enable this mode is by loading microcode that has been signed by NVIDIA. - * (The loading process involves tagging the IMEM block as secure, writing the - * signature into a Falcon register, and starting execution. The hardware will - * validate the signature, and if valid, grant HS privileges.) - * - * - Light Secure (LS). In this mode, the microprocessor has more privileges - * than NS but fewer than HS. Some of the microprocessor state is visible to - * host software to ease debugging. The only way to enable this mode is by HS - * microcode enabling LS mode. Some privileges available to HS mode are not - * available here. LS mode is introduced in GM20x. - * - * Secure boot consists in temporarily switching a HS-capable falcon (typically - * PMU) into HS mode in order to validate the LS firmwares of managed falcons, - * load them, and switch managed falcons into LS mode. Once secure boot - * completes, no falcon remains in HS mode. - * - * Secure boot requires a write-protected memory region (WPR) which can only be - * written by the secure falcon. On dGPU, the driver sets up the WPR region in - * video memory. On Tegra, it is set up by the bootloader and its location and - * size written into memory controller registers. - * - * The secure boot process takes place as follows: - * - * 1) A LS blob is constructed that contains all the LS firmwares we want to - * load, along with their signatures and bootloaders. - * - * 2) A HS blob (also called ACR) is created that contains the signed HS - * firmware in charge of loading the LS firmwares into their respective - * falcons. - * - * 3) The HS blob is loaded (via its own bootloader) and executed on the - * HS-capable falcon. It authenticates itself, switches the secure falcon to - * HS mode and setup the WPR region around the LS blob (dGPU) or copies the - * LS blob into the WPR region (Tegra). - * - * 4) The LS blob is now secure from all external tampering. The HS falcon - * checks the signatures of the LS firmwares and, if valid, switches the - * managed falcons to LS mode and makes them ready to run the LS firmware. - * - * 5) The managed falcons remain in LS mode and can be started. 
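The base.c code that follows tracks which falcons secure boot manages as a simple bitmask (BIT(falcon_id) set in acr->managed_falcons), and nvkm_secboot_reset() rejects any reset request that is not a subset of it. A small user-space sketch of that containment test, with illustrative falcon IDs rather than the driver's enum values:

#include <stdbool.h>
#include <stdio.h>

enum { FALCON_PMU, FALCON_FECS, FALCON_GPCCS, FALCON_SEC2 };

static bool
is_managed(unsigned long managed, unsigned int fid)
{
	return managed & (1UL << fid);
}

static bool
mask_is_subset(unsigned long mask, unsigned long managed)
{
	/* same containment test as nvkm_secboot_reset(): ORing the request
	 * into the managed set must not change it */
	return (mask | managed) == managed;
}

int
main(void)
{
	unsigned long managed = (1UL << FALCON_FECS) | (1UL << FALCON_GPCCS);

	printf("FECS managed:       %d\n", is_managed(managed, FALCON_FECS));
	printf("reset FECS+SEC2 ok: %d\n",
	       mask_is_subset((1UL << FALCON_FECS) | (1UL << FALCON_SEC2),
			      managed));
	return 0;
}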
- * - */ - -#include "priv.h" -#include "acr.h" - -#include <subdev/mc.h> -#include <subdev/timer.h> -#include <subdev/pmu.h> -#include <engine/sec2.h> - -const char * -nvkm_secboot_falcon_name[] = { - [NVKM_SECBOOT_FALCON_PMU] = "PMU", - [NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>", - [NVKM_SECBOOT_FALCON_FECS] = "FECS", - [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS", - [NVKM_SECBOOT_FALCON_SEC2] = "SEC2", - [NVKM_SECBOOT_FALCON_END] = "<invalid>", -}; -/** - * nvkm_secboot_reset() - reset specified falcon - */ -int -nvkm_secboot_reset(struct nvkm_secboot *sb, unsigned long falcon_mask) -{ - /* Unmanaged falcon? */ - if ((falcon_mask | sb->acr->managed_falcons) != sb->acr->managed_falcons) { - nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n"); - return -EINVAL; - } - - return sb->acr->func->reset(sb->acr, sb, falcon_mask); -} - -/** - * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed - */ -bool -nvkm_secboot_is_managed(struct nvkm_secboot *sb, enum nvkm_secboot_falcon fid) -{ - if (!sb) - return false; - - return sb->acr->managed_falcons & BIT(fid); -} - -static int -nvkm_secboot_oneinit(struct nvkm_subdev *subdev) -{ - struct nvkm_secboot *sb = nvkm_secboot(subdev); - int ret = 0; - - switch (sb->acr->boot_falcon) { - case NVKM_SECBOOT_FALCON_PMU: - sb->halt_falcon = sb->boot_falcon = subdev->device->pmu->falcon; - break; - case NVKM_SECBOOT_FALCON_SEC2: - /* we must keep SEC2 alive forever since ACR will run on it */ - nvkm_engine_ref(&subdev->device->sec2->engine); - sb->boot_falcon = subdev->device->sec2->falcon; - sb->halt_falcon = subdev->device->pmu->falcon; - break; - default: - nvkm_error(subdev, "Unmanaged boot falcon %s!\n", - nvkm_secboot_falcon_name[sb->acr->boot_falcon]); - return -EINVAL; - } - nvkm_debug(subdev, "using %s falcon for ACR\n", sb->boot_falcon->name); - - /* Call chip-specific init function */ - if (sb->func->oneinit) - ret = sb->func->oneinit(sb); - if (ret) { - nvkm_error(subdev, "Secure Boot initialization failed: %d\n", - ret); - return ret; - } - - return 0; -} - -static int -nvkm_secboot_fini(struct nvkm_subdev *subdev, bool suspend) -{ - struct nvkm_secboot *sb = nvkm_secboot(subdev); - int ret = 0; - - if (sb->func->fini) - ret = sb->func->fini(sb, suspend); - - return ret; -} - -static void * -nvkm_secboot_dtor(struct nvkm_subdev *subdev) -{ - struct nvkm_secboot *sb = nvkm_secboot(subdev); - void *ret = NULL; - - if (sb->func->dtor) - ret = sb->func->dtor(sb); - - return ret; -} - -static const struct nvkm_subdev_func -nvkm_secboot = { - .oneinit = nvkm_secboot_oneinit, - .fini = nvkm_secboot_fini, - .dtor = nvkm_secboot_dtor, -}; - -int -nvkm_secboot_ctor(const struct nvkm_secboot_func *func, struct nvkm_acr *acr, - struct nvkm_device *device, int index, - struct nvkm_secboot *sb) -{ - unsigned long fid; - - nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev); - sb->func = func; - sb->acr = acr; - acr->subdev = &sb->subdev; - - nvkm_debug(&sb->subdev, "securely managed falcons:\n"); - for_each_set_bit(fid, &sb->acr->managed_falcons, - NVKM_SECBOOT_FALCON_END) - nvkm_debug(&sb->subdev, "- %s\n", - nvkm_secboot_falcon_name[fid]); - - return 0; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c deleted file mode 100644 index 5e91b3f90065..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - - -#include "acr.h" -#include "gm200.h" - -#include <core/gpuobj.h> -#include <subdev/fb.h> -#include <engine/falcon.h> -#include <subdev/mc.h> - -/** - * gm200_secboot_run_blob() - run the given high-secure blob - * - */ -int -gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob, - struct nvkm_falcon *falcon) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - struct nvkm_subdev *subdev = &gsb->base.subdev; - struct nvkm_vma *vma = NULL; - u32 start_address; - int ret; - - ret = nvkm_falcon_get(falcon, subdev); - if (ret) - return ret; - - /* Map the HS firmware so the HS bootloader can see it */ - ret = nvkm_vmm_get(gsb->vmm, 12, blob->size, &vma); - if (ret) { - nvkm_falcon_put(falcon, subdev); - return ret; - } - - ret = nvkm_memory_map(blob, 0, gsb->vmm, vma, NULL, 0); - if (ret) - goto end; - - /* Reset and set the falcon up */ - ret = nvkm_falcon_reset(falcon); - if (ret) - goto end; - nvkm_falcon_bind_context(falcon, gsb->inst); - - /* Load the HS bootloader into the falcon's IMEM/DMEM */ - ret = sb->acr->func->load(sb->acr, falcon, blob, vma->addr); - if (ret < 0) - goto end; - - start_address = ret; - - /* Disable interrupts as we will poll for the HALT bit */ - nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false); - - /* Set default error value in mailbox register */ - nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5); - - /* Start the HS bootloader */ - nvkm_falcon_set_start_addr(falcon, start_address); - nvkm_falcon_start(falcon); - ret = nvkm_falcon_wait_for_halt(falcon, 100); - if (ret) - goto end; - - /* - * The mailbox register contains the (positive) error code - return this - * to the caller - */ - ret = nvkm_falcon_rd32(falcon, 0x040); - -end: - /* Reenable interrupts */ - nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true); - - /* We don't need the ACR firmware anymore */ - nvkm_vmm_put(gsb->vmm, &vma); - nvkm_falcon_put(falcon, subdev); - - return ret; -} - -int -gm200_secboot_oneinit(struct nvkm_secboot *sb) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - struct nvkm_device *device = sb->subdev.device; - int ret; - - /* Allocate instance block and VM */ - ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true, - &gsb->inst); - if (ret) - return ret; - - ret = nvkm_vmm_new(device, 0, 600 * 1024, NULL, 0, NULL, "acr", - &gsb->vmm); - if (ret) - return ret; - - atomic_inc(&gsb->vmm->engref[NVKM_SUBDEV_PMU]); - gsb->vmm->debug = 
gsb->base.subdev.debug; - - ret = nvkm_vmm_join(gsb->vmm, gsb->inst); - if (ret) - return ret; - - if (sb->acr->func->oneinit) { - ret = sb->acr->func->oneinit(sb->acr, sb); - if (ret) - return ret; - } - - return 0; -} - -int -gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend) -{ - int ret = 0; - - if (sb->acr->func->fini) - ret = sb->acr->func->fini(sb->acr, sb, suspend); - - return ret; -} - -void * -gm200_secboot_dtor(struct nvkm_secboot *sb) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - - sb->acr->func->dtor(sb->acr); - - nvkm_vmm_part(gsb->vmm, gsb->inst); - nvkm_vmm_unref(&gsb->vmm); - nvkm_memory_unref(&gsb->inst); - - return gsb; -} - - -static const struct nvkm_secboot_func -gm200_secboot = { - .dtor = gm200_secboot_dtor, - .oneinit = gm200_secboot_oneinit, - .fini = gm200_secboot_fini, - .run_blob = gm200_secboot_run_blob, -}; - -int -gm200_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - int ret; - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r361_new(BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_GPCCS)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); - if (!gsb) { - psb = NULL; - return -ENOMEM; - } - *psb = &gsb->base; - - ret = nvkm_secboot_ctor(&gm200_secboot, acr, device, index, &gsb->base); - if (ret) - return ret; - - return 0; -} - - -MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin"); - -MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin"); - -MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin"); 
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin"); - -MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h deleted file mode 100644 index 62c5e162099a..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_GM200_H__ -#define __NVKM_SECBOOT_GM200_H__ - -#include "priv.h" - -struct gm200_secboot { - struct nvkm_secboot base; - - /* Instance block & address space used for HS FW execution */ - struct nvkm_memory *inst; - struct nvkm_vmm *vmm; -}; -#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base) - -int gm200_secboot_oneinit(struct nvkm_secboot *); -int gm200_secboot_fini(struct nvkm_secboot *, bool); -void *gm200_secboot_dtor(struct nvkm_secboot *); -int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *, - struct nvkm_falcon *); - -/* Tegra-only */ -int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *, u32); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c deleted file mode 100644 index df8b919dcf09..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr.h" -#include "gm200.h" - -#define TEGRA210_MC_BASE 0x70019000 - -#ifdef CONFIG_ARCH_TEGRA -#define MC_SECURITY_CARVEOUT2_CFG0 0xc58 -#define MC_SECURITY_CARVEOUT2_BOM_0 0xc5c -#define MC_SECURITY_CARVEOUT2_BOM_HI_0 0xc60 -#define MC_SECURITY_CARVEOUT2_SIZE_128K 0xc64 -#define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED (1 << 1) -/** - * gm20b_secboot_tegra_read_wpr() - read the WPR registers on Tegra - * - * On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region - * is reserved from system memory by the bootloader and irreversibly locked. - * This function reads the address and size of the pre-configured WPR region. - */ -int -gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base) -{ - struct nvkm_secboot *sb = &gsb->base; - void __iomem *mc; - u32 cfg; - - mc = ioremap(mc_base, 0xd00); - if (!mc) { - nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n"); - return -ENOMEM; - } - sb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) | - ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32); - sb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K) - << 17; - cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0); - iounmap(mc); - - /* Check that WPR settings are valid */ - if (sb->wpr_size == 0) { - nvkm_error(&sb->subdev, "WPR region is empty\n"); - return -EINVAL; - } - - if (!(cfg & TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED)) { - nvkm_error(&sb->subdev, "WPR region not locked\n"); - return -EINVAL; - } - - return 0; -} -#else -int -gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base) -{ - nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n"); - return -EINVAL; -} -#endif - -static int -gm20b_secboot_oneinit(struct nvkm_secboot *sb) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - int ret; - - ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA210_MC_BASE); - if (ret) - return ret; - - return gm200_secboot_oneinit(sb); -} - -static const struct nvkm_secboot_func -gm20b_secboot = { - .dtor = gm200_secboot_dtor, - .oneinit = gm20b_secboot_oneinit, - .fini = gm200_secboot_fini, - .run_blob = gm200_secboot_run_blob, -}; - -int -gm20b_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - int ret; - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_PMU)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - 
/* Support the initial GM20B firmware release without PMU */ - acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU); - - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); - if (!gsb) { - psb = NULL; - return -ENOMEM; - } - *psb = &gsb->base; - - ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base); - if (ret) - return ret; - - return 0; -} - -#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) -MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin"); -MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin"); -MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin"); -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c deleted file mode 100644 index 4695f1c8e33f..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
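As an aside on the Tegra WPR readout in the deleted gm20b.c above: the MC carveout SIZE register counts 128 KiB units, which is why the code shifts it left by 17 before using it as a byte size. A minimal standalone sketch of the same arithmetic, using purely hypothetical register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical MC_SECURITY_CARVEOUT2_* readings, illustration only. */
        uint32_t bom_lo    = 0xff300000; /* BOM_0 */
        uint32_t bom_hi    = 0x00000001; /* BOM_HI_0 */
        uint32_t size_128k = 0x00000800; /* SIZE_128K: 0x800 units of 128 KiB */

        /* Same arithmetic as the deleted gm20b_secboot_tegra_read_wpr(). */
        uint64_t wpr_addr = bom_lo | ((uint64_t)bom_hi << 32);
        uint64_t wpr_size = (uint64_t)size_128k << 17;

        printf("WPR addr 0x%llx, size 0x%llx (%llu MiB)\n",
               (unsigned long long)wpr_addr,
               (unsigned long long)wpr_size,
               (unsigned long long)(wpr_size >> 20));
        return 0;
}

With these values the carveout would start at 0x1ff300000 and span 256 MiB; the deleted code then only sanity-checks that the size is non-zero and that the carveout is locked.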
- */ - -#include "acr.h" -#include "gm200.h" - -#include "ls_ucode.h" -#include "hs_ucode.h" -#include <subdev/mc.h> -#include <subdev/timer.h> -#include <engine/falcon.h> -#include <engine/nvdec.h> - -static bool -gp102_secboot_scrub_required(struct nvkm_secboot *sb) -{ - struct nvkm_subdev *subdev = &sb->subdev; - struct nvkm_device *device = subdev->device; - u32 reg; - - nvkm_wr32(device, 0x100cd0, 0x2); - reg = nvkm_rd32(device, 0x100cd0); - - return (reg & BIT(4)); -} - -static int -gp102_run_secure_scrub(struct nvkm_secboot *sb) -{ - struct nvkm_subdev *subdev = &sb->subdev; - struct nvkm_device *device = subdev->device; - struct nvkm_engine *engine; - struct nvkm_falcon *falcon; - void *scrub_image; - struct fw_bin_header *hsbin_hdr; - struct hsf_fw_header *fw_hdr; - struct hsf_load_header *lhdr; - void *scrub_data; - int ret; - - nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n"); - - engine = nvkm_engine_ref(&device->nvdec[0]->engine); - if (IS_ERR(engine)) - return PTR_ERR(engine); - falcon = device->nvdec[0]->falcon; - - nvkm_falcon_get(falcon, &sb->subdev); - - scrub_image = hs_ucode_load_blob(subdev, falcon, "nvdec/scrubber"); - if (IS_ERR(scrub_image)) - return PTR_ERR(scrub_image); - - nvkm_falcon_reset(falcon); - nvkm_falcon_bind_context(falcon, NULL); - - hsbin_hdr = scrub_image; - fw_hdr = scrub_image + hsbin_hdr->header_offset; - lhdr = scrub_image + fw_hdr->hdr_offset; - scrub_data = scrub_image + hsbin_hdr->data_offset; - - nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off, - lhdr->non_sec_code_size, - lhdr->non_sec_code_off >> 8, 0, false); - nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0], - ALIGN(lhdr->apps[0], 0x100), - lhdr->apps[1], - lhdr->apps[0] >> 8, 0, true); - nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0, - lhdr->data_size, 0); - - kfree(scrub_image); - - nvkm_falcon_set_start_addr(falcon, 0x0); - nvkm_falcon_start(falcon); - - ret = nvkm_falcon_wait_for_halt(falcon, 500); - if (ret < 0) { - nvkm_error(subdev, "failed to run VPR scrubber binary!\n"); - ret = -ETIMEDOUT; - goto end; - } - - /* put nvdec in clean state - without reset it will remain in HS mode */ - nvkm_falcon_reset(falcon); - - if (gp102_secboot_scrub_required(sb)) { - nvkm_error(subdev, "VPR scrubber binary failed!\n"); - ret = -EINVAL; - goto end; - } - - nvkm_debug(subdev, "VPR scrub successfully completed\n"); - -end: - nvkm_falcon_put(falcon, &sb->subdev); - nvkm_engine_unref(&engine); - return ret; -} - -static int -gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob, - struct nvkm_falcon *falcon) -{ - int ret; - - /* make sure the VPR region is unlocked */ - if (gp102_secboot_scrub_required(sb)) { - ret = gp102_run_secure_scrub(sb); - if (ret) - return ret; - } - - return gm200_secboot_run_blob(sb, blob, falcon); -} - -const struct nvkm_secboot_func -gp102_secboot = { - .dtor = gm200_secboot_dtor, - .oneinit = gm200_secboot_oneinit, - .fini = gm200_secboot_fini, - .run_blob = gp102_secboot_run_blob, -}; - -int -gp102_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - int ret; - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r367_new(NVKM_SECBOOT_FALCON_SEC2, - BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_GPCCS) | - BIT(NVKM_SECBOOT_FALCON_SEC2)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); - if (!gsb) { - psb = NULL; - return -ENOMEM; - } - *psb = &gsb->base; - - ret = 
nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base); - if (ret) - return ret; - - return 0; -} - -MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin"); -MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin"); -MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin"); 
-MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin"); -MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c deleted file mode 100644 index 737a8d50a1f2..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2017 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "gm200.h" -#include "acr.h" - -int -gp108_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r370_new(NVKM_SECBOOT_FALCON_SEC2, - BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_GPCCS) | - BIT(NVKM_SECBOOT_FALCON_SEC2)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - - if (!(gsb = kzalloc(sizeof(*gsb), GFP_KERNEL))) { - acr->func->dtor(acr); - return -ENOMEM; - } - *psb = &gsb->base; - - return nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base); -} - -MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin"); - -MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c deleted file mode 100644 index 28ca29d0eeee..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr.h" -#include "gm200.h" - -#define TEGRA186_MC_BASE 0x02c10000 - -static int -gp10b_secboot_oneinit(struct nvkm_secboot *sb) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - int ret; - - ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA186_MC_BASE); - if (ret) - return ret; - - return gm200_secboot_oneinit(sb); -} - -static const struct nvkm_secboot_func -gp10b_secboot = { - .dtor = gm200_secboot_dtor, - .oneinit = gp10b_secboot_oneinit, - .fini = gm200_secboot_fini, - .run_blob = gm200_secboot_run_blob, -}; - -int -gp10b_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - int ret; - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_GPCCS) | - BIT(NVKM_SECBOOT_FALCON_PMU)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); - if (!gsb) { - psb = NULL; - return -ENOMEM; - } - *psb = &gsb->base; - - ret = nvkm_secboot_ctor(&gp10b_secboot, acr, device, index, &gsb->base); - if (ret) - return ret; - - return 0; -} - -#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) -MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin"); -MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin"); -MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin"); -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c deleted file mode 100644 index 6b33182ddc2f..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "hs_ucode.h" -#include "ls_ucode.h" -#include "acr.h" - -#include <engine/falcon.h> - -/** - * hs_ucode_patch_signature() - patch HS blob with correct signature for - * specified falcon. - */ -static void -hs_ucode_patch_signature(const struct nvkm_falcon *falcon, void *acr_image, - bool new_format) -{ - struct fw_bin_header *hsbin_hdr = acr_image; - struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset; - void *hs_data = acr_image + hsbin_hdr->data_offset; - void *sig; - u32 sig_size; - u32 patch_loc, patch_sig; - - /* - * I had the brilliant idea to "improve" the binary format by - * removing this useless indirection. However to make NVIDIA files - * directly compatible, let's support both format. - */ - if (new_format) { - patch_loc = fw_hdr->patch_loc; - patch_sig = fw_hdr->patch_sig; - } else { - patch_loc = *(u32 *)(acr_image + fw_hdr->patch_loc); - patch_sig = *(u32 *)(acr_image + fw_hdr->patch_sig); - } - - /* Falcon in debug or production mode? */ - if (falcon->debug) { - sig = acr_image + fw_hdr->sig_dbg_offset; - sig_size = fw_hdr->sig_dbg_size; - } else { - sig = acr_image + fw_hdr->sig_prod_offset; - sig_size = fw_hdr->sig_prod_size; - } - - /* Patch signature */ - memcpy(hs_data + patch_loc, sig + patch_sig, sig_size); -} - -void * -hs_ucode_load_blob(struct nvkm_subdev *subdev, const struct nvkm_falcon *falcon, - const char *fw) -{ - void *acr_image; - bool new_format; - - acr_image = nvkm_acr_load_firmware(subdev, fw, 0); - if (IS_ERR(acr_image)) - return acr_image; - - /* detect the format to define how signature should be patched */ - switch (((u32 *)acr_image)[0]) { - case 0x3b1d14f0: - new_format = true; - break; - case 0x000010de: - new_format = false; - break; - default: - nvkm_error(subdev, "unknown header for HS blob %s\n", fw); - return ERR_PTR(-EINVAL); - } - - hs_ucode_patch_signature(falcon, acr_image, new_format); - - return acr_image; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h deleted file mode 100644 index d8cfc6f7752a..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_HS_UCODE_H__ -#define __NVKM_SECBOOT_HS_UCODE_H__ - -#include <core/os.h> -#include <core/subdev.h> - -struct nvkm_falcon; - -/** - * struct hsf_fw_header - HS firmware descriptor - * @sig_dbg_offset: offset of the debug signature - * @sig_dbg_size: size of the debug signature - * @sig_prod_offset: offset of the production signature - * @sig_prod_size: size of the production signature - * @patch_loc: offset of the offset (sic) of where the signature is - * @patch_sig: offset of the offset (sic) to add to sig_*_offset - * @hdr_offset: offset of the load header (see struct hs_load_header) - * @hdr_size: size of above header - * - * This structure is embedded in the HS firmware image at - * hs_bin_hdr.header_offset. - */ -struct hsf_fw_header { - u32 sig_dbg_offset; - u32 sig_dbg_size; - u32 sig_prod_offset; - u32 sig_prod_size; - u32 patch_loc; - u32 patch_sig; - u32 hdr_offset; - u32 hdr_size; -}; - -/** - * struct hsf_load_header - HS firmware load header - */ -struct hsf_load_header { - u32 non_sec_code_off; - u32 non_sec_code_size; - u32 data_dma_base; - u32 data_size; - u32 num_apps; - /* - * Organized as follows: - * - app0_code_off - * - app1_code_off - * - ... - * - appn_code_off - * - app0_code_size - * - app1_code_size - * - ... - */ - u32 apps[0]; -}; - -void *hs_ucode_load_blob(struct nvkm_subdev *, const struct nvkm_falcon *, - const char *); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h deleted file mode 100644 index d43f906da3a7..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
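The HS image headers deleted here chain together as the gp102 scrubber loader above demonstrates: a fw_bin_header at the start of the file (defined in the deleted ls_ucode.h further down) points at an hsf_fw_header, which in turn points at the hsf_load_header, whose apps[] array stores num_apps code offsets followed by num_apps code sizes. The following standalone sketch (stand-in struct and helper names) restates that indexing rule:

#include <stdint.h>

/* Trimmed stand-in for the deleted hsf_load_header; only the fields
 * needed to illustrate the apps[] layout are kept. */
struct hsf_load_header_sketch {
        uint32_t non_sec_code_off;
        uint32_t non_sec_code_size;
        uint32_t data_dma_base;
        uint32_t data_size;
        uint32_t num_apps;
        uint32_t apps[]; /* num_apps code offsets, then num_apps code sizes */
};

/* App i's code offset sits in the first half of apps[]... */
static inline uint32_t
hs_app_code_off(const struct hsf_load_header_sketch *h, uint32_t i)
{
        return h->apps[i];
}

/* ...and its size in the second half, num_apps entries later. */
static inline uint32_t
hs_app_code_size(const struct hsf_load_header_sketch *h, uint32_t i)
{
        return h->apps[h->num_apps + i];
}

This matches the gp102 scrubber code above, which treats apps[0] as the offset and apps[1] as the size of its (single) secure app.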
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_LS_UCODE_H__ -#define __NVKM_SECBOOT_LS_UCODE_H__ - -#include <core/os.h> -#include <core/subdev.h> -#include <subdev/secboot.h> - -struct nvkm_acr; - -/** - * struct ls_ucode_img_desc - descriptor of firmware image - * @descriptor_size: size of this descriptor - * @image_size: size of the whole image - * @bootloader_start_offset: start offset of the bootloader in ucode image - * @bootloader_size: size of the bootloader - * @bootloader_imem_offset: start off set of the bootloader in IMEM - * @bootloader_entry_point: entry point of the bootloader in IMEM - * @app_start_offset: start offset of the LS firmware - * @app_size: size of the LS firmware's code and data - * @app_imem_offset: offset of the app in IMEM - * @app_imem_entry: entry point of the app in IMEM - * @app_dmem_offset: offset of the data in DMEM - * @app_resident_code_offset: offset of app code from app_start_offset - * @app_resident_code_size: size of the code - * @app_resident_data_offset: offset of data from app_start_offset - * @app_resident_data_size: size of data - * - * A firmware image contains the code, data, and bootloader of a given LS - * falcon in a single blob. This structure describes where everything is. - * - * This can be generated from a (bootloader, code, data) set if they have - * been loaded separately, or come directly from a file. - */ -struct ls_ucode_img_desc { - u32 descriptor_size; - u32 image_size; - u32 tools_version; - u32 app_version; - char date[64]; - u32 bootloader_start_offset; - u32 bootloader_size; - u32 bootloader_imem_offset; - u32 bootloader_entry_point; - u32 app_start_offset; - u32 app_size; - u32 app_imem_offset; - u32 app_imem_entry; - u32 app_dmem_offset; - u32 app_resident_code_offset; - u32 app_resident_code_size; - u32 app_resident_data_offset; - u32 app_resident_data_size; - u32 nb_overlays; - struct {u32 start; u32 size; } load_ovl[64]; - u32 compressed; -}; - -/** - * struct ls_ucode_img - temporary storage for loaded LS firmwares - * @node: to link within lsf_ucode_mgr - * @falcon_id: ID of the falcon this LS firmware is for - * @ucode_desc: loaded or generated map of ucode_data - * @ucode_data: firmware payload (code and data) - * @ucode_size: size in bytes of data in ucode_data - * @ucode_off: offset of the ucode in ucode_data - * @sig: signature for this firmware - * @sig:size: size of the signature in bytes - * - * Preparing the WPR LS blob requires information about all the LS firmwares - * (size, etc) to be known. This structure contains all the data of one LS - * firmware. 
- */ -struct ls_ucode_img { - struct list_head node; - enum nvkm_secboot_falcon falcon_id; - - struct ls_ucode_img_desc ucode_desc; - u8 *ucode_data; - u32 ucode_size; - u32 ucode_off; - - u8 *sig; - u32 sig_size; -}; - -/** - * struct fw_bin_header - header of firmware files - * @bin_magic: always 0x3b1d14f0 - * @bin_ver: version of the bin format - * @bin_size: entire image size including this header - * @header_offset: offset of the firmware/bootloader header in the file - * @data_offset: offset of the firmware/bootloader payload in the file - * @data_size: size of the payload - * - * This header is located at the beginning of the HS firmware and HS bootloader - * files, to describe where the headers and data can be found. - */ -struct fw_bin_header { - u32 bin_magic; - u32 bin_ver; - u32 bin_size; - u32 header_offset; - u32 data_offset; - u32 data_size; -}; - -/** - * struct fw_bl_desc - firmware bootloader descriptor - * @start_tag: starting tag of bootloader - * @desc_dmem_load_off: DMEM offset of flcn_bl_dmem_desc - * @code_off: offset of code section - * @code_size: size of code section - * @data_off: offset of data section - * @data_size: size of data section - * - * This structure is embedded in bootloader firmware files at to describe the - * IMEM and DMEM layout expected by the bootloader. - */ -struct fw_bl_desc { - u32 start_tag; - u32 dmem_load_off; - u32 code_off; - u32 code_size; - u32 data_off; - u32 data_size; -}; - -int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, int, - struct ls_ucode_img *); -int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, int, - struct ls_ucode_img *); -int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, int, - struct ls_ucode_img *); -int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); -int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, int, - struct ls_ucode_img *); -int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c deleted file mode 100644 index 821d3b2bdb1f..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
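The ls_ucode_img_desc kernel-doc above mixes image-relative and app-relative offsets; the following compact sketch (stand-in struct and helper names, field meanings copied from the deleted header) shows which is which:

#include <stdint.h>

/* Only the offset fields relevant to the address arithmetic. */
struct ls_desc_sketch {
        uint32_t bootloader_start_offset;  /* relative to image start */
        uint32_t app_start_offset;         /* relative to image start */
        uint32_t app_resident_code_offset; /* relative to app_start_offset */
        uint32_t app_resident_data_offset; /* relative to app_start_offset */
};

static inline const uint8_t *
ls_img_bootloader(const uint8_t *image, const struct ls_desc_sketch *d)
{
        return image + d->bootloader_start_offset;
}

static inline const uint8_t *
ls_img_code(const uint8_t *image, const struct ls_desc_sketch *d)
{
        return image + d->app_start_offset + d->app_resident_code_offset;
}

static inline const uint8_t *
ls_img_data(const uint8_t *image, const struct ls_desc_sketch *d)
{
        return image + d->app_start_offset + d->app_resident_data_offset;
}

The deleted ls_ucode_img_build() below fills these fields with exactly that interpretation.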
- */ - - -#include "ls_ucode.h" -#include "acr.h" - -#include <core/firmware.h> - -#define BL_DESC_BLK_SIZE 256 -/** - * Build a ucode image and descriptor from provided bootloader, code and data. - * - * @bl: bootloader image, including 16-bytes descriptor - * @code: LS firmware code segment - * @data: LS firmware data segment - * @desc: ucode descriptor to be written - * - * Return: allocated ucode image with corresponding descriptor information. desc - * is also updated to contain the right offsets within returned image. - */ -static void * -ls_ucode_img_build(const struct firmware *bl, const struct firmware *code, - const struct firmware *data, struct ls_ucode_img_desc *desc) -{ - struct fw_bin_header *bin_hdr = (void *)bl->data; - struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset; - void *bl_data = (void *)bl->data + bin_hdr->data_offset; - u32 pos = 0; - void *image; - - desc->bootloader_start_offset = pos; - desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32)); - desc->bootloader_imem_offset = bl_desc->start_tag * 256; - desc->bootloader_entry_point = bl_desc->start_tag * 256; - - pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE); - desc->app_start_offset = pos; - desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) + - ALIGN(data->size, BL_DESC_BLK_SIZE); - desc->app_imem_offset = 0; - desc->app_imem_entry = 0; - desc->app_dmem_offset = 0; - desc->app_resident_code_offset = 0; - desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE); - - pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE); - desc->app_resident_data_offset = pos - desc->app_start_offset; - desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE); - - desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) + - desc->app_size; - - image = kzalloc(desc->image_size, GFP_KERNEL); - if (!image) - return ERR_PTR(-ENOMEM); - - memcpy(image + desc->bootloader_start_offset, bl_data, - bl_desc->code_size); - memcpy(image + desc->app_start_offset, code->data, code->size); - memcpy(image + desc->app_start_offset + desc->app_resident_data_offset, - data->data, data->size); - - return image; -} - -/** - * ls_ucode_img_load_gr() - load and prepare a LS GR ucode image - * - * Load the LS microcode, bootloader and signature and pack them into a single - * blob. Also generate the corresponding ucode descriptor. 
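To make the 256-byte packing in ls_ucode_img_build() above concrete, here is the same layout arithmetic as a standalone program over purely hypothetical bootloader, code, and data sizes:

#include <stdio.h>
#include <stdint.h>

#define BLK 256u /* BL_DESC_BLK_SIZE in the deleted ls_ucode_gr.c */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        /* Hypothetical segment sizes, illustration only. */
        uint32_t bl_code_size = 0x1a4;  /* fw_bl_desc.code_size */
        uint32_t code_size    = 0x5000; /* gr/<falcon>_inst payload */
        uint32_t data_size    = 0x0c40; /* gr/<falcon>_data payload */

        /* Mirrors the offsets computed by ls_ucode_img_build(). */
        uint32_t bootloader_size = ALIGN_UP(bl_code_size, 4u);    /* 0x1a4 */
        uint32_t app_start       = ALIGN_UP(bootloader_size, BLK); /* 0x200 */
        uint32_t data_off        = ALIGN_UP(code_size, BLK);      /* app-relative */
        uint32_t image_size      = ALIGN_UP(bl_code_size, BLK)
                                 + ALIGN_UP(code_size, BLK)
                                 + ALIGN_UP(data_size, BLK);

        printf("bl 0x%x bytes, app at 0x%x, data at app+0x%x, image 0x%x bytes\n",
               bootloader_size, app_start, data_off, image_size);
        return 0;
}

For these numbers the image comes out to 0x5f00 bytes. The bootloader is additionally tagged with bootloader_imem_offset = start_tag * 256, i.e. start_tag counts 256-byte IMEM blocks, as the deleted code above shows.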
- */ -static int -ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, int maxver, - struct ls_ucode_img *img, const char *falcon_name) -{ - const struct firmware *bl, *code, *data, *sig; - char f[64]; - int ret; - - snprintf(f, sizeof(f), "gr/%s_bl", falcon_name); - ret = nvkm_firmware_get(subdev, f, &bl); - if (ret) - goto error; - - snprintf(f, sizeof(f), "gr/%s_inst", falcon_name); - ret = nvkm_firmware_get(subdev, f, &code); - if (ret) - goto free_bl; - - snprintf(f, sizeof(f), "gr/%s_data", falcon_name); - ret = nvkm_firmware_get(subdev, f, &data); - if (ret) - goto free_inst; - - snprintf(f, sizeof(f), "gr/%s_sig", falcon_name); - ret = nvkm_firmware_get(subdev, f, &sig); - if (ret) - goto free_data; - - img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL); - if (!img->sig) { - ret = -ENOMEM; - goto free_sig; - } - img->sig_size = sig->size; - - img->ucode_data = ls_ucode_img_build(bl, code, data, - &img->ucode_desc); - if (IS_ERR(img->ucode_data)) { - kfree(img->sig); - ret = PTR_ERR(img->ucode_data); - goto free_sig; - } - img->ucode_size = img->ucode_desc.image_size; - -free_sig: - nvkm_firmware_put(sig); -free_data: - nvkm_firmware_put(data); -free_inst: - nvkm_firmware_put(code); -free_bl: - nvkm_firmware_put(bl); -error: - return ret; -} - -int -acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, int maxver, - struct ls_ucode_img *img) -{ - return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "fecs"); -} - -int -acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, int maxver, - struct ls_ucode_img *img) -{ - return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "gpccs"); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c deleted file mode 100644 index a84a999445bb..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - - -#include "ls_ucode.h" -#include "acr.h" - -#include <core/firmware.h> -#include <core/msgqueue.h> -#include <subdev/pmu.h> -#include <engine/sec2.h> -#include <subdev/mc.h> -#include <subdev/timer.h> - -/** - * acr_ls_ucode_load_msgqueue - load and prepare a ucode img for a msgqueue fw - * - * Load the LS microcode, desc and signature and pack them into a single - * blob. 
- */ -static int -acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name, - int maxver, struct ls_ucode_img *img) -{ - const struct firmware *image, *desc, *sig; - char f[64]; - int ver, ret; - - snprintf(f, sizeof(f), "%s/image", name); - ver = nvkm_firmware_get_version(subdev, f, 0, maxver, &image); - if (ver < 0) - return ver; - img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL); - nvkm_firmware_put(image); - if (!img->ucode_data) - return -ENOMEM; - - snprintf(f, sizeof(f), "%s/desc", name); - ret = nvkm_firmware_get_version(subdev, f, ver, ver, &desc); - if (ret < 0) - return ret; - memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc)); - img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256); - nvkm_firmware_put(desc); - - snprintf(f, sizeof(f), "%s/sig", name); - ret = nvkm_firmware_get_version(subdev, f, ver, ver, &sig); - if (ret < 0) - return ret; - img->sig_size = sig->size; - img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL); - nvkm_firmware_put(sig); - if (!img->sig) - return -ENOMEM; - - return ver; -} - -static int -acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue, - struct nvkm_falcon *falcon, u32 addr_args) -{ - struct nvkm_device *device = falcon->owner->device; - u8 buf[NVKM_MSGQUEUE_CMDLINE_SIZE]; - - memset(buf, 0, sizeof(buf)); - nvkm_msgqueue_write_cmdline(queue, buf); - nvkm_falcon_load_dmem(falcon, buf, addr_args, sizeof(buf), 0); - /* rearm the queue so it will wait for the init message */ - nvkm_msgqueue_reinit(queue); - - /* Enable interrupts */ - nvkm_falcon_wr32(falcon, 0x10, 0xff); - nvkm_mc_intr_mask(device, falcon->owner->index, true); - - /* Start LS firmware on boot falcon */ - nvkm_falcon_start(falcon); - - return 0; -} - -int -acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver, - struct ls_ucode_img *img) -{ - struct nvkm_pmu *pmu = sb->subdev.device->pmu; - int ret; - - ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", maxver, img); - if (ret) - return ret; - - /* Allocate the PMU queue corresponding to the FW version */ - ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon, - sb, &pmu->queue); - if (ret) - return ret; - - return 0; -} - -int -acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb) -{ - struct nvkm_device *device = sb->subdev.device; - struct nvkm_pmu *pmu = device->pmu; - u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE; - int ret; - - ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args); - if (ret) - return ret; - - nvkm_debug(&sb->subdev, "%s started\n", - nvkm_secboot_falcon_name[acr->boot_falcon]); - - return 0; -} - -int -acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, int maxver, - struct ls_ucode_img *img) -{ - struct nvkm_sec2 *sec = sb->subdev.device->sec2; - int ver, ret; - - ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", maxver, img); - if (ver < 0) - return ver; - - /* Allocate the PMU queue corresponding to the FW version */ - ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon, - sb, &sec->queue); - if (ret) - return ret; - - return ver; -} - -int -acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb) -{ - const struct nvkm_subdev *subdev = &sb->subdev; - struct nvkm_device *device = subdev->device; - struct nvkm_sec2 *sec = device->sec2; - /* on SEC arguments are always at the beginning of EMEM */ - const u32 addr_args = 0x01000000; - int ret; - - ret = acr_ls_msgqueue_post_run(sec->queue, 
sec->falcon, addr_args); - if (ret) - return ret; - - nvkm_debug(&sb->subdev, "%s started\n", - nvkm_secboot_falcon_name[acr->boot_falcon]); - - return 0; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h deleted file mode 100644 index 959a7b2dbdc9..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_PRIV_H__ -#define __NVKM_SECBOOT_PRIV_H__ - -#include <subdev/secboot.h> -#include <subdev/mmu.h> -struct nvkm_gpuobj; - -struct nvkm_secboot_func { - int (*oneinit)(struct nvkm_secboot *); - int (*fini)(struct nvkm_secboot *, bool suspend); - void *(*dtor)(struct nvkm_secboot *); - int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *, - struct nvkm_falcon *); -}; - -int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *, - struct nvkm_device *, int, struct nvkm_secboot *); -int nvkm_secboot_falcon_reset(struct nvkm_secboot *); -int nvkm_secboot_falcon_run(struct nvkm_secboot *); - -extern const struct nvkm_secboot_func gp102_secboot; - -struct flcn_u64 { - u32 lo; - u32 hi; -}; - -static inline u64 flcn64_to_u64(const struct flcn_u64 f) -{ - return ((u64)f.hi) << 32 | f.lo; -} - -static inline struct flcn_u64 u64_to_flcn64(u64 u) -{ - struct flcn_u64 ret; - - ret.hi = upper_32_bits(u); - ret.lo = lower_32_bits(u); - - return ret; -} - -#endif |
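Finally, the flcn_u64 helpers at the end of the deleted priv.h simply split a 64-bit value into the lo/hi pair used by the falcon-facing descriptors. A standalone round-trip check, with userspace stand-ins for upper_32_bits()/lower_32_bits() and a hypothetical 40-bit address:

#include <assert.h>
#include <stdint.h>

struct flcn_u64 {
        uint32_t lo;
        uint32_t hi;
};

static inline uint64_t flcn64_to_u64(struct flcn_u64 f)
{
        return ((uint64_t)f.hi << 32) | f.lo;
}

static inline struct flcn_u64 u64_to_flcn64(uint64_t u)
{
        struct flcn_u64 ret = { .lo = (uint32_t)u, .hi = (uint32_t)(u >> 32) };
        return ret;
}

int main(void)
{
        uint64_t wpr_base = 0x1ff300000ull; /* hypothetical 40-bit address */
        struct flcn_u64 f = u64_to_flcn64(wpr_base);

        assert(f.lo == 0xff300000 && f.hi == 0x1);
        assert(flcn64_to_u64(f) == wpr_base);
        return 0;
}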