drm/nouveau/secboot: remove

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Ben Skeggs 2020-01-15 06:34:22 +10:00
parent 22dcda45a3
commit 9d350c5e51
33 changed files with 1 addition and 4189 deletions

@@ -30,7 +30,6 @@ enum nvkm_devidx {
NVKM_SUBDEV_THERM,
NVKM_SUBDEV_CLK,
NVKM_SUBDEV_GSP,
NVKM_SUBDEV_SECBOOT,
NVKM_ENGINE_BSP,
@@ -151,7 +150,6 @@ struct nvkm_device {
struct nvkm_subdev *mxm;
struct nvkm_pci *pci;
struct nvkm_pmu *pmu;
struct nvkm_secboot *secboot;
struct nvkm_therm *therm;
struct nvkm_timer *timer;
struct nvkm_top *top;
@@ -225,7 +223,6 @@ struct nvkm_device_chip {
int (*mxm )(struct nvkm_device *, int idx, struct nvkm_subdev **);
int (*pci )(struct nvkm_device *, int idx, struct nvkm_pci **);
int (*pmu )(struct nvkm_device *, int idx, struct nvkm_pmu **);
int (*secboot )(struct nvkm_device *, int idx, struct nvkm_secboot **);
int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **);
int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **);
int (*top )(struct nvkm_device *, int idx, struct nvkm_top **);

@@ -1,28 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_CORE_MSGQUEUE_H
#define __NVKM_CORE_MSGQUEUE_H
/* Hopefully we will never have firmware arguments larger than that... */
#define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100
#endif

@@ -51,7 +51,6 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_SUBDEV_MXM ] = "mxm",
[NVKM_SUBDEV_PCI ] = "pci",
[NVKM_SUBDEV_PMU ] = "pmu",
[NVKM_SUBDEV_SECBOOT ] = "secboot",
[NVKM_SUBDEV_THERM ] = "therm",
[NVKM_SUBDEV_TIMER ] = "tmr",
[NVKM_SUBDEV_TOP ] = "top",

@@ -2691,7 +2691,6 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
_(MXM , device->mxm , device->mxm);
_(PCI , device->pci , &device->pci->subdev);
_(PMU , device->pmu , &device->pmu->subdev);
_(SECBOOT , device->secboot , &device->secboot->subdev);
_(THERM , device->therm , &device->therm->subdev);
_(TIMER , device->timer , &device->timer->subdev);
_(TOP , device->top , &device->top->subdev);
@@ -3198,7 +3197,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_SUBDEV_MXM , mxm);
_(NVKM_SUBDEV_PCI , pci);
_(NVKM_SUBDEV_PMU , pmu);
_(NVKM_SUBDEV_SECBOOT , secboot);
_(NVKM_SUBDEV_THERM , therm);
_(NVKM_SUBDEV_TIMER , timer);
_(NVKM_SUBDEV_TOP , top);

@@ -28,7 +28,6 @@
#include <subdev/timer.h>
#include <subdev/top.h>
#include <subdev/volt.h>
#include <subdev/secboot.h>
#include <engine/bsp.h>
#include <engine/ce.h>

@@ -84,7 +84,7 @@ gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
u32 val;
/* Bypass MMU check for non-secure boot */
if (!device->secboot) {
if (!device->acr) {
nvkm_wr32(device, 0x100ce4, 0xffffffff);
if (nvkm_rd32(device, 0x100ce4) != 0xffffffff)

@@ -20,7 +20,6 @@ include $(src)/nvkm/subdev/mmu/Kbuild
include $(src)/nvkm/subdev/mxm/Kbuild
include $(src)/nvkm/subdev/pci/Kbuild
include $(src)/nvkm/subdev/pmu/Kbuild
include $(src)/nvkm/subdev/secboot/Kbuild
include $(src)/nvkm/subdev/therm/Kbuild
include $(src)/nvkm/subdev/timer/Kbuild
include $(src)/nvkm/subdev/top/Kbuild

@@ -1,17 +0,0 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/secboot/base.o
nvkm-y += nvkm/subdev/secboot/hs_ucode.o
nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o
nvkm-y += nvkm/subdev/secboot/ls_ucode_msgqueue.o
nvkm-y += nvkm/subdev/secboot/acr.o
nvkm-y += nvkm/subdev/secboot/acr_r352.o
nvkm-y += nvkm/subdev/secboot/acr_r361.o
nvkm-y += nvkm/subdev/secboot/acr_r364.o
nvkm-y += nvkm/subdev/secboot/acr_r367.o
nvkm-y += nvkm/subdev/secboot/acr_r370.o
nvkm-y += nvkm/subdev/secboot/acr_r375.o
nvkm-y += nvkm/subdev/secboot/gm200.o
nvkm-y += nvkm/subdev/secboot/gm20b.o
nvkm-y += nvkm/subdev/secboot/gp102.o
nvkm-y += nvkm/subdev/secboot/gp108.o
nvkm-y += nvkm/subdev/secboot/gp10b.o

@@ -1,54 +0,0 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr.h"
#include <core/firmware.h>
/**
* Convenience function to duplicate a firmware file in memory and check that
* it has the required minimum size.
*/
void *
nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name,
size_t min_size)
{
const struct firmware *fw;
void *blob;
int ret;
ret = nvkm_firmware_get(subdev, name, &fw);
if (ret)
return ERR_PTR(ret);
if (fw->size < min_size) {
nvkm_error(subdev, "%s is smaller than expected size %zu\n",
name, min_size);
nvkm_firmware_put(fw);
return ERR_PTR(-EINVAL);
}
blob = kmemdup(fw->data, fw->size, GFP_KERNEL);
nvkm_firmware_put(fw);
if (!blob)
return ERR_PTR(-ENOMEM);
return blob;
}

@@ -1,70 +0,0 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_SECBOOT_ACR_H__
#define __NVKM_SECBOOT_ACR_H__
#include "priv.h"
struct nvkm_acr;
/**
* struct nvkm_acr_func - properties and functions specific to an ACR
*
* @load: make the ACR ready to run on the given secboot device
* @reset: reset the specified falcon
* @start: start the specified falcon (assumed to have been reset)
*/
struct nvkm_acr_func {
void (*dtor)(struct nvkm_acr *);
int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *);
int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool);
int (*load)(struct nvkm_acr *, struct nvkm_falcon *,
struct nvkm_gpuobj *, u64);
int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, unsigned long);
};
/**
* struct nvkm_acr - instance of an ACR
*
* @boot_falcon: ID of the falcon that will perform secure boot
* @managed_falcons: bitfield of falcons managed by this ACR
* @optional_falcons: bitfield of falcons we can live without
*/
struct nvkm_acr {
const struct nvkm_acr_func *func;
const struct nvkm_subdev *subdev;
enum nvkm_secboot_falcon boot_falcon;
unsigned long managed_falcons;
unsigned long optional_falcons;
};
void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t);
struct nvkm_acr *acr_r352_new(unsigned long);
struct nvkm_acr *acr_r361_new(unsigned long);
struct nvkm_acr *acr_r364_new(unsigned long);
struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long);
struct nvkm_acr *acr_r370_new(enum nvkm_secboot_falcon, unsigned long);
struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long);
#endif

File diff suppressed because it is too large

@@ -1,165 +0,0 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_SECBOOT_ACR_R352_H__
#define __NVKM_SECBOOT_ACR_R352_H__
#include "acr.h"
#include "ls_ucode.h"
#include "hs_ucode.h"
struct ls_ucode_img;
#define ACR_R352_MAX_APPS 8
#define LSF_FLAG_LOAD_CODE_AT_0 1
#define LSF_FLAG_DMACTL_REQ_CTX 4
#define LSF_FLAG_FORCE_PRIV_LOAD 8
static inline u32
hsf_load_header_app_off(const struct hsf_load_header *hdr, u32 app)
{
return hdr->apps[app];
}
static inline u32
hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
{
return hdr->apps[hdr->num_apps + app];
}
/**
* struct acr_r352_lsf_func - manages a specific LS firmware version
*
* @generate_bl_desc: function called on a block of bl_desc_size to generate the
* proper bootloader descriptor for this LS firmware
* @bl_desc_size: size of the bootloader descriptor
* @lhdr_flags: LS flags
*/
struct acr_r352_lsf_func {
void (*generate_bl_desc)(const struct nvkm_acr *,
const struct ls_ucode_img *, u64, void *);
u32 bl_desc_size;
u32 lhdr_flags;
};
/**
* struct acr_r352_ls_func - manages a single LS falcon
*
* @load: load the external firmware into a ls_ucode_img
*/
struct acr_r352_ls_func {
int (*load)(const struct nvkm_secboot *, int maxver,
struct ls_ucode_img *);
int version_max;
const struct acr_r352_lsf_func *version[];
};
struct acr_r352;
/**
* struct acr_r352_func - manages nuances between ACR versions
*
* @generate_hs_bl_desc: function called on a block of bl_desc_size to generate
* the proper HS bootloader descriptor
* @hs_bl_desc_size: size of the HS bootloader descriptor
*/
struct acr_r352_func {
void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *,
u64);
void (*fixup_hs_desc)(struct acr_r352 *, struct nvkm_secboot *, void *);
u32 hs_bl_desc_size;
bool shadow_blob;
struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *,
const struct nvkm_secboot *,
enum nvkm_secboot_falcon);
int (*ls_fill_headers)(struct acr_r352 *, struct list_head *);
int (*ls_write_wpr)(struct acr_r352 *, struct list_head *,
struct nvkm_gpuobj *, u64);
const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END];
};
/**
* struct acr_r352 - ACR data for driver release 352 (and beyond)
*/
struct acr_r352 {
struct nvkm_acr base;
const struct acr_r352_func *func;
/*
* HS FW - lock WPR region (dGPU only) and load LS FWs
* on Tegra the HS FW copies the LS blob into the fixed WPR instead
*/
struct nvkm_gpuobj *load_blob;
struct {
struct hsf_load_header load_bl_header;
u32 __load_apps[ACR_R352_MAX_APPS * 2];
};
/* HS FW - unlock WPR region (dGPU only) */
struct nvkm_gpuobj *unload_blob;
struct {
struct hsf_load_header unload_bl_header;
u32 __unload_apps[ACR_R352_MAX_APPS * 2];
};
/* HS bootloader */
void *hsbl_blob;
/* HS bootloader for unload blob, if using a different falcon */
void *hsbl_unload_blob;
/* LS FWs, to be loaded by the HS ACR */
struct nvkm_gpuobj *ls_blob;
/* Firmware already loaded? */
bool firmware_ok;
/* Falcons to lazy-bootstrap */
u32 lazy_bootstrap;
/* To keep track of the state of all managed falcons */
enum {
/* In non-secure state, no firmware loaded, no privileges*/
NON_SECURE = 0,
/* In low-secure mode and ready to be started */
RESET,
/* In low-secure mode and running */
RUNNING,
} falcon_state[NVKM_SECBOOT_FALCON_END];
};
#define acr_r352(acr) container_of(acr, struct acr_r352, base)
struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *,
enum nvkm_secboot_falcon, unsigned long);
struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *,
const struct nvkm_secboot *,
enum nvkm_secboot_falcon);
int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *);
int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *,
struct nvkm_gpuobj *, u64);
void acr_r352_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
#endif

@@ -1,227 +0,0 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr_r361.h"
#include <engine/falcon.h>
#include <core/msgqueue.h>
#include <subdev/pmu.h>
#include <engine/sec2.h>
static void
acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
struct acr_r361_flcn_bl_desc *desc = _desc;
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
u64 base, addr_code, addr_data;
base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
addr_code = base + pdesc->app_resident_code_offset;
addr_data = base + pdesc->app_resident_data_offset;
desc->ctx_dma = FALCON_DMAIDX_UCODE;
desc->code_dma_base = u64_to_flcn64(addr_code);
desc->non_sec_code_off = pdesc->app_resident_code_offset;
desc->non_sec_code_size = pdesc->app_resident_code_size;
desc->code_entry_point = pdesc->app_imem_entry;
desc->data_dma_base = u64_to_flcn64(addr_data);
desc->data_size = pdesc->app_resident_data_size;
}
void
acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
u64 offset)
{
struct acr_r361_flcn_bl_desc *bl_desc = _bl_desc;
bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
bl_desc->code_dma_base = u64_to_flcn64(offset);
bl_desc->non_sec_code_off = hdr->non_sec_code_off;
bl_desc->non_sec_code_size = hdr->non_sec_code_size;
bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
bl_desc->code_entry_point = 0;
bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
bl_desc->data_size = hdr->data_size;
}
static const struct acr_r352_lsf_func
acr_r361_ls_fecs_func_0 = {
.generate_bl_desc = acr_r361_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
};
const struct acr_r352_ls_func
acr_r361_ls_fecs_func = {
.load = acr_ls_ucode_load_fecs,
.version_max = 0,
.version = {
&acr_r361_ls_fecs_func_0,
}
};
static const struct acr_r352_lsf_func
acr_r361_ls_gpccs_func_0 = {
.generate_bl_desc = acr_r361_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
/* GPCCS will be loaded using PRI */
.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
const struct acr_r352_ls_func
acr_r361_ls_gpccs_func = {
.load = acr_ls_ucode_load_gpccs,
.version_max = 0,
.version = {
&acr_r361_ls_gpccs_func_0,
}
};
struct acr_r361_pmu_bl_desc {
u32 reserved;
u32 dma_idx;
struct flcn_u64 code_dma_base;
u32 total_code_size;
u32 code_size_to_load;
u32 code_entry_point;
struct flcn_u64 data_dma_base;
u32 data_size;
struct flcn_u64 overlay_dma_base;
u32 argc;
u32 argv;
};
static void
acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
struct acr_r361_pmu_bl_desc *desc = _desc;
u64 base, addr_code, addr_data;
u32 addr_args;
base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
addr_code = base + pdesc->app_resident_code_offset;
addr_data = base + pdesc->app_resident_data_offset;
addr_args = pmu->falcon.data.limit;
addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
desc->dma_idx = FALCON_DMAIDX_UCODE;
desc->code_dma_base = u64_to_flcn64(addr_code);
desc->total_code_size = pdesc->app_size;
desc->code_size_to_load = pdesc->app_resident_code_size;
desc->code_entry_point = pdesc->app_imem_entry;
desc->data_dma_base = u64_to_flcn64(addr_data);
desc->data_size = pdesc->app_resident_data_size;
desc->overlay_dma_base = u64_to_flcn64(addr_code);
desc->argc = 1;
desc->argv = addr_args;
}
static const struct acr_r352_lsf_func
acr_r361_ls_pmu_func_0 = {
.generate_bl_desc = acr_r361_generate_pmu_bl_desc,
.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
};
const struct acr_r352_ls_func
acr_r361_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu,
.version_max = 0,
.version = {
&acr_r361_ls_pmu_func_0,
}
};
static void
acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
struct acr_r361_pmu_bl_desc *desc = _desc;
u64 base, addr_code, addr_data;
u32 addr_args;
base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
/* For some reason we should not add app_resident_code_offset here */
addr_code = base;
addr_data = base + pdesc->app_resident_data_offset;
addr_args = sec->falcon.data.limit;
addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
desc->dma_idx = FALCON_SEC2_DMAIDX_UCODE;
desc->code_dma_base = u64_to_flcn64(addr_code);
desc->total_code_size = pdesc->app_size;
desc->code_size_to_load = pdesc->app_resident_code_size;
desc->code_entry_point = pdesc->app_imem_entry;
desc->data_dma_base = u64_to_flcn64(addr_data);
desc->data_size = pdesc->app_resident_data_size;
desc->overlay_dma_base = u64_to_flcn64(addr_code);
desc->argc = 1;
/* args are stored at the beginning of EMEM */
desc->argv = 0x01000000;
}
const struct acr_r352_lsf_func
acr_r361_ls_sec2_func_0 = {
.generate_bl_desc = acr_r361_generate_sec2_bl_desc,
.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
};
static const struct acr_r352_ls_func
acr_r361_ls_sec2_func = {
.load = acr_ls_ucode_load_sec2,
.version_max = 0,
.version = {
&acr_r361_ls_sec2_func_0,
}
};
const struct acr_r352_func
acr_r361_func = {
.fixup_hs_desc = acr_r352_fixup_hs_desc,
.generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
.hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
.ls_fill_headers = acr_r352_ls_fill_headers,
.ls_write_wpr = acr_r352_ls_write_wpr,
.ls_func = {
[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
[NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
},
};
struct nvkm_acr *
acr_r361_new(unsigned long managed_falcons)
{
return acr_r352_new_(&acr_r361_func, NVKM_SECBOOT_FALCON_PMU,
managed_falcons);
}

@@ -1,71 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_SECBOOT_ACR_R361_H__
#define __NVKM_SECBOOT_ACR_R361_H__
#include "acr_r352.h"
/**
* struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor
* @signature: 16B signature for secure code. 0s if no secure code
* @ctx_dma: DMA context to be used by BL while loading code/data
* @code_dma_base: 256B-aligned Physical FB Address where code is located
* (falcon's $xcbase register)
* @non_sec_code_off: offset from code_dma_base where the non-secure code is
* located. The offset must be multiple of 256 to help perf
* @non_sec_code_size: the size of the nonSecure code part.
* @sec_code_off: offset from code_dma_base where the secure code is
* located. The offset must be multiple of 256 to help perf
* @sec_code_size: offset from code_dma_base where the secure code is
* located. The offset must be multiple of 256 to help perf
* @code_entry_point: code entry point which will be invoked by BL after
* code is loaded.
* @data_dma_base: 256B aligned Physical FB Address where data is located.
* (falcon's $xdbase register)
* @data_size: size of data block. Should be multiple of 256B
*
* Structure used by the bootloader to load the rest of the code. This has
* to be filled by host and copied into DMEM at offset provided in the
* hsflcn_bl_desc.bl_desc_dmem_load_off.
*/
struct acr_r361_flcn_bl_desc {
u32 reserved[4];
u32 signature[4];
u32 ctx_dma;
struct flcn_u64 code_dma_base;
u32 non_sec_code_off;
u32 non_sec_code_size;
u32 sec_code_off;
u32 sec_code_size;
u32 code_entry_point;
struct flcn_u64 data_dma_base;
u32 data_size;
};
void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
extern const struct acr_r352_ls_func acr_r361_ls_fecs_func;
extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func;
extern const struct acr_r352_ls_func acr_r361_ls_pmu_func;
extern const struct acr_r352_lsf_func acr_r361_ls_sec2_func_0;
#endif

@@ -1,117 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr_r361.h"
#include <core/gpuobj.h>
/*
* r364 ACR: hsflcn_desc structure has changed to introduce the shadow_mem
* parameter.
*/
struct acr_r364_hsflcn_desc {
union {
u8 reserved_dmem[0x200];
u32 signatures[4];
} ucode_reserved_space;
u32 wpr_region_id;
u32 wpr_offset;
u32 mmu_memory_range;
struct {
u32 no_regions;
struct {
u32 start_addr;
u32 end_addr;
u32 region_id;
u32 read_mask;
u32 write_mask;
u32 client_mask;
u32 shadow_mem_start_addr;
} region_props[2];
} regions;
u32 ucode_blob_size;
u64 ucode_blob_base __aligned(8);
struct {
u32 vpr_enabled;
u32 vpr_start;
u32 vpr_end;
u32 hdcp_policies;
} vpr_desc;
};
static void
acr_r364_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
void *_desc)
{
struct acr_r364_hsflcn_desc *desc = _desc;
struct nvkm_gpuobj *ls_blob = acr->ls_blob;
/* WPR region information if WPR is not fixed */
if (sb->wpr_size == 0) {
u64 wpr_start = ls_blob->addr;
u64 wpr_end = ls_blob->addr + ls_blob->size;
if (acr->func->shadow_blob)
wpr_start += ls_blob->size / 2;
desc->wpr_region_id = 1;
desc->regions.no_regions = 2;
desc->regions.region_props[0].start_addr = wpr_start >> 8;
desc->regions.region_props[0].end_addr = wpr_end >> 8;
desc->regions.region_props[0].region_id = 1;
desc->regions.region_props[0].read_mask = 0xf;
desc->regions.region_props[0].write_mask = 0xc;
desc->regions.region_props[0].client_mask = 0x2;
if (acr->func->shadow_blob)
desc->regions.region_props[0].shadow_mem_start_addr =
ls_blob->addr >> 8;
else
desc->regions.region_props[0].shadow_mem_start_addr = 0;
} else {
desc->ucode_blob_base = ls_blob->addr;
desc->ucode_blob_size = ls_blob->size;
}
}
const struct acr_r352_func
acr_r364_func = {
.fixup_hs_desc = acr_r364_fixup_hs_desc,
.generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
.hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
.ls_fill_headers = acr_r352_ls_fill_headers,
.ls_write_wpr = acr_r352_ls_write_wpr,
.ls_func = {
[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
},
};
struct nvkm_acr *
acr_r364_new(unsigned long managed_falcons)
{
return acr_r352_new_(&acr_r364_func, NVKM_SECBOOT_FALCON_PMU,
managed_falcons);
}

@@ -1,417 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr_r367.h"
#include "acr_r361.h"
#include "acr_r370.h"
#include <core/gpuobj.h>
/*
* r367 ACR: new LS signature format requires a rewrite of LS firmware and
* blob creation functions. Also the hsflcn_desc layout has changed slightly.
*/
#define LSF_LSB_DEPMAP_SIZE 11
/**
* struct acr_r367_lsf_lsb_header - LS firmware header
*
* See also struct acr_r352_lsf_lsb_header for documentation.
*/
struct acr_r367_lsf_lsb_header {
/**
* LS falcon signatures
* @prd_keys: signature to use in production mode
* @dgb_keys: signature to use in debug mode
* @b_prd_present: whether the production key is present
* @b_dgb_present: whether the debug key is present
* @falcon_id: ID of the falcon the ucode applies to
*/
struct {
u8 prd_keys[2][16];
u8 dbg_keys[2][16];
u32 b_prd_present;
u32 b_dbg_present;
u32 falcon_id;
u32 supports_versioning;
u32 version;
u32 depmap_count;
u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4];
u8 kdf[16];
} signature;
u32 ucode_off;
u32 ucode_size;
u32 data_size;
u32 bl_code_size;
u32 bl_imem_off;
u32 bl_data_off;
u32 bl_data_size;
u32 app_code_off;
u32 app_code_size;
u32 app_data_off;
u32 app_data_size;
u32 flags;
};
/**
* struct acr_r367_lsf_wpr_header - LS blob WPR Header
*
* See also struct acr_r352_lsf_wpr_header for documentation.
*/
struct acr_r367_lsf_wpr_header {
u32 falcon_id;
u32 lsb_offset;
u32 bootstrap_owner;
u32 lazy_bootstrap;
u32 bin_version;
u32 status;
#define LSF_IMAGE_STATUS_NONE 0
#define LSF_IMAGE_STATUS_COPY 1
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED 7
};
/**
* struct ls_ucode_img_r367 - ucode image augmented with r367 headers
*/
struct ls_ucode_img_r367 {
struct ls_ucode_img base;
const struct acr_r352_lsf_func *func;
struct acr_r367_lsf_wpr_header wpr_header;
struct acr_r367_lsf_lsb_header lsb_header;
};
#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base)
struct ls_ucode_img *
acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
const struct nvkm_secboot *sb,
enum nvkm_secboot_falcon falcon_id)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
struct ls_ucode_img_r367 *img;
int ret;
img = kzalloc(sizeof(*img), GFP_KERNEL);
if (!img)
return ERR_PTR(-ENOMEM);
img->base.falcon_id = falcon_id;
ret = func->load(sb, func->version_max, &img->base);
if (ret < 0) {
kfree(img->base.ucode_data);
kfree(img->base.sig);
kfree(img);
return ERR_PTR(ret);
}
img->func = func->version[ret];
/* Check that the signature size matches our expectations... */
if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
nvkm_error(subdev, "invalid signature size for %s falcon!\n",
nvkm_secboot_falcon_name[falcon_id]);
return ERR_PTR(-EINVAL);
}
/* Copy signature to the right place */
memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
/* not needed? the signature should already have the right value */
img->lsb_header.signature.falcon_id = falcon_id;
return &img->base;
}
#define LSF_LSB_HEADER_ALIGN 256
#define LSF_BL_DATA_ALIGN 256
#define LSF_BL_DATA_SIZE_ALIGN 256
#define LSF_BL_CODE_SIZE_ALIGN 256
#define LSF_UCODE_DATA_ALIGN 4096
static u32
acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
struct ls_ucode_img_r367 *img, u32 offset)
{
struct ls_ucode_img *_img = &img->base;
struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
struct ls_ucode_img_desc *desc = &_img->ucode_desc;
const struct acr_r352_lsf_func *func = img->func;
/* Fill WPR header */
whdr->falcon_id = _img->falcon_id;
whdr->bootstrap_owner = acr->base.boot_falcon;
whdr->bin_version = lhdr->signature.version;
whdr->status = LSF_IMAGE_STATUS_COPY;
/* Skip bootstrapping falcons started by someone else than ACR */
if (acr->lazy_bootstrap & BIT(_img->falcon_id))
whdr->lazy_bootstrap = 1;
/* Align, save off, and include an LSB header size */
offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
whdr->lsb_offset = offset;
offset += sizeof(*lhdr);
/*
* Align, save off, and include the original (static) ucode
* image size
*/
offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
_img->ucode_off = lhdr->ucode_off = offset;
offset += _img->ucode_size;
/*
* For falcons that use a boot loader (BL), we append a loader
* desc structure on the end of the ucode image and consider
* this the boot loader data. The host will then copy the loader
* desc args to this space within the WPR region (before locking
* down) and the HS bin will then copy them to DMEM 0 for the
* loader.
*/
lhdr->bl_code_size = ALIGN(desc->bootloader_size,
LSF_BL_CODE_SIZE_ALIGN);
lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
lhdr->bl_code_size - lhdr->ucode_size;
/*
* Though the BL is located at 0th offset of the image, the VA
* is different to make sure that it doesn't collide the actual
* OS VA range
*/
lhdr->bl_imem_off = desc->bootloader_imem_offset;
lhdr->app_code_off = desc->app_start_offset +
desc->app_resident_code_offset;
lhdr->app_code_size = desc->app_resident_code_size;
lhdr->app_data_off = desc->app_start_offset +
desc->app_resident_data_offset;
lhdr->app_data_size = desc->app_resident_data_size;
lhdr->flags = func->lhdr_flags;
if (_img->falcon_id == acr->base.boot_falcon)
lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
/* Align and save off BL descriptor size */
lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
/*
* Align, save off, and include the additional BL data
*/
offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
lhdr->bl_data_off = offset;
offset += lhdr->bl_data_size;
return offset;
}
int
acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
{
struct ls_ucode_img_r367 *img;
struct list_head *l;
u32 count = 0;
u32 offset;
/* Count the number of images to manage */
list_for_each(l, imgs)
count++;
/*
* Start with an array of WPR headers at the base of the WPR.
* The expectation here is that the secure falcon will do a single DMA
* read of this array and cache it internally so it's ok to pack these.
* Also, we add 1 to the falcon count to indicate the end of the array.
*/
offset = sizeof(img->wpr_header) * (count + 1);
/*
* Walk the managed falcons, accounting for the LSB structs
* as well as the ucode images.
*/
list_for_each_entry(img, imgs, base.node) {
offset = acr_r367_ls_img_fill_headers(acr, img, offset);
}
return offset;
}
int
acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
{
struct ls_ucode_img *_img;
u32 pos = 0;
u32 max_desc_size = 0;
u8 *gdesc;
list_for_each_entry(_img, imgs, node) {
struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
const struct acr_r352_lsf_func *ls_func = img->func;
max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
}
gdesc = kmalloc(max_desc_size, GFP_KERNEL);
if (!gdesc)
return -ENOMEM;
nvkm_kmap(wpr_blob);
list_for_each_entry(_img, imgs, node) {
struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
const struct acr_r352_lsf_func *ls_func = img->func;
nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
sizeof(img->wpr_header));
nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
&img->lsb_header, sizeof(img->lsb_header));
/* Generate and write BL descriptor */
memset(gdesc, 0, ls_func->bl_desc_size);
ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
gdesc, ls_func->bl_desc_size);
/* Copy ucode */
nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
_img->ucode_data, _img->ucode_size);
pos += sizeof(img->wpr_header);
}
nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
nvkm_done(wpr_blob);
kfree(gdesc);
return 0;
}
struct acr_r367_hsflcn_desc {
u8 reserved_dmem[0x200];
u32 signatures[4];
u32 wpr_region_id;
u32 wpr_offset;
u32 mmu_memory_range;
#define FLCN_ACR_MAX_REGIONS 2
struct {
u32 no_regions;
struct {
u32 start_addr;
u32 end_addr;
u32 region_id;
u32 read_mask;
u32 write_mask;
u32 client_mask;
u32 shadow_mem_start_addr;
} region_props[FLCN_ACR_MAX_REGIONS];
} regions;
u32 ucode_blob_size;
u64 ucode_blob_base __aligned(8);
struct {
u32 vpr_enabled;
u32 vpr_start;
u32 vpr_end;
u32 hdcp_policies;
} vpr_desc;
};
void
acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
void *_desc)
{
struct acr_r367_hsflcn_desc *desc = _desc;
struct nvkm_gpuobj *ls_blob = acr->ls_blob;
/* WPR region information if WPR is not fixed */
if (sb->wpr_size == 0) {
u64 wpr_start = ls_blob->addr;
u64 wpr_end = ls_blob->addr + ls_blob->size;
if (acr->func->shadow_blob)
wpr_start += ls_blob->size / 2;
desc->wpr_region_id = 1;
desc->regions.no_regions = 2;
desc->regions.region_props[0].start_addr = wpr_start >> 8;
desc->regions.region_props[0].end_addr = wpr_end >> 8;
desc->regions.region_props[0].region_id = 1;
desc->regions.region_props[0].read_mask = 0xf;
desc->regions.region_props[0].write_mask = 0xc;
desc->regions.region_props[0].client_mask = 0x2;
if (acr->func->shadow_blob)
desc->regions.region_props[0].shadow_mem_start_addr =
ls_blob->addr >> 8;
else
desc->regions.region_props[0].shadow_mem_start_addr = 0;
} else {
desc->ucode_blob_base = ls_blob->addr;
desc->ucode_blob_size = ls_blob->size;
}
}
static const struct acr_r352_ls_func
acr_r367_ls_sec2_func = {
.load = acr_ls_ucode_load_sec2,
.version_max = 1,
.version = {
&acr_r361_ls_sec2_func_0,
&acr_r370_ls_sec2_func_0,
}
};
const struct acr_r352_func
acr_r367_func = {
.fixup_hs_desc = acr_r367_fixup_hs_desc,
.generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
.hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
.shadow_blob = true,
.ls_ucode_img_load = acr_r367_ls_ucode_img_load,
.ls_fill_headers = acr_r367_ls_fill_headers,
.ls_write_wpr = acr_r367_ls_write_wpr,
.ls_func = {
[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
[NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func,
},
};
struct nvkm_acr *
acr_r367_new(enum nvkm_secboot_falcon boot_falcon,
unsigned long managed_falcons)
{
return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons);
}

@@ -1,36 +0,0 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_SECBOOT_ACR_R367_H__
#define __NVKM_SECBOOT_ACR_R367_H__
#include "acr_r352.h"
void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *,
const struct nvkm_secboot *,
enum nvkm_secboot_falcon);
int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *);
int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *,
struct nvkm_gpuobj *, u64);
#endif

@@ -1,167 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr_r370.h"
#include "acr_r367.h"
#include <core/msgqueue.h>
#include <engine/falcon.h>
#include <engine/sec2.h>
static void
acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
struct acr_r370_flcn_bl_desc *desc = _desc;
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
u64 base, addr_code, addr_data;
base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
addr_code = base + pdesc->app_resident_code_offset;
addr_data = base + pdesc->app_resident_data_offset;
desc->ctx_dma = FALCON_DMAIDX_UCODE;
desc->code_dma_base = u64_to_flcn64(addr_code);
desc->non_sec_code_off = pdesc->app_resident_code_offset;
desc->non_sec_code_size = pdesc->app_resident_code_size;
desc->code_entry_point = pdesc->app_imem_entry;
desc->data_dma_base = u64_to_flcn64(addr_data);
desc->data_size = pdesc->app_resident_data_size;
}
static const struct acr_r352_lsf_func
acr_r370_ls_fecs_func_0 = {
.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
};
const struct acr_r352_ls_func
acr_r370_ls_fecs_func = {
.load = acr_ls_ucode_load_fecs,
.version_max = 0,
.version = {
&acr_r370_ls_fecs_func_0,
}
};
static const struct acr_r352_lsf_func
acr_r370_ls_gpccs_func_0 = {
.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
/* GPCCS will be loaded using PRI */
.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
const struct acr_r352_ls_func
acr_r370_ls_gpccs_func = {
.load = acr_ls_ucode_load_gpccs,
.version_max = 0,
.version = {
&acr_r370_ls_gpccs_func_0,
}
};
static void
acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
struct acr_r370_flcn_bl_desc *desc = _desc;
u64 base, addr_code, addr_data;
u32 addr_args;
base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
/* For some reason we should not add app_resident_code_offset here */
addr_code = base;
addr_data = base + pdesc->app_resident_data_offset;
addr_args = sec->falcon.data.limit;
addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
desc->ctx_dma = FALCON_SEC2_DMAIDX_UCODE;
desc->code_dma_base = u64_to_flcn64(addr_code);
desc->non_sec_code_off = pdesc->app_resident_code_offset;
desc->non_sec_code_size = pdesc->app_resident_code_size;
desc->code_entry_point = pdesc->app_imem_entry;
desc->data_dma_base = u64_to_flcn64(addr_data);
desc->data_size = pdesc->app_resident_data_size;
desc->argc = 1;
/* args are stored at the beginning of EMEM */
desc->argv = 0x01000000;
}
const struct acr_r352_lsf_func
acr_r370_ls_sec2_func_0 = {
.generate_bl_desc = acr_r370_generate_sec2_bl_desc,
.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
};
const struct acr_r352_ls_func
acr_r370_ls_sec2_func = {
.load = acr_ls_ucode_load_sec2,
.version_max = 0,
.version = {
&acr_r370_ls_sec2_func_0,
}
};
void
acr_r370_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
u64 offset)
{
struct acr_r370_flcn_bl_desc *bl_desc = _bl_desc;
bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
bl_desc->non_sec_code_off = hdr->non_sec_code_off;
bl_desc->non_sec_code_size = hdr->non_sec_code_size;
bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
bl_desc->code_entry_point = 0;
bl_desc->code_dma_base = u64_to_flcn64(offset);
bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
bl_desc->data_size = hdr->data_size;
}
const struct acr_r352_func
acr_r370_func = {
.fixup_hs_desc = acr_r367_fixup_hs_desc,
.generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
.hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.shadow_blob = true,
.ls_ucode_img_load = acr_r367_ls_ucode_img_load,
.ls_fill_headers = acr_r367_ls_fill_headers,
.ls_write_wpr = acr_r367_ls_write_wpr,
.ls_func = {
[NVKM_SECBOOT_FALCON_SEC2] = &acr_r370_ls_sec2_func,
[NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
},
};
struct nvkm_acr *
acr_r370_new(enum nvkm_secboot_falcon boot_falcon,
unsigned long managed_falcons)
{
return acr_r352_new_(&acr_r370_func, boot_falcon, managed_falcons);
}

@@ -1,49 +0,0 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_SECBOOT_ACR_R370_H__
#define __NVKM_SECBOOT_ACR_R370_H__
#include "priv.h"
struct hsf_load_header;
/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
struct acr_r370_flcn_bl_desc {
u32 reserved[4];
u32 signature[4];
u32 ctx_dma;
struct flcn_u64 code_dma_base;
u32 non_sec_code_off;
u32 non_sec_code_size;
u32 sec_code_off;
u32 sec_code_size;
u32 code_entry_point;
struct flcn_u64 data_dma_base;
u32 data_size;
u32 argc;
u32 argv;
};
void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
extern const struct acr_r352_lsf_func acr_r370_ls_sec2_func_0;
#endif

@@ -1,93 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr_r370.h"
#include "acr_r367.h"
#include <core/msgqueue.h>
#include <subdev/pmu.h>
static void
acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
struct acr_r370_flcn_bl_desc *desc = _desc;
u64 base, addr_code, addr_data;
u32 addr_args;
base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
addr_code = base + pdesc->app_resident_code_offset;
addr_data = base + pdesc->app_resident_data_offset;
addr_args = pmu->falcon.data.limit;
addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
desc->ctx_dma = FALCON_DMAIDX_UCODE;
desc->code_dma_base = u64_to_flcn64(addr_code);
desc->non_sec_code_off = pdesc->app_resident_code_offset;
desc->non_sec_code_size = pdesc->app_resident_code_size;
desc->code_entry_point = pdesc->app_imem_entry;
desc->data_dma_base = u64_to_flcn64(addr_data);
desc->data_size = pdesc->app_resident_data_size;
desc->argc = 1;
desc->argv = addr_args;
}
static const struct acr_r352_lsf_func
acr_r375_ls_pmu_func_0 = {
.generate_bl_desc = acr_r375_generate_pmu_bl_desc,
.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
};
const struct acr_r352_ls_func
acr_r375_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu,
.version_max = 0,
.version = {
&acr_r375_ls_pmu_func_0,
}
};
const struct acr_r352_func
acr_r375_func = {
.fixup_hs_desc = acr_r367_fixup_hs_desc,
.generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
.hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.shadow_blob = true,
.ls_ucode_img_load = acr_r367_ls_ucode_img_load,
.ls_fill_headers = acr_r367_ls_fill_headers,
.ls_write_wpr = acr_r367_ls_write_wpr,
.ls_func = {
[NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
[NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func,
},
};
struct nvkm_acr *
acr_r375_new(enum nvkm_secboot_falcon boot_falcon,
unsigned long managed_falcons)
{
return acr_r352_new_(&acr_r375_func, boot_falcon, managed_falcons);
}

@@ -1,213 +0,0 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Secure boot is the process by which NVIDIA-signed firmware is loaded into
* some of the falcons of a GPU. For production devices this is the only way
* for the firmware to access useful (but sensitive) registers.
*
* A Falcon microprocessor supporting advanced security modes can run in one of
* three modes:
*
* - Non-secure (NS). In this mode, functionality is similar to Falcon
* architectures before security modes were introduced (pre-Maxwell), but
* capability is restricted. In particular, certain registers may be
* inaccessible for reads and/or writes, and physical memory access may be
* disabled (on certain Falcon instances). This is the only possible mode that
* can be used if you don't have microcode cryptographically signed by NVIDIA.
*
* - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's
* not possible to read or write any Falcon internal state or Falcon registers
* from outside the Falcon (for example, from the host system). The only way
* to enable this mode is by loading microcode that has been signed by NVIDIA.
* (The loading process involves tagging the IMEM block as secure, writing the
* signature into a Falcon register, and starting execution. The hardware will
* validate the signature, and if valid, grant HS privileges.)
*
* - Light Secure (LS). In this mode, the microprocessor has more privileges
* than NS but fewer than HS. Some of the microprocessor state is visible to
* host software to ease debugging. The only way to enable this mode is by HS
* microcode enabling LS mode. Some privileges available to HS mode are not
* available here. LS mode is introduced in GM20x.
*
* Secure boot consists in temporarily switching a HS-capable falcon (typically
* PMU) into HS mode in order to validate the LS firmwares of managed falcons,
* load them, and switch managed falcons into LS mode. Once secure boot
* completes, no falcon remains in HS mode.
*
* Secure boot requires a write-protected memory region (WPR) which can only be
* written by the secure falcon. On dGPU, the driver sets up the WPR region in
* video memory. On Tegra, it is set up by the bootloader and its location and
* size written into memory controller registers.
*
* The secure boot process takes place as follows:
*
* 1) A LS blob is constructed that contains all the LS firmwares we want to
* load, along with their signatures and bootloaders.
*
* 2) A HS blob (also called ACR) is created that contains the signed HS
* firmware in charge of loading the LS firmwares into their respective
* falcons.
*
* 3) The HS blob is loaded (via its own bootloader) and executed on the
* HS-capable falcon. It authenticates itself, switches the secure falcon to
* HS mode and setup the WPR region around the LS blob (dGPU) or copies the
* LS blob into the WPR region (Tegra).
*
* 4) The LS blob is now secure from all external tampering. The HS falcon
* checks the signatures of the LS firmwares and, if valid, switches the
* managed falcons to LS mode and makes them ready to run the LS firmware.
*
* 5) The managed falcons remain in LS mode and can be started.
*
*/
#include "priv.h"
#include "acr.h"
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/pmu.h>
#include <engine/sec2.h>
const char *
nvkm_secboot_falcon_name[] = {
[NVKM_SECBOOT_FALCON_PMU] = "PMU",
[NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>",
[NVKM_SECBOOT_FALCON_FECS] = "FECS",
[NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS",
[NVKM_SECBOOT_FALCON_SEC2] = "SEC2",
[NVKM_SECBOOT_FALCON_END] = "<invalid>",
};
/**
* nvkm_secboot_reset() - reset specified falcon
*/
int
nvkm_secboot_reset(struct nvkm_secboot *sb, unsigned long falcon_mask)
{
/* Unmanaged falcon? */
if ((falcon_mask | sb->acr->managed_falcons) != sb->acr->managed_falcons) {
nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n");
return -EINVAL;
}
return sb->acr->func->reset(sb->acr, sb, falcon_mask);
}
/**
* nvkm_secboot_is_managed() - check whether a given falcon is securely-managed
*/
bool
nvkm_secboot_is_managed(struct nvkm_secboot *sb, enum nvkm_secboot_falcon fid)
{
if (!sb)
return false;
return sb->acr->managed_falcons & BIT(fid);
}
static int
nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_secboot *sb = nvkm_secboot(subdev);
int ret = 0;
switch (sb->acr->boot_falcon) {
case NVKM_SECBOOT_FALCON_PMU:
sb->halt_falcon = sb->boot_falcon = &subdev->device->pmu->falcon;
break;
case NVKM_SECBOOT_FALCON_SEC2:
/* we must keep SEC2 alive forever since ACR will run on it */
nvkm_engine_ref(&subdev->device->sec2->engine);
sb->boot_falcon = &subdev->device->sec2->falcon;
sb->halt_falcon = &subdev->device->pmu->falcon;
break;
default:
nvkm_error(subdev, "Unmanaged boot falcon %s!\n",
nvkm_secboot_falcon_name[sb->acr->boot_falcon]);
return -EINVAL;
}
nvkm_debug(subdev, "using %s falcon for ACR\n", sb->boot_falcon->name);
/* Call chip-specific init function */
if (sb->func->oneinit)
ret = sb->func->oneinit(sb);
if (ret) {
nvkm_error(subdev, "Secure Boot initialization failed: %d\n",
ret);
return ret;
}
return 0;
}
static int
nvkm_secboot_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_secboot *sb = nvkm_secboot(subdev);
int ret = 0;
if (sb->func->fini)
ret = sb->func->fini(sb, suspend);
return ret;
}
static void *
nvkm_secboot_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_secboot *sb = nvkm_secboot(subdev);
void *ret = NULL;
if (sb->func->dtor)
ret = sb->func->dtor(sb);
return ret;
}
static const struct nvkm_subdev_func
nvkm_secboot = {
.oneinit = nvkm_secboot_oneinit,
.fini = nvkm_secboot_fini,
.dtor = nvkm_secboot_dtor,
};
int
nvkm_secboot_ctor(const struct nvkm_secboot_func *func, struct nvkm_acr *acr,
struct nvkm_device *device, int index,
struct nvkm_secboot *sb)
{
unsigned long fid;
nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev);
sb->func = func;
sb->acr = acr;
acr->subdev = &sb->subdev;
nvkm_debug(&sb->subdev, "securely managed falcons:\n");
for_each_set_bit(fid, &sb->acr->managed_falcons,
NVKM_SECBOOT_FALCON_END)
nvkm_debug(&sb->subdev, "- %s\n",
nvkm_secboot_falcon_name[fid]);
return 0;
}

View File

@ -1,197 +0,0 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr.h"
#include "gm200.h"
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <engine/falcon.h>
#include <subdev/mc.h>
/**
* gm200_secboot_run_blob() - run the given high-secure blob
* @sb: secboot instance
* @blob: HS blob (ucode image) to load and execute
* @falcon: HS-capable falcon to run the blob on
*/
int
gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
struct nvkm_falcon *falcon)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
struct nvkm_subdev *subdev = &gsb->base.subdev;
struct nvkm_vma *vma = NULL;
u32 start_address;
int ret;
ret = nvkm_falcon_get(falcon, subdev);
if (ret)
return ret;
/* Map the HS firmware so the HS bootloader can see it */
ret = nvkm_vmm_get(gsb->vmm, 12, blob->size, &vma);
if (ret) {
nvkm_falcon_put(falcon, subdev);
return ret;
}
ret = nvkm_memory_map(blob, 0, gsb->vmm, vma, NULL, 0);
if (ret)
goto end;
/* Reset and set the falcon up */
ret = nvkm_falcon_reset(falcon);
if (ret)
goto end;
nvkm_falcon_bind_context(falcon, gsb->inst);
/* Load the HS bootloader into the falcon's IMEM/DMEM */
ret = sb->acr->func->load(sb->acr, falcon, blob, vma->addr);
if (ret < 0)
goto end;
start_address = ret;
/* Disable interrupts as we will poll for the HALT bit */
nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false);
/* Set default error value in mailbox register */
nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
/* Start the HS bootloader */
nvkm_falcon_set_start_addr(falcon, start_address);
nvkm_falcon_start(falcon);
ret = nvkm_falcon_wait_for_halt(falcon, 100);
if (ret)
goto end;
/*
* The mailbox register contains the (positive) error code - return this
* to the caller
*/
ret = nvkm_falcon_rd32(falcon, 0x040);
end:
/* Reenable interrupts */
nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true);
/* We don't need the ACR firmware anymore */
nvkm_vmm_put(gsb->vmm, &vma);
nvkm_falcon_put(falcon, subdev);
return ret;
}
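/*
 * Hedged usage sketch (not part of the original file): an ACR implementation
 * is expected to call this through the secboot function table, treating a
 * negative return value as a host-side failure and a positive one as the
 * error code left by the HS microcode in the mailbox register ("blob" is
 * assumed to be the HS gpuobj built by the ACR code):
 *
 *    ret = sb->func->run_blob(sb, blob, sb->boot_falcon);
 *    if (ret < 0)
 *            return ret;
 *    else if (ret > 0)
 *            return -EINVAL;
 */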
int
gm200_secboot_oneinit(struct nvkm_secboot *sb)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
struct nvkm_device *device = sb->subdev.device;
int ret;
/* Allocate instance block and VM */
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
&gsb->inst);
if (ret)
return ret;
ret = nvkm_vmm_new(device, 0, 600 * 1024, NULL, 0, NULL, "acr",
&gsb->vmm);
if (ret)
return ret;
atomic_inc(&gsb->vmm->engref[NVKM_SUBDEV_PMU]);
gsb->vmm->debug = gsb->base.subdev.debug;
ret = nvkm_vmm_join(gsb->vmm, gsb->inst);
if (ret)
return ret;
if (sb->acr->func->oneinit) {
ret = sb->acr->func->oneinit(sb->acr, sb);
if (ret)
return ret;
}
return 0;
}
int
gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend)
{
int ret = 0;
if (sb->acr->func->fini)
ret = sb->acr->func->fini(sb->acr, sb, suspend);
return ret;
}
void *
gm200_secboot_dtor(struct nvkm_secboot *sb)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
sb->acr->func->dtor(sb->acr);
nvkm_vmm_part(gsb->vmm, gsb->inst);
nvkm_vmm_unref(&gsb->vmm);
nvkm_memory_unref(&gsb->inst);
return gsb;
}
static const struct nvkm_secboot_func
gm200_secboot = {
.dtor = gm200_secboot_dtor,
.oneinit = gm200_secboot_oneinit,
.fini = gm200_secboot_fini,
.run_blob = gm200_secboot_run_blob,
};
int
gm200_secboot_new(struct nvkm_device *device, int index,
struct nvkm_secboot **psb)
{
int ret;
struct gm200_secboot *gsb;
struct nvkm_acr *acr;
acr = acr_r361_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
BIT(NVKM_SECBOOT_FALCON_GPCCS));
if (IS_ERR(acr))
return PTR_ERR(acr);
gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
if (!gsb) {
acr->func->dtor(acr);
*psb = NULL;
return -ENOMEM;
}
*psb = &gsb->base;
ret = nvkm_secboot_ctor(&gm200_secboot, acr, device, index, &gsb->base);
if (ret)
return ret;
return 0;
}

View File

@ -1,46 +0,0 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_SECBOOT_GM200_H__
#define __NVKM_SECBOOT_GM200_H__
#include "priv.h"
struct gm200_secboot {
struct nvkm_secboot base;
/* Instance block & address space used for HS FW execution */
struct nvkm_memory *inst;
struct nvkm_vmm *vmm;
};
#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
int gm200_secboot_oneinit(struct nvkm_secboot *);
int gm200_secboot_fini(struct nvkm_secboot *, bool);
void *gm200_secboot_dtor(struct nvkm_secboot *);
int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *,
struct nvkm_falcon *);
/* Tegra-only */
int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *);
#endif

View File

@ -1,150 +0,0 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr.h"
#include "gm200.h"
#ifdef CONFIG_ARCH_TEGRA
/**
* gm20b_secboot_tegra_read_wpr() - read the WPR registers on Tegra
*
* On dGPU, we can manage the WPR region ourselves, but on Tegra this region
* is allocated from system memory by the secure firmware. The region is then
* marked as a "secure carveout" and irreversibly locked. Furthermore, the WPR
* secure carveout is also configured to be sent to the GPU via a dedicated
* serial bus between the memory controller and the GPU. The GPU requests this
* information upon leaving reset and exposes it through a FIFO register at
* offset 0x100cd4.
*
* The FIFO register's lower 4 bits can be used to set the read index into the
* FIFO. After each read of the FIFO register, the read index is incremented.
*
* Indices 2 and 3 contain the lower and upper addresses of the WPR. These are
* stored in units of 256 B. The WPR is inclusive of both addresses.
*
* Unfortunately, for some reason the WPR info register doesn't contain the
* correct values for the secure carveout. It seems like the upper address is
* always too small by 128 KiB - 1. Given that the secure carveout size in the
* memory controller configuration is specified in units of 128 KiB, it's
* possible that the computation of the upper address of the WPR is wrong and
* causes this difference.
*/
int
gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb)
{
struct nvkm_device *device = gsb->base.subdev.device;
struct nvkm_secboot *sb = &gsb->base;
u64 base, limit;
u32 value;
/* set WPR info register to point at WPR base address register */
value = nvkm_rd32(device, 0x100cd4);
value &= ~0xf;
value |= 0x2;
nvkm_wr32(device, 0x100cd4, value);
/* read base address */
value = nvkm_rd32(device, 0x100cd4);
base = (u64)(value >> 4) << 12;
/* read limit */
value = nvkm_rd32(device, 0x100cd4);
limit = (u64)(value >> 4) << 12;
/*
* The upper address of the WPR seems to be computed wrongly and is
* actually SZ_128K - 1 bytes lower than it should be. Adjust the
* value accordingly.
*/
limit += SZ_128K - 1;
sb->wpr_size = limit - base + 1;
sb->wpr_addr = base;
nvkm_info(&sb->subdev, "WPR: %016llx-%016llx\n", sb->wpr_addr,
sb->wpr_addr + sb->wpr_size - 1);
/* Check that WPR settings are valid */
if (sb->wpr_size == 0) {
nvkm_error(&sb->subdev, "WPR region is empty\n");
return -EINVAL;
}
return 0;
}
#else
int
gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb)
{
nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n");
return -EINVAL;
}
#endif
static int
gm20b_secboot_oneinit(struct nvkm_secboot *sb)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
int ret;
ret = gm20b_secboot_tegra_read_wpr(gsb);
if (ret)
return ret;
return gm200_secboot_oneinit(sb);
}
static const struct nvkm_secboot_func
gm20b_secboot = {
.dtor = gm200_secboot_dtor,
.oneinit = gm20b_secboot_oneinit,
.fini = gm200_secboot_fini,
.run_blob = gm200_secboot_run_blob,
};
int
gm20b_secboot_new(struct nvkm_device *device, int index,
struct nvkm_secboot **psb)
{
int ret;
struct gm200_secboot *gsb;
struct nvkm_acr *acr;
*psb = NULL;
acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
BIT(NVKM_SECBOOT_FALCON_PMU));
if (IS_ERR(acr))
return PTR_ERR(acr);
/* Support the initial GM20B firmware release without PMU */
acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU);
gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
if (!gsb) {
acr->func->dtor(acr);
return -ENOMEM;
}
*psb = &gsb->base;
ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base);
if (ret)
return ret;
return 0;
}

View File

@ -1,61 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr.h"
#include "gm200.h"
const struct nvkm_secboot_func
gp102_secboot = {
.dtor = gm200_secboot_dtor,
.oneinit = gm200_secboot_oneinit,
.fini = gm200_secboot_fini,
.run_blob = gm200_secboot_run_blob,
};
int
gp102_secboot_new(struct nvkm_device *device, int index,
struct nvkm_secboot **psb)
{
int ret;
struct gm200_secboot *gsb;
struct nvkm_acr *acr;
acr = acr_r367_new(NVKM_SECBOOT_FALCON_SEC2,
BIT(NVKM_SECBOOT_FALCON_FECS) |
BIT(NVKM_SECBOOT_FALCON_GPCCS) |
BIT(NVKM_SECBOOT_FALCON_SEC2));
if (IS_ERR(acr))
return PTR_ERR(acr);
gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
if (!gsb) {
acr->func->dtor(acr);
*psb = NULL;
return -ENOMEM;
}
*psb = &gsb->base;
ret = nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
if (ret)
return ret;
return 0;
}

View File

@ -1,46 +0,0 @@
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "gm200.h"
#include "acr.h"
int
gp108_secboot_new(struct nvkm_device *device, int index,
struct nvkm_secboot **psb)
{
struct gm200_secboot *gsb;
struct nvkm_acr *acr;
acr = acr_r370_new(NVKM_SECBOOT_FALCON_SEC2,
BIT(NVKM_SECBOOT_FALCON_FECS) |
BIT(NVKM_SECBOOT_FALCON_GPCCS) |
BIT(NVKM_SECBOOT_FALCON_SEC2));
if (IS_ERR(acr))
return PTR_ERR(acr);
if (!(gsb = kzalloc(sizeof(*gsb), GFP_KERNEL))) {
acr->func->dtor(acr);
return -ENOMEM;
}
*psb = &gsb->base;
return nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
}

View File

@ -1,73 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr.h"
#include "gm200.h"
static int
gp10b_secboot_oneinit(struct nvkm_secboot *sb)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
int ret;
ret = gm20b_secboot_tegra_read_wpr(gsb);
if (ret)
return ret;
return gm200_secboot_oneinit(sb);
}
static const struct nvkm_secboot_func
gp10b_secboot = {
.dtor = gm200_secboot_dtor,
.oneinit = gp10b_secboot_oneinit,
.fini = gm200_secboot_fini,
.run_blob = gm200_secboot_run_blob,
};
int
gp10b_secboot_new(struct nvkm_device *device, int index,
struct nvkm_secboot **psb)
{
int ret;
struct gm200_secboot *gsb;
struct nvkm_acr *acr;
acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
BIT(NVKM_SECBOOT_FALCON_GPCCS) |
BIT(NVKM_SECBOOT_FALCON_PMU));
if (IS_ERR(acr))
return PTR_ERR(acr);
gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
if (!gsb) {
acr->func->dtor(acr);
*psb = NULL;
return -ENOMEM;
}
*psb = &gsb->base;
ret = nvkm_secboot_ctor(&gp10b_secboot, acr, device, index, &gsb->base);
if (ret)
return ret;
return 0;
}

View File

@ -1,97 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "hs_ucode.h"
#include "ls_ucode.h"
#include "acr.h"
#include <engine/falcon.h>
/**
* hs_ucode_patch_signature() - patch HS blob with correct signature for
* specified falcon.
*/
static void
hs_ucode_patch_signature(const struct nvkm_falcon *falcon, void *acr_image,
bool new_format)
{
struct fw_bin_header *hsbin_hdr = acr_image;
struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
void *hs_data = acr_image + hsbin_hdr->data_offset;
void *sig;
u32 sig_size;
u32 patch_loc, patch_sig;
/*
* I had the brilliant idea to "improve" the binary format by
* removing this useless indirection. However, to make NVIDIA files
* directly compatible, let's support both formats.
*/
if (new_format) {
patch_loc = fw_hdr->patch_loc;
patch_sig = fw_hdr->patch_sig;
} else {
patch_loc = *(u32 *)(acr_image + fw_hdr->patch_loc);
patch_sig = *(u32 *)(acr_image + fw_hdr->patch_sig);
}
/* Falcon in debug or production mode? */
if (falcon->debug) {
sig = acr_image + fw_hdr->sig_dbg_offset;
sig_size = fw_hdr->sig_dbg_size;
} else {
sig = acr_image + fw_hdr->sig_prod_offset;
sig_size = fw_hdr->sig_prod_size;
}
/* Patch signature */
memcpy(hs_data + patch_loc, sig + patch_sig, sig_size);
}
void *
hs_ucode_load_blob(struct nvkm_subdev *subdev, const struct nvkm_falcon *falcon,
const char *fw)
{
void *acr_image;
bool new_format;
acr_image = nvkm_acr_load_firmware(subdev, fw, 0);
if (IS_ERR(acr_image))
return acr_image;
/* detect the format to determine how the signature should be patched */
switch (((u32 *)acr_image)[0]) {
case 0x3b1d14f0:
new_format = true;
break;
case 0x000010de:
new_format = false;
break;
default:
nvkm_error(subdev, "unknown header for HS blob %s\n", fw);
return ERR_PTR(-EINVAL);
}
hs_ucode_patch_signature(falcon, acr_image, new_format);
return acr_image;
}
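/*
 * Hedged usage sketch (not part of the original file): an ACR implementation
 * is expected to load its HS firmware roughly as follows (the
 * "acr/ucode_load" firmware name is illustrative):
 *
 *    acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, "acr/ucode_load");
 *    if (IS_ERR(acr_image))
 *            return PTR_ERR(acr_image);
 */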

View File

@ -1,81 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_SECBOOT_HS_UCODE_H__
#define __NVKM_SECBOOT_HS_UCODE_H__
#include <core/os.h>
#include <core/subdev.h>
struct nvkm_falcon;
/**
* struct hsf_fw_header - HS firmware descriptor
* @sig_dbg_offset: offset of the debug signature
* @sig_dbg_size: size of the debug signature
* @sig_prod_offset: offset of the production signature
* @sig_prod_size: size of the production signature
* @patch_loc: offset of the offset (sic) of where the signature is
* @patch_sig: offset of the offset (sic) to add to sig_*_offset
* @hdr_offset: offset of the load header (see struct hs_load_header)
* @hdr_size: size of above header
*
* This structure is embedded in the HS firmware image at
* hs_bin_hdr.header_offset.
*/
struct hsf_fw_header {
u32 sig_dbg_offset;
u32 sig_dbg_size;
u32 sig_prod_offset;
u32 sig_prod_size;
u32 patch_loc;
u32 patch_sig;
u32 hdr_offset;
u32 hdr_size;
};
/**
* struct hsf_load_header - HS firmware load header
*/
struct hsf_load_header {
u32 non_sec_code_off;
u32 non_sec_code_size;
u32 data_dma_base;
u32 data_size;
u32 num_apps;
/*
* Organized as follows:
* - app0_code_off
* - app1_code_off
* - ...
* - appn_code_off
* - app0_code_size
* - app1_code_size
* - ...
*/
u32 apps[0];
};
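/*
 * Hedged illustration (not part of the original header): given the apps[]
 * layout described above, per-application offsets and sizes would be read
 * as follows (helper names are illustrative):
 *
 *    static inline u32
 *    hsf_load_header_app_off(const struct hsf_load_header *hdr, u32 app)
 *    {
 *            return hdr->apps[app];
 *    }
 *
 *    static inline u32
 *    hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
 *    {
 *            return hdr->apps[hdr->num_apps + app];
 *    }
 */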
void *hs_ucode_load_blob(struct nvkm_subdev *, const struct nvkm_falcon *,
const char *);
#endif

View File

@ -1,161 +0,0 @@
/*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_SECBOOT_LS_UCODE_H__
#define __NVKM_SECBOOT_LS_UCODE_H__
#include <core/os.h>
#include <core/subdev.h>
#include <subdev/secboot.h>
struct nvkm_acr;
/**
* struct ls_ucode_img_desc - descriptor of firmware image
* @descriptor_size: size of this descriptor
* @image_size: size of the whole image
* @bootloader_start_offset: start offset of the bootloader in ucode image
* @bootloader_size: size of the bootloader
* @bootloader_imem_offset: start offset of the bootloader in IMEM
* @bootloader_entry_point: entry point of the bootloader in IMEM
* @app_start_offset: start offset of the LS firmware
* @app_size: size of the LS firmware's code and data
* @app_imem_offset: offset of the app in IMEM
* @app_imem_entry: entry point of the app in IMEM
* @app_dmem_offset: offset of the data in DMEM
* @app_resident_code_offset: offset of app code from app_start_offset
* @app_resident_code_size: size of the code
* @app_resident_data_offset: offset of data from app_start_offset
* @app_resident_data_size: size of data
*
* A firmware image contains the code, data, and bootloader of a given LS
* falcon in a single blob. This structure describes where everything is.
*
* This can be generated from a (bootloader, code, data) set if they have
* been loaded separately, or come directly from a file.
*/
struct ls_ucode_img_desc {
u32 descriptor_size;
u32 image_size;
u32 tools_version;
u32 app_version;
char date[64];
u32 bootloader_start_offset;
u32 bootloader_size;
u32 bootloader_imem_offset;
u32 bootloader_entry_point;
u32 app_start_offset;
u32 app_size;
u32 app_imem_offset;
u32 app_imem_entry;
u32 app_dmem_offset;
u32 app_resident_code_offset;
u32 app_resident_code_size;
u32 app_resident_data_offset;
u32 app_resident_data_size;
u32 nb_overlays;
struct {u32 start; u32 size; } load_ovl[64];
u32 compressed;
};
/**
* struct ls_ucode_img - temporary storage for loaded LS firmwares
* @node: to link within lsf_ucode_mgr
* @falcon_id: ID of the falcon this LS firmware is for
* @ucode_desc: loaded or generated map of ucode_data
* @ucode_data: firmware payload (code and data)
* @ucode_size: size in bytes of data in ucode_data
* @ucode_off: offset of the ucode in ucode_data
* @sig: signature for this firmware
* @sig_size: size of the signature in bytes
*
* Preparing the WPR LS blob requires information about all the LS firmwares
* (size, etc) to be known. This structure contains all the data of one LS
* firmware.
*/
struct ls_ucode_img {
struct list_head node;
enum nvkm_secboot_falcon falcon_id;
struct ls_ucode_img_desc ucode_desc;
u8 *ucode_data;
u32 ucode_size;
u32 ucode_off;
u8 *sig;
u32 sig_size;
};
/**
* struct fw_bin_header - header of firmware files
* @bin_magic: always 0x3b1d14f0
* @bin_ver: version of the bin format
* @bin_size: entire image size including this header
* @header_offset: offset of the firmware/bootloader header in the file
* @data_offset: offset of the firmware/bootloader payload in the file
* @data_size: size of the payload
*
* This header is located at the beginning of the HS firmware and HS bootloader
* files, to describe where the headers and data can be found.
*/
struct fw_bin_header {
u32 bin_magic;
u32 bin_ver;
u32 bin_size;
u32 header_offset;
u32 data_offset;
u32 data_size;
};
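/*
 * Hedged illustration (not part of the original header): a firmware file is
 * typically interpreted by first checking bin_magic and then locating the
 * header and payload through the offsets above, e.g.:
 *
 *    struct fw_bin_header *hdr = blob;
 *
 *    if (hdr->bin_magic != 0x3b1d14f0)
 *            return -EINVAL;
 *    fw_hdr = blob + hdr->header_offset;
 *    fw_data = blob + hdr->data_offset;
 */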
/**
* struct fw_bl_desc - firmware bootloader descriptor
* @start_tag: starting tag of bootloader
* @dmem_load_off: DMEM offset of flcn_bl_dmem_desc
* @code_off: offset of code section
* @code_size: size of code section
* @data_off: offset of data section
* @data_size: size of data section
*
* This structure is embedded in bootloader firmware files to describe the
* IMEM and DMEM layout expected by the bootloader.
*/
struct fw_bl_desc {
u32 start_tag;
u32 dmem_load_off;
u32 code_off;
u32 code_size;
u32 data_off;
u32 data_size;
};
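/*
 * Hedged illustration (not part of the original header): ls_ucode_img_build()
 * uses this descriptor to place the bootloader in IMEM, with start_tag given
 * in units of 256-byte blocks:
 *
 *    desc->bootloader_imem_offset = bl_desc->start_tag * 256;
 *    desc->bootloader_entry_point = bl_desc->start_tag * 256;
 */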
int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, int,
struct ls_ucode_img *);
int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, int,
struct ls_ucode_img *);
int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, int,
struct ls_ucode_img *);
int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, int,
struct ls_ucode_img *);
int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
#endif

View File

@ -1,160 +0,0 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "ls_ucode.h"
#include "acr.h"
#include <core/firmware.h>
#define BL_DESC_BLK_SIZE 256
/**
* ls_ucode_img_build() - build a ucode image and descriptor from the provided
* bootloader, code and data
*
* @bl: bootloader image, including its 16-byte descriptor
* @code: LS firmware code segment
* @data: LS firmware data segment
* @desc: ucode descriptor to be written
*
* Return: the allocated ucode image with corresponding descriptor information.
* @desc is also updated to contain the right offsets within the returned image.
*/
static void *
ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
const struct firmware *data, struct ls_ucode_img_desc *desc)
{
struct fw_bin_header *bin_hdr = (void *)bl->data;
struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset;
void *bl_data = (void *)bl->data + bin_hdr->data_offset;
u32 pos = 0;
void *image;
desc->bootloader_start_offset = pos;
desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32));
desc->bootloader_imem_offset = bl_desc->start_tag * 256;
desc->bootloader_entry_point = bl_desc->start_tag * 256;
pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE);
desc->app_start_offset = pos;
desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) +
ALIGN(data->size, BL_DESC_BLK_SIZE);
desc->app_imem_offset = 0;
desc->app_imem_entry = 0;
desc->app_dmem_offset = 0;
desc->app_resident_code_offset = 0;
desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE);
pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE);
desc->app_resident_data_offset = pos - desc->app_start_offset;
desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE);
desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) +
desc->app_size;
image = kzalloc(desc->image_size, GFP_KERNEL);
if (!image)
return ERR_PTR(-ENOMEM);
memcpy(image + desc->bootloader_start_offset, bl_data,
bl_desc->code_size);
memcpy(image + desc->app_start_offset, code->data, code->size);
memcpy(image + desc->app_start_offset + desc->app_resident_data_offset,
data->data, data->size);
return image;
}
/**
* ls_ucode_img_load_gr() - load and prepare a LS GR ucode image
*
* Load the LS microcode, bootloader and signature and pack them into a single
* blob. Also generate the corresponding ucode descriptor.
*/
static int
ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, int maxver,
struct ls_ucode_img *img, const char *falcon_name)
{
const struct firmware *bl, *code, *data, *sig;
char f[64];
int ret;
snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
ret = nvkm_firmware_get(subdev, f, &bl);
if (ret)
goto error;
snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
ret = nvkm_firmware_get(subdev, f, &code);
if (ret)
goto free_bl;
snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
ret = nvkm_firmware_get(subdev, f, &data);
if (ret)
goto free_inst;
snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
ret = nvkm_firmware_get(subdev, f, &sig);
if (ret)
goto free_data;
img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
if (!img->sig) {
ret = -ENOMEM;
goto free_sig;
}
img->sig_size = sig->size;
img->ucode_data = ls_ucode_img_build(bl, code, data,
&img->ucode_desc);
if (IS_ERR(img->ucode_data)) {
kfree(img->sig);
ret = PTR_ERR(img->ucode_data);
goto free_sig;
}
img->ucode_size = img->ucode_desc.image_size;
free_sig:
nvkm_firmware_put(sig);
free_data:
nvkm_firmware_put(data);
free_inst:
nvkm_firmware_put(code);
free_bl:
nvkm_firmware_put(bl);
error:
return ret;
}
int
acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, int maxver,
struct ls_ucode_img *img)
{
return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "fecs");
}
int
acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, int maxver,
struct ls_ucode_img *img)
{
return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "gpccs");
}

View File

@ -1,102 +0,0 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "ls_ucode.h"
#include "acr.h"
#include <core/firmware.h>
#include <core/msgqueue.h>
#include <subdev/pmu.h>
#include <engine/sec2.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
/**
* acr_ls_ucode_load_msgqueue() - load and prepare a ucode image for a
* msgqueue firmware
*
* Load the LS microcode, desc and signature and pack them into a single
* blob.
*/
static int
acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
int maxver, struct ls_ucode_img *img)
{
const struct firmware *image, *desc, *sig;
char f[64];
int ver, ret;
snprintf(f, sizeof(f), "%s/image", name);
ver = nvkm_firmware_get_version(subdev, f, 0, maxver, &image);
if (ver < 0)
return ver;
img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL);
nvkm_firmware_put(image);
if (!img->ucode_data)
return -ENOMEM;
snprintf(f, sizeof(f), "%s/desc", name);
ret = nvkm_firmware_get_version(subdev, f, ver, ver, &desc);
if (ret < 0)
return ret;
memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc));
img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256);
nvkm_firmware_put(desc);
snprintf(f, sizeof(f), "%s/sig", name);
ret = nvkm_firmware_get_version(subdev, f, ver, ver, &sig);
if (ret < 0)
return ret;
img->sig_size = sig->size;
img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
nvkm_firmware_put(sig);
if (!img->sig)
return -ENOMEM;
return ver;
}
int
acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver,
struct ls_ucode_img *img)
{
int ret;
ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", maxver, img);
if (ret)
return ret;
return 0;
}
int
acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, int maxver,
struct ls_ucode_img *img)
{
int ver;
ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", maxver, img);
if (ver < 0)
return ver;
return ver;
}

View File

@ -1,65 +0,0 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_SECBOOT_PRIV_H__
#define __NVKM_SECBOOT_PRIV_H__
#include <subdev/secboot.h>
#include <subdev/mmu.h>
struct nvkm_gpuobj;
struct nvkm_secboot_func {
int (*oneinit)(struct nvkm_secboot *);
int (*fini)(struct nvkm_secboot *, bool suspend);
void *(*dtor)(struct nvkm_secboot *);
int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *,
struct nvkm_falcon *);
};
int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
struct nvkm_device *, int, struct nvkm_secboot *);
int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
int nvkm_secboot_falcon_run(struct nvkm_secboot *);
extern const struct nvkm_secboot_func gp102_secboot;
struct flcn_u64 {
u32 lo;
u32 hi;
};
static inline u64 flcn64_to_u64(const struct flcn_u64 f)
{
return ((u64)f.hi) << 32 | f.lo;
}
static inline struct flcn_u64 u64_to_flcn64(u64 u)
{
struct flcn_u64 ret;
ret.hi = upper_32_bits(u);
ret.lo = lower_32_bits(u);
return ret;
}
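/*
 * Hedged usage sketch (not part of the original header): struct flcn_u64 is
 * used to pass 64-bit addresses to falcon firmware descriptors in the split
 * lo/hi form the firmware expects, e.g. (field name illustrative):
 *
 *    desc->code_dma_base = u64_to_flcn64(wpr_addr + img->ucode_off);
 */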
#endif