linux_dsm_epyc7002/drivers/tee/optee/core.c
Victor Chong 3c15ddb97c tee: optee: log message if dynamic shm is enabled
When dynamic shared memory support is enabled in the OP-TEE Trusted
OS, that alone doesn't mean the driver supports it, which can confuse
users during debugging. Log a message when dynamic shared memory is
enabled in the driver, so users know for sure.

Suggested-by: Jerome Forissier <jerome.forissier@linaro.org>
Signed-off-by: Victor Chong <victor.chong@linaro.org>
Reviewed-by: Jerome Forissier <jerome.forissier@linaro.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
2018-12-11 14:38:40 +01:00
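A rough illustration of the effect (assuming the driver is built as the
"optee" module, so pr_fmt() prefixes its messages with "optee: "): on a
system where both the secure OS and the driver support dynamic shared
memory, the probe path below is expected to log something like

    optee: dynamic shared memory is enabled
    optee: initialized driver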

/*
* Copyright (c) 2015, Linaro Limited
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"
#include "shm_pool.h"
#define DRIVER_NAME "optee"
#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
/**
* optee_from_msg_param() - convert from OPTEE_MSG parameters to
* struct tee_param
* @params: subsystem internal parameter representation
* @num_params: number of elements in the parameter arrays
* @msg_params: OPTEE_MSG parameters
* Returns 0 on success or <0 on failure
*/
int optee_from_msg_param(struct tee_param *params, size_t num_params,
const struct optee_msg_param *msg_params)
{
int rc;
size_t n;
struct tee_shm *shm;
phys_addr_t pa;
for (n = 0; n < num_params; n++) {
struct tee_param *p = params + n;
const struct optee_msg_param *mp = msg_params + n;
u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
switch (attr) {
case OPTEE_MSG_ATTR_TYPE_NONE:
p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
memset(&p->u, 0, sizeof(p->u));
break;
case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
p->u.value.a = mp->u.value.a;
p->u.value.b = mp->u.value.b;
p->u.value.c = mp->u.value.c;
break;
case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
p->u.memref.size = mp->u.tmem.size;
shm = (struct tee_shm *)(unsigned long)
mp->u.tmem.shm_ref;
if (!shm) {
p->u.memref.shm_offs = 0;
p->u.memref.shm = NULL;
break;
}
rc = tee_shm_get_pa(shm, 0, &pa);
if (rc)
return rc;
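/*
* buf_ptr is a physical address within the shared memory object;
* subtracting the object's physical base (pa) yields the offset
* used by the tee subsystem.
*/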
p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
p->u.memref.shm = shm;
/* Check that the memref is covered by the shm object */
if (p->u.memref.size) {
size_t o = p->u.memref.shm_offs +
p->u.memref.size - 1;
rc = tee_shm_get_pa(shm, o, NULL);
if (rc)
return rc;
}
break;
case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
p->u.memref.size = mp->u.rmem.size;
shm = (struct tee_shm *)(unsigned long)
mp->u.rmem.shm_ref;
if (!shm) {
p->u.memref.shm_offs = 0;
p->u.memref.shm = NULL;
break;
}
p->u.memref.shm_offs = mp->u.rmem.offs;
p->u.memref.shm = shm;
break;
default:
return -EINVAL;
}
}
return 0;
}
static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
const struct tee_param *p)
{
int rc;
phys_addr_t pa;
mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
mp->u.tmem.size = p->u.memref.size;
if (!p->u.memref.shm) {
mp->u.tmem.buf_ptr = 0;
return 0;
}
rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
if (rc)
return rc;
mp->u.tmem.buf_ptr = pa;
mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
OPTEE_MSG_ATTR_CACHE_SHIFT;
return 0;
}
static int to_msg_param_reg_mem(struct optee_msg_param *mp,
const struct tee_param *p)
{
mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
mp->u.rmem.size = p->u.memref.size;
mp->u.rmem.offs = p->u.memref.shm_offs;
return 0;
}
/**
* optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG parameters
* @msg_params: OPTEE_MSG parameters
* @num_params: number of elements in the parameter arrays
* @params: subsystem internal parameter representation
* Returns 0 on success or <0 on failure
*/
int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
const struct tee_param *params)
{
int rc;
size_t n;
for (n = 0; n < num_params; n++) {
const struct tee_param *p = params + n;
struct optee_msg_param *mp = msg_params + n;
switch (p->attr) {
case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
memset(&mp->u, 0, sizeof(mp->u));
break;
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
mp->u.value.a = p->u.value.a;
mp->u.value.b = p->u.value.b;
mp->u.value.c = p->u.value.c;
break;
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
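/*
* Registered (dynamic) shared memory is described to secure world
* by shm reference and offset (RMEM), while memory from the
* reserved pool is described by its physical address (TMEM).
*/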
if (tee_shm_is_registered(p->u.memref.shm))
rc = to_msg_param_reg_mem(mp, p);
else
rc = to_msg_param_tmp_mem(mp, p);
if (rc)
return rc;
break;
default:
return -EINVAL;
}
}
return 0;
}
static void optee_get_version(struct tee_device *teedev,
struct tee_ioctl_version_data *vers)
{
struct tee_ioctl_version_data v = {
.impl_id = TEE_IMPL_ID_OPTEE,
.impl_caps = TEE_OPTEE_CAP_TZ,
.gen_caps = TEE_GEN_CAP_GP,
};
struct optee *optee = tee_get_drvdata(teedev);
if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
v.gen_caps |= TEE_GEN_CAP_REG_MEM;
*vers = v;
}
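/*
* Illustrative only (not part of this driver): user space can detect
* dynamic shared memory support through the capability reported above,
* using the tee uapi from <linux/tee.h>, e.g.:
*
*	int fd = open("/dev/tee0", O_RDWR);
*	struct tee_ioctl_version_data vers;
*
*	if (fd >= 0 && !ioctl(fd, TEE_IOC_VERSION, &vers) &&
*	    (vers.gen_caps & TEE_GEN_CAP_REG_MEM))
*		printf("dynamic (registered) shared memory supported\n");
*/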
static int optee_open(struct tee_context *ctx)
{
struct optee_context_data *ctxdata;
struct tee_device *teedev = ctx->teedev;
struct optee *optee = tee_get_drvdata(teedev);
ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
if (!ctxdata)
return -ENOMEM;
if (teedev == optee->supp_teedev) {
bool busy = true;
mutex_lock(&optee->supp.mutex);
if (!optee->supp.ctx) {
busy = false;
optee->supp.ctx = ctx;
}
mutex_unlock(&optee->supp.mutex);
if (busy) {
kfree(ctxdata);
return -EBUSY;
}
}
mutex_init(&ctxdata->mutex);
INIT_LIST_HEAD(&ctxdata->sess_list);
ctx->data = ctxdata;
return 0;
}
static void optee_release(struct tee_context *ctx)
{
struct optee_context_data *ctxdata = ctx->data;
struct tee_device *teedev = ctx->teedev;
struct optee *optee = tee_get_drvdata(teedev);
struct tee_shm *shm;
struct optee_msg_arg *arg = NULL;
phys_addr_t parg;
struct optee_session *sess;
struct optee_session *sess_tmp;
if (!ctxdata)
return;
shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
if (!IS_ERR(shm)) {
arg = tee_shm_get_va(shm, 0);
/*
* If va2pa fails for some reason, we can't call into
* secure world, only free the memory. Secure OS will leak
* sessions and finally refuse more sessions, but we will
* at least let normal world reclaim its memory.
*/
if (!IS_ERR(arg))
if (tee_shm_va2pa(shm, arg, &parg))
arg = NULL; /* prevent usage of parg below */
}
list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
list_node) {
list_del(&sess->list_node);
if (!IS_ERR_OR_NULL(arg)) {
memset(arg, 0, sizeof(*arg));
arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
arg->session = sess->session_id;
optee_do_call_with_arg(ctx, parg);
}
kfree(sess);
}
kfree(ctxdata);
if (!IS_ERR(shm))
tee_shm_free(shm);
ctx->data = NULL;
if (teedev == optee->supp_teedev)
optee_supp_release(&optee->supp);
}
static const struct tee_driver_ops optee_ops = {
.get_version = optee_get_version,
.open = optee_open,
.release = optee_release,
.open_session = optee_open_session,
.close_session = optee_close_session,
.invoke_func = optee_invoke_func,
.cancel_req = optee_cancel_req,
.shm_register = optee_shm_register,
.shm_unregister = optee_shm_unregister,
};
static const struct tee_desc optee_desc = {
.name = DRIVER_NAME "-clnt",
.ops = &optee_ops,
.owner = THIS_MODULE,
};
static const struct tee_driver_ops optee_supp_ops = {
.get_version = optee_get_version,
.open = optee_open,
.release = optee_release,
.supp_recv = optee_supp_recv,
.supp_send = optee_supp_send,
.shm_register = optee_shm_register_supp,
.shm_unregister = optee_shm_unregister_supp,
};
static const struct tee_desc optee_supp_desc = {
.name = DRIVER_NAME "-supp",
.ops = &optee_supp_ops,
.owner = THIS_MODULE,
.flags = TEE_DESC_PRIVILEGED,
};
static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
{
struct arm_smccc_res res;
invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
return true;
return false;
}
static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
{
union {
struct arm_smccc_res smccc;
struct optee_smc_call_get_os_revision_result result;
} res = {
.result = {
.build_id = 0
}
};
invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
&res.smccc);
if (res.result.build_id)
pr_info("revision %lu.%lu (%08lx)", res.result.major,
res.result.minor, res.result.build_id);
else
pr_info("revision %lu.%lu", res.result.major, res.result.minor);
}
static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
{
union {
struct arm_smccc_res smccc;
struct optee_smc_calls_revision_result result;
} res;
invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
(int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
return true;
return false;
}
static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
u32 *sec_caps)
{
union {
struct arm_smccc_res smccc;
struct optee_smc_exchange_capabilities_result result;
} res;
u32 a1 = 0;
/*
* TODO This isn't enough to tell if it's a UP system (from the
* kernel's point of view) or not, is_smp() returns the information
* needed, but it can't be called directly from here.
*/
if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
&res.smccc);
if (res.result.status != OPTEE_SMC_RETURN_OK)
return false;
*sec_caps = res.result.capabilities;
return true;
}
static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
u32 sec_caps)
{
union {
struct arm_smccc_res smccc;
struct optee_smc_get_shm_config_result result;
} res;
unsigned long vaddr;
phys_addr_t paddr;
size_t size;
phys_addr_t begin;
phys_addr_t end;
void *va;
struct tee_shm_pool_mgr *priv_mgr;
struct tee_shm_pool_mgr *dmabuf_mgr;
void *rc;
invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
if (res.result.status != OPTEE_SMC_RETURN_OK) {
pr_info("shm service not available\n");
return ERR_PTR(-ENOENT);
}
if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
pr_err("only normal cached shared memory supported\n");
return ERR_PTR(-EINVAL);
}
begin = roundup(res.result.start, PAGE_SIZE);
end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
paddr = begin;
size = end - begin;
if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
pr_err("too small shared memory area\n");
return ERR_PTR(-EINVAL);
}
va = memremap(paddr, size, MEMREMAP_WB);
if (!va) {
pr_err("shared memory ioremap failed\n");
return ERR_PTR(-EINVAL);
}
vaddr = (unsigned long)va;
/*
* If OP-TEE can work with unregistered SHM, we will use our own pool
* for private shm
*/
if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
rc = optee_shm_pool_alloc_pages();
if (IS_ERR(rc))
goto err_memunmap;
priv_mgr = rc;
} else {
const size_t sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
3 /* 8 bytes aligned */);
if (IS_ERR(rc))
goto err_memunmap;
priv_mgr = rc;
vaddr += sz;
paddr += sz;
size -= sz;
}
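/*
* Whatever remains of the memremapped region (all of it when dynamic
* SHM is supported) backs the page-aligned pool used for dmabuf
* allocations.
*/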
rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
if (IS_ERR(rc))
goto err_free_priv_mgr;
dmabuf_mgr = rc;
rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
if (IS_ERR(rc))
goto err_free_dmabuf_mgr;
*memremaped_shm = va;
return rc;
err_free_dmabuf_mgr:
tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_free_priv_mgr:
tee_shm_pool_mgr_destroy(priv_mgr);
err_memunmap:
memunmap(va);
return rc;
}
/* Simple wrapper functions to be able to use a function pointer */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3,
unsigned long a4, unsigned long a5,
unsigned long a6, unsigned long a7,
struct arm_smccc_res *res)
{
arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3,
unsigned long a4, unsigned long a5,
unsigned long a6, unsigned long a7,
struct arm_smccc_res *res)
{
arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
static optee_invoke_fn *get_invoke_func(struct device_node *np)
{
const char *method;
pr_info("probing for conduit method from DT.\n");
if (of_property_read_string(np, "method", &method)) {
pr_warn("missing \"method\" property\n");
return ERR_PTR(-ENXIO);
}
if (!strcmp("hvc", method))
return optee_smccc_hvc;
else if (!strcmp("smc", method))
return optee_smccc_smc;
pr_warn("invalid \"method\" property: %s\n", method);
return ERR_PTR(-EINVAL);
}
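/*
* Illustrative device tree fragment (not part of this file) showing the
* layout the driver expects: an OP-TEE node below /firmware, matched by
* the "linaro,optee-tz" compatible further down, with the "method"
* property parsed above selecting "smc" or "hvc":
*
*	firmware {
*		optee {
*			compatible = "linaro,optee-tz";
*			method = "smc";
*		};
*	};
*/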
static struct optee *optee_probe(struct device_node *np)
{
optee_invoke_fn *invoke_fn;
struct tee_shm_pool *pool;
struct optee *optee = NULL;
void *memremaped_shm = NULL;
struct tee_device *teedev;
u32 sec_caps;
int rc;
invoke_fn = get_invoke_func(np);
if (IS_ERR(invoke_fn))
return (void *)invoke_fn;
if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
pr_warn("api uid mismatch\n");
return ERR_PTR(-EINVAL);
}
optee_msg_get_os_revision(invoke_fn);
if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
pr_warn("api revision mismatch\n");
return ERR_PTR(-EINVAL);
}
if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
pr_warn("capabilities mismatch\n");
return ERR_PTR(-EINVAL);
}
/*
* We have no other option for shared memory: if secure world
* doesn't have any reserved memory we can use, we can't continue.
*/
if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
return ERR_PTR(-EINVAL);
pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps);
if (IS_ERR(pool))
return (void *)pool;
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
if (!optee) {
rc = -ENOMEM;
goto err;
}
optee->invoke_fn = invoke_fn;
optee->sec_caps = sec_caps;
teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
goto err;
}
optee->teedev = teedev;
teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
goto err;
}
optee->supp_teedev = teedev;
rc = tee_device_register(optee->teedev);
if (rc)
goto err;
rc = tee_device_register(optee->supp_teedev);
if (rc)
goto err;
mutex_init(&optee->call_queue.mutex);
INIT_LIST_HEAD(&optee->call_queue.waiters);
optee_wait_queue_init(&optee->wait_queue);
optee_supp_init(&optee->supp);
optee->memremaped_shm = memremaped_shm;
optee->pool = pool;
optee_enable_shm_cache(optee);
if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
pr_info("dynamic shared memory is enabled\n");
pr_info("initialized driver\n");
return optee;
err:
if (optee) {
/*
* tee_device_unregister() is safe to call even if the
* devices haven't been registered with
* tee_device_register() yet.
*/
tee_device_unregister(optee->supp_teedev);
tee_device_unregister(optee->teedev);
kfree(optee);
}
if (pool)
tee_shm_pool_free(pool);
if (memremaped_shm)
memunmap(memremaped_shm);
return ERR_PTR(rc);
}
static void optee_remove(struct optee *optee)
{
/*
* Ask OP-TEE to free all cached shared memory objects to decrease
* reference counters and also avoid wild pointers in secure world
* into the old shared memory range.
*/
optee_disable_shm_cache(optee);
/*
* The two devices have to be unregistered before we can free the
* other resources.
*/
tee_device_unregister(optee->supp_teedev);
tee_device_unregister(optee->teedev);
tee_shm_pool_free(optee->pool);
if (optee->memremaped_shm)
memunmap(optee->memremaped_shm);
optee_wait_queue_exit(&optee->wait_queue);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
kfree(optee);
}
static const struct of_device_id optee_match[] = {
{ .compatible = "linaro,optee-tz" },
{},
};
static struct optee *optee_svc;
static int __init optee_driver_init(void)
{
struct device_node *fw_np;
struct device_node *np;
struct optee *optee;
/* Node is supposed to be below /firmware */
fw_np = of_find_node_by_name(NULL, "firmware");
if (!fw_np)
return -ENODEV;
np = of_find_matching_node(fw_np, optee_match);
if (!np || !of_device_is_available(np))
return -ENODEV;
optee = optee_probe(np);
of_node_put(np);
if (IS_ERR(optee))
return PTR_ERR(optee);
optee_svc = optee;
return 0;
}
module_init(optee_driver_init);
static void __exit optee_driver_exit(void)
{
struct optee *optee = optee_svc;
optee_svc = NULL;
if (optee)
optee_remove(optee);
}
module_exit(optee_driver_exit);
MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("OP-TEE driver");
MODULE_SUPPORTED_DEVICE("");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");