/*
 * CAAM control-plane driver backend
 * Controller-level driver, kernel property detection, initialization
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 */

#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sys_soc.h>

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "desc_constr.h"
#include "ctrl.h"

bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);
bool caam_imx;
EXPORT_SYMBOL(caam_imx);

#ifdef CONFIG_CAAM_QI
#include "qi.h"
#endif

/*
 * i.MX targets tend to have clock control subsystems that can
 * enable/disable clocking to our device.
 */
static inline struct clk *caam_drv_identify_clk(struct device *dev,
                                                char *clk_name)
{
        return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
}

/*
 * Descriptor to instantiate RNG State Handle 0 in normal mode and
 * load the JDKEK, TDKEK and TDSK registers
 */
static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
{
        u32 *jump_cmd, op_flags;

        init_job_desc(desc, 0);

        op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
                        (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;

        /* INIT RNG in non-test mode */
        append_operation(desc, op_flags);

        if (!handle && do_sk) {
                /*
                 * For SH0, Secure Keys must be generated as well
                 */

                /* wait for done */
                jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
                set_jump_tgt_here(desc, jump_cmd);

                /*
                 * load 1 to clear written reg:
                 * resets the done interrupt and returns the RNG to idle.
                 */
                append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);

                /* Initialize State Handle */
                append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
                                 OP_ALG_AAI_RNG4_SK);
        }

        append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}

/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
static void build_deinstantiation_desc(u32 *desc, int handle)
{
        init_job_desc(desc, 0);

        /* Uninstantiate State Handle 0 */
        append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
                         (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);

        append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}

/*
 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
 *                        the software (no JR/QI used).
 * @ctrldev - pointer to device
 * @status - descriptor status, after being run
 *
 * Return: - 0 if no error occurred
 *         - -ENODEV if the DECO couldn't be acquired
 *         - -EAGAIN if an error occurred while executing the descriptor
 */
static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
                                       u32 *status)
{
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
        struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
        struct caam_deco __iomem *deco = ctrlpriv->deco;
        unsigned int timeout = 100000;
        u32 deco_dbg_reg, flags;
        int i;

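        /*
         * When virtualization is enabled, route DECO 0 requests through
         * job ring 0 (DECORSR_JR0) and wait for the setting to read back
         * as valid before touching the DECO.
         */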
        if (ctrlpriv->virt_en == 1) {
                clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);

                while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
                       --timeout)
                        cpu_relax();

                timeout = 100000;
        }

        clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);

        while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
               --timeout)
                cpu_relax();

        if (!timeout) {
                dev_err(ctrldev, "failed to acquire DECO 0\n");
                clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
                return -ENODEV;
        }

        for (i = 0; i < desc_len(desc); i++)
                wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));

        flags = DECO_JQCR_WHL;
        /*
         * If the descriptor length is longer than 4 words, then the
         * FOUR bit in JRCTRL register must be set.
         */
        if (desc_len(desc) >= 4)
                flags |= DECO_JQCR_FOUR;

        /* Instruct the DECO to execute it */
        clrsetbits_32(&deco->jr_ctl_hi, 0, flags);

        timeout = 10000000;
        do {
                deco_dbg_reg = rd_reg32(&deco->desc_dbg);
                /*
                 * If an error occurred in the descriptor, then
                 * the DECO status field will be set to 0x0D
                 */
                if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
                    DESC_DBG_DECO_STAT_HOST_ERR)
                        break;
                cpu_relax();
        } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

        *status = rd_reg32(&deco->op_status_hi) &
                  DECO_OP_STATUS_HI_ERR_MASK;

        if (ctrlpriv->virt_en == 1)
                clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);

        /* Mark the DECO as free */
        clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);

        if (!timeout)
                return -EAGAIN;

        return 0;
}

/*
 * instantiate_rng - builds and executes a descriptor on DECO0,
 *                   which initializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *                      for the RNG4 state handles which exist in
 *                      the RNG4 block: 1 if it's been instantiated
 *                      by an external entry, 0 otherwise.
 * @gen_sk  - generate data to be loaded into the JDKEK, TDKEK and TDSK;
 *            Caution: this can be done only once; if the keys need to be
 *            regenerated, a POR is required
 *
 * Return: - 0 if no error occurred
 *         - -ENOMEM if there isn't enough memory to allocate the descriptor
 *         - -ENODEV if DECO0 couldn't be acquired
 *         - -EAGAIN if an error occurred when executing the descriptor
 *            f.i. there was a RNG hardware error due to not "good enough"
 *            entropy being acquired.
 */
static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
                           int gen_sk)
{
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
        struct caam_ctrl __iomem *ctrl;
        u32 *desc, status = 0, rdsta_val;
        int ret = 0, sh_idx;

        ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
        desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
                /*
                 * If the corresponding bit is set, this state handle
                 * was initialized by somebody else, so it's left alone.
                 */
                if ((1 << sh_idx) & state_handle_mask)
                        continue;

                /* Create the descriptor for instantiating RNG State Handle */
                build_instantiation_desc(desc, sh_idx, gen_sk);

                /* Try to run it through DECO0 */
                ret = run_descriptor_deco0(ctrldev, desc, &status);

                /*
                 * If ret is not 0, or descriptor status is not 0, then
                 * something went wrong. No need to try the next state
                 * handle (if available), bail out here.
                 * Also, if for some reason, the State Handle didn't get
                 * instantiated although the descriptor has finished
                 * without any error (HW optimizations for later
                 * CAAM eras), then try again.
                 */
                rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
                if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
                    !(rdsta_val & (1 << sh_idx)))
                        ret = -EAGAIN;
                if (ret)
                        break;
                dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
                /* Clear the contents before recreating the descriptor */
                memset(desc, 0x00, CAAM_CMD_SZ * 7);
        }

        kfree(desc);

        return ret;
}

/*
 * deinstantiate_rng - builds and executes a descriptor on DECO0,
 *                     which deinitializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *                      for the RNG4 state handles which exist in
 *                      the RNG4 block: 1 if it's been instantiated
 *
 * Return: - 0 if no error occurred
 *         - -ENOMEM if there isn't enough memory to allocate the descriptor
 *         - -ENODEV if DECO0 couldn't be acquired
 *         - -EAGAIN if an error occurred when executing the descriptor
 */
static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
{
        u32 *desc, status;
        int sh_idx, ret = 0;

        desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
                /*
                 * If the corresponding bit is set, then it means the state
                 * handle was initialized by us, and thus it needs to be
                 * deinitialized as well
                 */
                if ((1 << sh_idx) & state_handle_mask) {
                        /*
                         * Create the descriptor for deinstantiating this state
                         * handle
                         */
                        build_deinstantiation_desc(desc, sh_idx);

                        /* Try to run it through DECO0 */
                        ret = run_descriptor_deco0(ctrldev, desc, &status);

                        if (ret ||
                            (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
                                dev_err(ctrldev,
                                        "Failed to deinstantiate RNG4 SH%d\n",
                                        sh_idx);
                                break;
                        }
                        dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
                }
        }

        kfree(desc);

        return ret;
}

static int caam_remove(struct platform_device *pdev)
{
        struct device *ctrldev;
        struct caam_drv_private *ctrlpriv;
        struct caam_ctrl __iomem *ctrl;

        ctrldev = &pdev->dev;
        ctrlpriv = dev_get_drvdata(ctrldev);
        ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;

        /* Remove platform devices under the crypto node */
        of_platform_depopulate(ctrldev);

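        /* Shut down the CAAM/QI backend if it was brought up at probe time */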
#ifdef CONFIG_CAAM_QI
        if (ctrlpriv->qidev)
                caam_qi_shutdown(ctrlpriv->qidev);
#endif

        /*
         * De-initialize RNG state handles initialized by this driver.
         * In case of DPAA 2.x, RNG is managed by MC firmware.
         */
        if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
                deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);

        /* Shut down debug views */
#ifdef CONFIG_DEBUG_FS
        debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif

        /* Unmap controller region */
        iounmap(ctrl);

        /* shut clocks off before finalizing shutdown */
        clk_disable_unprepare(ctrlpriv->caam_ipg);
        clk_disable_unprepare(ctrlpriv->caam_mem);
        clk_disable_unprepare(ctrlpriv->caam_aclk);
        if (ctrlpriv->caam_emi_slow)
                clk_disable_unprepare(ctrlpriv->caam_emi_slow);

        return 0;
}

/*
 * kick_trng - sets the various parameters for enabling the initialization
 *             of the RNG4 block in CAAM
 * @pdev - pointer to the platform device
 * @ent_delay - Defines the length (in system clocks) of each entropy sample.
 */
static void kick_trng(struct platform_device *pdev, int ent_delay)
{
        struct device *ctrldev = &pdev->dev;
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
        struct caam_ctrl __iomem *ctrl;
        struct rng4tst __iomem *r4tst;
        u32 val;

        ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
        r4tst = &ctrl->r4tst[0];

        /* put RNG4 into program mode */
        clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);

        /*
         * Performance-wise, it does not make sense to
         * set the delay to a value that is lower
         * than the last one that worked (i.e. the state handles
         * were instantiated properly). Thus, instead of wasting
         * time trying to set the values controlling the sample
         * frequency, the function simply returns.
         */
        val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
              >> RTSDCTL_ENT_DLY_SHIFT;
        if (ent_delay <= val)
                goto start_rng;

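        /* Program the new, larger entropy delay into RTSDCTL */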
        val = rd_reg32(&r4tst->rtsdctl);
        val = (val & ~RTSDCTL_ENT_DLY_MASK) |
              (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
        wr_reg32(&r4tst->rtsdctl, val);
        /* min. freq. count, equal to 1/4 of the entropy sample length */
        wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
        /* disable maximum frequency count */
        wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
        /* read the control register */
        val = rd_reg32(&r4tst->rtmctl);
start_rng:
        /*
         * select raw sampling in both entropy shifter
         * and statistical checker; put RNG4 into run mode
         */
        clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}

/**
 * caam_get_era() - Return the ERA of the SEC on SoC, based
 * on "sec-era" property in the DTS. This property is updated by u-boot.
 **/
int caam_get_era(void)
{
        struct device_node *caam_node;
        int ret;
        u32 prop;

        caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
        ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
        of_node_put(caam_node);

        return ret ? -ENOTSUPP : prop;
}
EXPORT_SYMBOL(caam_get_era);

static const struct of_device_id caam_match[] = {
        {
                .compatible = "fsl,sec-v4.0",
        },
        {
                .compatible = "fsl,sec4.0",
        },
        {},
};
MODULE_DEVICE_TABLE(of, caam_match);

/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
        int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
        u64 caam_id;
        static const struct soc_device_attribute imx_soc[] = {
                {.family = "Freescale i.MX"},
                {},
        };
        struct device *dev;
        struct device_node *nprop, *np;
        struct caam_ctrl __iomem *ctrl;
        struct caam_drv_private *ctrlpriv;
        struct clk *clk;
#ifdef CONFIG_DEBUG_FS
        struct caam_perfmon *perfmon;
#endif
        u32 scfgr, comp_params;
        u32 cha_vid_ls;
        int pg_size;
        int BLOCK_OFFSET = 0;

        ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
        if (!ctrlpriv)
                return -ENOMEM;

        dev = &pdev->dev;
        dev_set_drvdata(dev, ctrlpriv);
        nprop = pdev->dev.of_node;

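        /*
         * Detect i.MX SoCs: only on these parts does the driver need to
         * look up and enable the CAAM clocks (see caam_drv_identify_clk()).
         */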
        caam_imx = (bool)soc_device_match(imx_soc);

        /* Enable clocking */
        clk = caam_drv_identify_clk(&pdev->dev, "ipg");
        if (IS_ERR(clk)) {
                ret = PTR_ERR(clk);
                dev_err(&pdev->dev,
                        "can't identify CAAM ipg clk: %d\n", ret);
                return ret;
        }
        ctrlpriv->caam_ipg = clk;

        clk = caam_drv_identify_clk(&pdev->dev, "mem");
        if (IS_ERR(clk)) {
                ret = PTR_ERR(clk);
                dev_err(&pdev->dev,
                        "can't identify CAAM mem clk: %d\n", ret);
                return ret;
        }
        ctrlpriv->caam_mem = clk;

        clk = caam_drv_identify_clk(&pdev->dev, "aclk");
        if (IS_ERR(clk)) {
                ret = PTR_ERR(clk);
                dev_err(&pdev->dev,
                        "can't identify CAAM aclk clk: %d\n", ret);
                return ret;
        }
        ctrlpriv->caam_aclk = clk;

        if (!of_machine_is_compatible("fsl,imx6ul")) {
                clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
                if (IS_ERR(clk)) {
                        ret = PTR_ERR(clk);
                        dev_err(&pdev->dev,
                                "can't identify CAAM emi_slow clk: %d\n", ret);
                        return ret;
                }
                ctrlpriv->caam_emi_slow = clk;
        }

        ret = clk_prepare_enable(ctrlpriv->caam_ipg);
        if (ret < 0) {
                dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
                return ret;
        }

        ret = clk_prepare_enable(ctrlpriv->caam_mem);
        if (ret < 0) {
                dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
                        ret);
                goto disable_caam_ipg;
        }

        ret = clk_prepare_enable(ctrlpriv->caam_aclk);
        if (ret < 0) {
                dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
                goto disable_caam_mem;
        }

        if (ctrlpriv->caam_emi_slow) {
                ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
                if (ret < 0) {
                        dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
                                ret);
                        goto disable_caam_aclk;
                }
        }

        /* Get configuration properties from device tree */
        /* First, get register page */
        ctrl = of_iomap(nprop, 0);
        if (ctrl == NULL) {
                dev_err(dev, "caam: of_iomap() failed\n");
                ret = -ENOMEM;
                goto disable_caam_emi_slow;
        }

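        /*
         * Detect the CAAM block's endianness from the PLEND bits of the
         * perfmon status register; caam_little_end is used by the register
         * accessors to decide whether byte swapping is needed.
         */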
        caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
                                  (CSTA_PLEND | CSTA_ALT_PLEND));

        /* Finding the page size for using the CTPR_MS register */
        comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
        pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;

        /* Allocating the BLOCK_OFFSET based on the supported page size on
         * the platform
         */
        if (pg_size == 0)
                BLOCK_OFFSET = PG_SIZE_4K;
        else
                BLOCK_OFFSET = PG_SIZE_64K;

        ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
        ctrlpriv->assure = (struct caam_assurance __iomem __force *)
                           ((__force uint8_t *)ctrl +
                            BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
                           );
        ctrlpriv->deco = (struct caam_deco __iomem __force *)
                         ((__force uint8_t *)ctrl +
                          BLOCK_OFFSET * DECO_BLOCK_NUMBER
                         );

        /* Get the IRQ of the controller (for security violations only) */
        ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);

        /*
         * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
         * long pointers in master configuration register.
         * In case of DPAA 2.x, Management Complex firmware performs
         * the configuration.
         */
        caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
        if (!caam_dpaa2)
                clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
                              MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
                              MCFGR_WDENABLE | MCFGR_LARGE_BURST |
                              (sizeof(dma_addr_t) == sizeof(u64) ?
                               MCFGR_LONG_PTR : 0));

        /*
         * Read the Compile Time parameters and SCFGR to determine
         * if Virtualization is enabled for this platform
         */
        scfgr = rd_reg32(&ctrl->scfgr);

        ctrlpriv->virt_en = 0;
        if (comp_params & CTPR_MS_VIRT_EN_INCL) {
                /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
                 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
                 */
                if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
                    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
                     (scfgr & SCFGR_VIRT_EN)))
                        ctrlpriv->virt_en = 1;
        } else {
                /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
                if (comp_params & CTPR_MS_VIRT_EN_POR)
                        ctrlpriv->virt_en = 1;
        }

        if (ctrlpriv->virt_en == 1)
                clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
                              JRSTART_JR1_START | JRSTART_JR2_START |
                              JRSTART_JR3_START);

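        /*
         * Pick the DMA mask from the platform's addressing capability:
         * 49 bits on DPAA 2.x, 40 bits on SEC v5.0, 36 bits on other
         * 64-bit configurations, and 32 bits otherwise.
         */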
        if (sizeof(dma_addr_t) == sizeof(u64)) {
                if (caam_dpaa2)
                        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
                else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
                        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
                else
                        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
        } else {
                ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        }
        if (ret) {
                dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
                goto iounmap_ctrl;
        }

        ctrlpriv->era = caam_get_era();

        ret = of_platform_populate(nprop, caam_match, NULL, dev);
        if (ret) {
                dev_err(dev, "JR platform devices creation error\n");
                goto iounmap_ctrl;
        }

#ifdef CONFIG_DEBUG_FS
        /*
         * FIXME: needs better naming distinction, as some amalgamation of
         * "caam" and nprop->full_name. The OF name isn't distinctive,
         * but does separate instances
         */
        perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;

        ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
        ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
#endif

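        /*
         * Walk the job ring child nodes: map each ring's register block
         * inside the already-iomapped controller region and count the
         * rings that are available.
         */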
        ring = 0;
        for_each_available_child_of_node(nprop, np)
                if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
                    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
                        ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
                                             ((__force uint8_t *)ctrl +
                                             (ring + JR_BLOCK_NUMBER) *
                                             BLOCK_OFFSET
                                             );
                        ctrlpriv->total_jobrs++;
                        ring++;
                }

        /* Check to see if (DPAA 1.x) QI present. If so, enable */
        ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
        if (ctrlpriv->qi_present && !caam_dpaa2) {
                ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
                               ((__force uint8_t *)ctrl +
                                BLOCK_OFFSET * QI_BLOCK_NUMBER
                               );
                /* This is all that's required to physically enable QI */
                wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);

                /* If QMAN driver is present, init CAAM-QI backend */
#ifdef CONFIG_CAAM_QI
                ret = caam_qi_init(pdev);
                if (ret)
                        dev_err(dev, "caam qi i/f init failed: %d\n", ret);
#endif
        }

        /* If no QI and no rings specified, quit and go home */
        if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
                dev_err(dev, "no queues configured, terminating\n");
                ret = -ENOMEM;
                goto caam_remove;
        }

        cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);

        /*
         * If SEC has RNG version >= 4 and RNG state handle has not been
         * already instantiated, do RNG instantiation
         * In case of DPAA 2.x, RNG is managed by MC firmware.
         */
        if (!caam_dpaa2 &&
            (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
                ctrlpriv->rng4_sh_init =
                        rd_reg32(&ctrl->r4tst[0].rdsta);
                /*
                 * If the secure keys (TDKEK, JDKEK, TDSK), were already
                 * generated, signal this to the function that is instantiating
                 * the state handles. An error would occur if RNG4 attempts
                 * to regenerate these keys before the next POR.
                 */
                gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
                ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
                do {
                        int inst_handles =
                                rd_reg32(&ctrl->r4tst[0].rdsta) &
                                RDSTA_IFMASK;
                        /*
                         * If either SH were instantiated by somebody else
                         * (e.g. u-boot) then it is assumed that the entropy
                         * parameters are properly set and thus the function
                         * setting these (kick_trng(...)) is skipped.
                         * Also, if a handle was instantiated, do not change
                         * the TRNG parameters.
                         */
                        if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
                                dev_info(dev,
                                         "Entropy delay = %u\n",
                                         ent_delay);
                                kick_trng(pdev, ent_delay);
                                ent_delay += 400;
                        }
                        /*
                         * if instantiate_rng(...) fails, the loop will rerun
                         * and the kick_trng(...) function will modify the
                         * upper and lower limits of the entropy sampling
                         * interval, leading to a successful initialization of
                         * the RNG.
                         */
                        ret = instantiate_rng(dev, inst_handles,
                                              gen_sk);
                        if (ret == -EAGAIN)
                                /*
                                 * if here, the loop will rerun,
                                 * so don't hog the CPU
                                 */
                                cpu_relax();
                } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
                if (ret) {
                        dev_err(dev, "failed to instantiate RNG");
                        goto caam_remove;
                }
                /*
                 * Set handles init'ed by this module as the complement of the
                 * already initialized ones
                 */
                ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;

                /* Enable RDB bit so that RNG works faster */
                clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
        }

        /* NOTE: RTIC detection ought to go here, around Si time */

        caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
                  (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);

        /* Report "alive" for developer to see */
        dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
                 ctrlpriv->era);
        dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
                 ctrlpriv->total_jobrs, ctrlpriv->qi_present,
                 caam_dpaa2 ? "yes" : "no");

#ifdef CONFIG_DEBUG_FS
        debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->req_dequeued,
                            &caam_fops_u64_ro);
        debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->ob_enc_req,
                            &caam_fops_u64_ro);
        debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->ib_dec_req,
                            &caam_fops_u64_ro);
        debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->ob_enc_bytes,
                            &caam_fops_u64_ro);
        debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->ob_prot_bytes,
                            &caam_fops_u64_ro);
        debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->ib_dec_bytes,
                            &caam_fops_u64_ro);
        debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->ib_valid_bytes,
                            &caam_fops_u64_ro);

        /* Controller level - global status values */
        debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->faultaddr,
                            &caam_fops_u32_ro);
        debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->faultdetail,
                            &caam_fops_u32_ro);
        debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
                            ctrlpriv->ctl, &perfmon->status,
                            &caam_fops_u32_ro);

        /* Internal covering keys (useful in non-secure mode only) */
        ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
        ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_kek = debugfs_create_blob("kek",
                                                S_IRUSR |
                                                S_IRGRP | S_IROTH,
                                                ctrlpriv->ctl,
                                                &ctrlpriv->ctl_kek_wrap);

        ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
        ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
                                                 S_IRUSR |
                                                 S_IRGRP | S_IROTH,
                                                 ctrlpriv->ctl,
                                                 &ctrlpriv->ctl_tkek_wrap);

        ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
        ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
                                                 S_IRUSR |
                                                 S_IRGRP | S_IROTH,
                                                 ctrlpriv->ctl,
                                                 &ctrlpriv->ctl_tdsk_wrap);
#endif
        return 0;

caam_remove:
#ifdef CONFIG_DEBUG_FS
        debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif
        caam_remove(pdev);
        return ret;

iounmap_ctrl:
        iounmap(ctrl);
disable_caam_emi_slow:
        if (ctrlpriv->caam_emi_slow)
                clk_disable_unprepare(ctrlpriv->caam_emi_slow);
disable_caam_aclk:
        clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
        clk_disable_unprepare(ctrlpriv->caam_mem);
disable_caam_ipg:
        clk_disable_unprepare(ctrlpriv->caam_ipg);
        return ret;
}

static struct platform_driver caam_driver = {
        .driver = {
                .name = "caam",
                .of_match_table = caam_match,
        },
        .probe = caam_probe,
        .remove = caam_remove,
};

module_platform_driver(caam_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");