Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 5.3:

  API:
   - Test shash interface directly in testmgr
   - cra_driver_name is now mandatory

  Algorithms:
   - Replace arc4 crypto_cipher with library helper
   - Implement 5 way interleave for ECB, CBC and CTR on arm64
   - Add xxhash
   - Add continuous self-test on noise source to drbg
   - Update jitter RNG

  Drivers:
   - Add support for SHA204A random number generator
   - Add support for 7211 in iproc-rng200
   - Fix fuzz test failures in inside-secure
   - Fix fuzz test failures in talitos
   - Fix fuzz test failures in qat"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (143 commits)
  crypto: stm32/hash - remove interruptible condition for dma
  crypto: stm32/hash - Fix hmac issue more than 256 bytes
  crypto: stm32/crc32 - rename driver file
  crypto: amcc - remove memset after dma_alloc_coherent
  crypto: ccp - Switch to SPDX license identifiers
  crypto: ccp - Validate the the error value used to index error messages
  crypto: doc - Fix formatting of new crypto engine content
  crypto: doc - Add parameter documentation
  crypto: arm64/aes-ce - implement 5 way interleave for ECB, CBC and CTR
  crypto: arm64/aes-ce - add 5 way interleave routines
  crypto: talitos - drop icv_ool
  crypto: talitos - fix hash on SEC1.
  crypto: talitos - move struct talitos_edesc into talitos.h
  lib/scatterlist: Fix mapping iterator when sg->offset is greater than PAGE_SIZE
  crypto/NX: Set receive window credits to max number of CRBs in RxFIFO
  crypto: asymmetric_keys - select CRYPTO_HASH where needed
  crypto: serpent - mark __serpent_setkey_sbox noinline
  crypto: testmgr - dynamically allocate crypto_shash
  crypto: testmgr - dynamically allocate testvec_config
  crypto: talitos - eliminate unneeded 'done' functions at build time
  ...

This commit is contained in:
commit 4d2fa8b44b
@@ -4,111 +4,89 @@ Code Examples
 Code Example For Symmetric Key Cipher Operation
 -----------------------------------------------
 
+This code encrypts some data with AES-256-XTS.  For sake of example,
+all inputs are random bytes, the encryption is done in-place, and it's
+assumed the code is running in a context where it can sleep.
+
 ::
 
-    /* tie all data structures together */
-    struct skcipher_def {
-        struct scatterlist sg;
-        struct crypto_skcipher *tfm;
-        struct skcipher_request *req;
-        struct crypto_wait wait;
-    };
-
-    /* Perform cipher operation */
-    static unsigned int test_skcipher_encdec(struct skcipher_def *sk,
-                         int enc)
-    {
-        int rc;
-
-        if (enc)
-            rc = crypto_wait_req(crypto_skcipher_encrypt(sk->req), &sk->wait);
-        else
-            rc = crypto_wait_req(crypto_skcipher_decrypt(sk->req), &sk->wait);
-
-        if (rc)
-            pr_info("skcipher encrypt returned with result %d\n", rc);
-
-        return rc;
-    }
-
-    /* Initialize and trigger cipher operation */
     static int test_skcipher(void)
     {
-        struct skcipher_def sk;
-        struct crypto_skcipher *skcipher = NULL;
-        struct skcipher_request *req = NULL;
-        char *scratchpad = NULL;
-        char *ivdata = NULL;
-        unsigned char key[32];
-        int ret = -EFAULT;
+        struct crypto_skcipher *tfm = NULL;
+        struct skcipher_request *req = NULL;
+        u8 *data = NULL;
+        const size_t datasize = 512; /* data size in bytes */
+        struct scatterlist sg;
+        DECLARE_CRYPTO_WAIT(wait);
+        u8 iv[16];  /* AES-256-XTS takes a 16-byte IV */
+        u8 key[64]; /* AES-256-XTS takes a 64-byte key */
+        int err;
 
-        skcipher = crypto_alloc_skcipher("cbc-aes-aesni", 0, 0);
-        if (IS_ERR(skcipher)) {
-            pr_info("could not allocate skcipher handle\n");
-            return PTR_ERR(skcipher);
-        }
-
-        req = skcipher_request_alloc(skcipher, GFP_KERNEL);
-        if (!req) {
-            pr_info("could not allocate skcipher request\n");
-            ret = -ENOMEM;
-            goto out;
-        }
-
-        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                          crypto_req_done,
-                          &sk.wait);
-
-        /* AES 256 with random key */
-        get_random_bytes(&key, 32);
-        if (crypto_skcipher_setkey(skcipher, key, 32)) {
-            pr_info("key could not be set\n");
-            ret = -EAGAIN;
-            goto out;
-        }
-
-        /* IV will be random */
-        ivdata = kmalloc(16, GFP_KERNEL);
-        if (!ivdata) {
-            pr_info("could not allocate ivdata\n");
+        /*
+         * Allocate a tfm (a transformation object) and set the key.
+         *
+         * In real-world use, a tfm and key are typically used for many
+         * encryption/decryption operations.  But in this example, we'll just do a
+         * single encryption operation with it (which is not very efficient).
+         */
+        tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
+        if (IS_ERR(tfm)) {
+            pr_err("Error allocating xts(aes) handle: %ld\n", PTR_ERR(tfm));
+            return PTR_ERR(tfm);
+        }
+
+        get_random_bytes(key, sizeof(key));
+        err = crypto_skcipher_setkey(tfm, key, sizeof(key));
+        if (err) {
+            pr_err("Error setting key: %d\n", err);
+            goto out;
+        }
+
+        /* Allocate a request object */
+        req = skcipher_request_alloc(tfm, GFP_KERNEL);
+        if (!req) {
+            err = -ENOMEM;
+            goto out;
+        }
+
+        /* Prepare the input data */
+        data = kmalloc(datasize, GFP_KERNEL);
+        if (!data) {
+            err = -ENOMEM;
             goto out;
         }
-        get_random_bytes(ivdata, 16);
+        get_random_bytes(data, datasize);
 
-        /* Input data will be random */
-        scratchpad = kmalloc(16, GFP_KERNEL);
-        if (!scratchpad) {
-            pr_info("could not allocate scratchpad\n");
-            goto out;
-        }
-        get_random_bytes(scratchpad, 16);
+        /* Initialize the IV */
+        get_random_bytes(iv, sizeof(iv));
 
-        sk.tfm = skcipher;
-        sk.req = req;
-
-        /* We encrypt one block */
-        sg_init_one(&sk.sg, scratchpad, 16);
-        skcipher_request_set_crypt(req, &sk.sg, &sk.sg, 16, ivdata);
-        crypto_init_wait(&sk.wait);
-
-        /* encrypt data */
-        ret = test_skcipher_encdec(&sk, 1);
-        if (ret)
-            goto out;
-
-        pr_info("Encryption triggered successfully\n");
+        /*
+         * Encrypt the data in-place.
+         *
+         * For simplicity, in this example we wait for the request to complete
+         * before proceeding, even if the underlying implementation is asynchronous.
+         *
+         * To decrypt instead of encrypt, just change crypto_skcipher_encrypt() to
+         * crypto_skcipher_decrypt().
+         */
+        sg_init_one(&sg, data, datasize);
+        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+                                           CRYPTO_TFM_REQ_MAY_SLEEP,
+                                      crypto_req_done, &wait);
+        skcipher_request_set_crypt(req, &sg, &sg, datasize, iv);
+        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+        if (err) {
+            pr_err("Error encrypting data: %d\n", err);
+            goto out;
+        }
 
+        pr_debug("Encryption was successful\n");
 out:
-        if (skcipher)
-            crypto_free_skcipher(skcipher);
-        if (req)
+        crypto_free_skcipher(tfm);
         skcipher_request_free(req);
-        if (ivdata)
-            kfree(ivdata);
-        if (scratchpad)
-            kfree(scratchpad);
-        return ret;
+        kfree(data);
+        return err;
     }
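To actually run the rewritten example it still has to be built as a kernel module; the wrapper below is an illustrative sketch and not part of the patch. It assumes test_skcipher() from the new documentation text is compiled into the same file, together with the headers that text relies on (<crypto/skcipher.h>, <linux/crypto.h>, <linux/random.h>, <linux/scatterlist.h>).

    /*
     * Hypothetical test harness for the example above -- not part of
     * the patch. Module metadata and function names are illustrative.
     */
    #include <linux/module.h>

    static int __init test_skcipher_mod_init(void)
    {
        /* Run the one-shot AES-256-XTS demo once at load time. */
        return test_skcipher();
    }

    static void __exit test_skcipher_mod_exit(void)
    {
    }

    module_init(test_skcipher_mod_init);
    module_exit(test_skcipher_mod_exit);
    MODULE_LICENSE("GPL");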
@@ -5,7 +5,7 @@ Block Cipher Algorithm Definitions
    :doc: Block Cipher Algorithm Definitions
 
 .. kernel-doc:: include/linux/crypto.h
-   :functions: crypto_alg ablkcipher_alg blkcipher_alg cipher_alg
+   :functions: crypto_alg ablkcipher_alg blkcipher_alg cipher_alg compress_alg
 
 Symmetric Key Cipher API
 ------------------------
@@ -208,9 +208,7 @@ the aforementioned cipher types:
 -  CRYPTO_ALG_TYPE_KPP Key-agreement Protocol Primitive (KPP) such as
    an ECDH or DH implementation
 
--  CRYPTO_ALG_TYPE_DIGEST Raw message digest
-
--  CRYPTO_ALG_TYPE_HASH Alias for CRYPTO_ALG_TYPE_DIGEST
+-  CRYPTO_ALG_TYPE_HASH Raw message digest
 
 -  CRYPTO_ALG_TYPE_SHASH Synchronous multi-block hash
@@ -1,50 +1,85 @@
-=============
-CRYPTO ENGINE
+.. SPDX-License-Identifier: GPL-2.0
+Crypto Engine
 =============
 
 Overview
 --------
-The crypto engine API (CE), is a crypto queue manager.
+The crypto engine (CE) API is a crypto queue manager.
 
 Requirement
 -----------
-You have to put at start of your tfm_ctx the struct crypto_engine_ctx::
+You must put, at the start of your transform context your_tfm_ctx, the structure
+crypto_engine:
 
-	struct your_tfm_ctx {
-		struct crypto_engine_ctx enginectx;
-		...
-	};
+::
 
-Why: Since CE manage only crypto_async_request, it cannot know the underlying
-request_type and so have access only on the TFM.
-So using container_of for accessing __ctx is impossible.
-Furthermore, the crypto engine cannot know the "struct your_tfm_ctx",
-so it must assume that crypto_engine_ctx is at start of it.
+	struct your_tfm_ctx {
+		struct crypto_engine engine;
+		...
+	};
+
+The crypto engine only manages asynchronous requests in the form of
+crypto_async_request. It cannot know the underlying request type and thus only
+has access to the transform structure. It is not possible to access the context
+using container_of. In addition, the engine knows nothing about your
+structure "``struct your_tfm_ctx``". The engine assumes (requires) the placement
+of the known member ``struct crypto_engine`` at the beginning.
 
 Order of operations
 -------------------
-You have to obtain a struct crypto_engine via crypto_engine_alloc_init().
-And start it via crypto_engine_start().
+You are required to obtain a struct crypto_engine via ``crypto_engine_alloc_init()``.
+Start it via ``crypto_engine_start()``. When finished with your work, shut down the
+engine using ``crypto_engine_stop()`` and destroy the engine with
+``crypto_engine_exit()``.
 
-Before transferring any request, you have to fill the enginectx.
-- prepare_request: (taking a function pointer) If you need to do some processing before doing the request
-- unprepare_request: (taking a function pointer) Undoing what's done in prepare_request
-- do_one_request: (taking a function pointer) Do encryption for current request
+Before transferring any request, you have to fill the context enginectx by
+providing functions for the following:
 
-Note: that those three functions get the crypto_async_request associated with the received request.
-So your need to get the original request via container_of(areq, struct yourrequesttype_request, base);
+* ``prepare_crypt_hardware``: Called once before any prepare functions are
+  called.
 
-When your driver receive a crypto_request, you have to transfer it to
-the cryptoengine via one of:
-- crypto_transfer_ablkcipher_request_to_engine()
-- crypto_transfer_aead_request_to_engine()
-- crypto_transfer_akcipher_request_to_engine()
-- crypto_transfer_hash_request_to_engine()
-- crypto_transfer_skcipher_request_to_engine()
+* ``unprepare_crypt_hardware``: Called once after all unprepare functions have
+  been called.
 
-At the end of the request process, a call to one of the following function is needed:
-- crypto_finalize_ablkcipher_request
-- crypto_finalize_aead_request
-- crypto_finalize_akcipher_request
-- crypto_finalize_hash_request
-- crypto_finalize_skcipher_request
+* ``prepare_cipher_request``/``prepare_hash_request``: Called before each
+  corresponding request is performed. If some processing or other preparatory
+  work is required, do it here.
+
+* ``unprepare_cipher_request``/``unprepare_hash_request``: Called after each
+  request is handled. Clean up / undo what was done in the prepare function.
+
+* ``cipher_one_request``/``hash_one_request``: Handle the current request by
+  performing the operation.
+
+Note that these functions access the crypto_async_request structure
+associated with the received request. You are able to retrieve the original
+request by using:
+
+::
+
+	container_of(areq, struct yourrequesttype_request, base);
+
+When your driver receives a crypto_request, you must to transfer it to
+the crypto engine via one of:
+
+* crypto_transfer_ablkcipher_request_to_engine()
+
+* crypto_transfer_aead_request_to_engine()
+
+* crypto_transfer_akcipher_request_to_engine()
+
+* crypto_transfer_hash_request_to_engine()
+
+* crypto_transfer_skcipher_request_to_engine()
+
+At the end of the request process, a call to one of the following functions is needed:
+
+* crypto_finalize_ablkcipher_request()
+
+* crypto_finalize_aead_request()
+
+* crypto_finalize_akcipher_request()
+
+* crypto_finalize_hash_request()
+
+* crypto_finalize_skcipher_request()
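Taken together, the rewritten document describes an allocate/start, transfer, finalize, stop/exit flow. Below is a minimal, non-authoritative sketch of that flow for an skcipher driver, assuming the struct crypto_engine_ctx / crypto_engine_op layout from include/crypto/engine.h as of this merge; everything prefixed my_ is hypothetical.

    #include <crypto/engine.h>
    #include <crypto/internal/skcipher.h>

    static struct crypto_engine *my_engine;   /* set up at probe time */

    struct my_tfm_ctx {
        struct crypto_engine_ctx enginectx;   /* assumed first member */
        /* ... driver-specific state ... */
    };

    /* Runs one queued request; finalizing lets the engine run the next. */
    static int my_do_one_request(struct crypto_engine *engine, void *areq)
    {
        struct skcipher_request *req =
            container_of(areq, struct skcipher_request, base);
        int err = 0;

        /* ... program the hardware and obtain a result here ... */

        crypto_finalize_skcipher_request(engine, req, err);
        return 0;
    }

    static int my_init_tfm(struct crypto_skcipher *tfm)
    {
        struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->enginectx.op.do_one_request = my_do_one_request;
        ctx->enginectx.op.prepare_request = NULL;   /* optional hooks */
        ctx->enginectx.op.unprepare_request = NULL;
        return 0;
    }

    /* .encrypt entry point: queue the request instead of handling it inline. */
    static int my_encrypt(struct skcipher_request *req)
    {
        return crypto_transfer_skcipher_request_to_engine(my_engine, req);
    }

At probe time a driver would pair this with my_engine = crypto_engine_alloc_init(dev, true); followed by crypto_engine_start(my_engine);, and at remove time with crypto_engine_stop(my_engine); and crypto_engine_exit(my_engine);, matching the lifecycle in "Order of operations" above.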
@@ -66,16 +66,3 @@ sha@f8034000 {
 	dmas = <&dma1 2 17>;
 	dma-names = "tx";
 };
-
-* Eliptic Curve Cryptography (I2C)
-
-Required properties:
-- compatible : must be "atmel,atecc508a".
-- reg: I2C bus address of the device.
-- clock-frequency: must be present in the i2c controller node.
-
-Example:
-atecc508a@c0 {
-	compatible = "atmel,atecc508a";
-	reg = <0xC0>;
-};
@@ -2,6 +2,7 @@ HWRNG support for the iproc-rng200 driver
 
 Required properties:
 - compatible : Must be one of:
+	       "brcm,bcm7211-rng200"
 	       "brcm,bcm7278-rng200"
 	       "brcm,iproc-rng200"
 - reg : base address and size of control register block
@@ -52,6 +52,10 @@ properties:
           - at,24c08
             # i2c trusted platform module (TPM)
           - atmel,at97sc3204t
+            # i2c h/w symmetric crypto module
+          - atmel,atsha204a
+            # i2c h/w elliptic curve crypto module
+          - atmel,atecc508a
             # CM32181: Ambient Light Sensor
           - capella,cm32181
             # CM3232: Ambient Light Sensor
@@ -4257,6 +4257,7 @@ F:	crypto/
 F:	drivers/crypto/
 F:	include/crypto/
 F:	include/linux/crypto*
+F:	lib/crypto/
 
 CRYPTOGRAPHIC RANDOM NUMBER GENERATOR
 M:	Neil Horman <nhorman@tuxdriver.com>
@@ -100,6 +100,29 @@ ahbbridge0: bus@40000000 {
 	reg = <0x40000000 0x800000>;
 	ranges;
 
+	crypto: crypto@40240000 {
+		compatible = "fsl,sec-v4.0";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x40240000 0x10000>;
+		ranges = <0 0x40240000 0x10000>;
+		clocks = <&pcc2 IMX7ULP_CLK_CAAM>,
+			 <&scg1 IMX7ULP_CLK_NIC1_BUS_DIV>;
+		clock-names = "aclk", "ipg";
+
+		sec_jr0: jr0@1000 {
+			compatible = "fsl,sec-v4.0-job-ring";
+			reg = <0x1000 0x1000>;
+			interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		sec_jr1: jr1@2000 {
+			compatible = "fsl,sec-v4.0-job-ring";
+			reg = <0x2000 0x1000>;
+			interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+		};
+	};
+
 	lpuart4: serial@402d0000 {
 		compatible = "fsl,imx7ulp-lpuart";
 		reg = <0x402d0000 0x1000>;
@@ -63,7 +63,7 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
 }
 
 static int chacha_neon_stream_xor(struct skcipher_request *req,
-				  struct chacha_ctx *ctx, u8 *iv)
+				  const struct chacha_ctx *ctx, const u8 *iv)
 {
 	struct skcipher_walk walk;
 	u32 state[16];
@@ -34,7 +34,7 @@ int sha512_arm_update(struct shash_desc *desc, const u8 *data,
 		(sha512_block_fn *)sha512_block_data_order);
 }
 
-int sha512_arm_final(struct shash_desc *desc, u8 *out)
+static int sha512_arm_final(struct shash_desc *desc, u8 *out)
 {
 	sha512_base_do_finalize(desc,
 		(sha512_block_fn *)sha512_block_data_order);
@@ -15,6 +15,8 @@
 	.arch		armv8-a+crypto
 
 	xtsmask		.req	v16
+	cbciv		.req	v16
+	vctr		.req	v16
 
 	.macro		xts_reload_mask, tmp
 	.endm
@@ -49,7 +51,7 @@
 	load_round_keys	\rounds, \temp
 	.endm
 
-	.macro		do_enc_Nx, de, mc, k, i0, i1, i2, i3
+	.macro		do_enc_Nx, de, mc, k, i0, i1, i2, i3, i4
 	aes\de		\i0\().16b, \k\().16b
 	aes\mc		\i0\().16b, \i0\().16b
 	.ifnb		\i1
@@ -60,27 +62,34 @@
 	aes\mc		\i2\().16b, \i2\().16b
 	aes\de		\i3\().16b, \k\().16b
 	aes\mc		\i3\().16b, \i3\().16b
+	.ifnb		\i4
+	aes\de		\i4\().16b, \k\().16b
+	aes\mc		\i4\().16b, \i4\().16b
+	.endif
 	.endif
 	.endif
 	.endm
 
-	/* up to 4 interleaved encryption rounds with the same round key */
-	.macro		round_Nx, enc, k, i0, i1, i2, i3
+	/* up to 5 interleaved encryption rounds with the same round key */
+	.macro		round_Nx, enc, k, i0, i1, i2, i3, i4
 	.ifc		\enc, e
-	do_enc_Nx	e, mc, \k, \i0, \i1, \i2, \i3
+	do_enc_Nx	e, mc, \k, \i0, \i1, \i2, \i3, \i4
 	.else
-	do_enc_Nx	d, imc, \k, \i0, \i1, \i2, \i3
+	do_enc_Nx	d, imc, \k, \i0, \i1, \i2, \i3, \i4
 	.endif
 	.endm
 
-	/* up to 4 interleaved final rounds */
-	.macro		fin_round_Nx, de, k, k2, i0, i1, i2, i3
+	/* up to 5 interleaved final rounds */
+	.macro		fin_round_Nx, de, k, k2, i0, i1, i2, i3, i4
 	aes\de		\i0\().16b, \k\().16b
 	.ifnb		\i1
 	aes\de		\i1\().16b, \k\().16b
 	.ifnb		\i3
 	aes\de		\i2\().16b, \k\().16b
 	aes\de		\i3\().16b, \k\().16b
+	.ifnb		\i4
+	aes\de		\i4\().16b, \k\().16b
+	.endif
 	.endif
 	.endif
 	eor		\i0\().16b, \i0\().16b, \k2\().16b
@@ -89,47 +98,52 @@
 	.ifnb		\i3
 	eor		\i2\().16b, \i2\().16b, \k2\().16b
 	eor		\i3\().16b, \i3\().16b, \k2\().16b
+	.ifnb		\i4
+	eor		\i4\().16b, \i4\().16b, \k2\().16b
+	.endif
 	.endif
 	.endif
 	.endm
 
-	/* up to 4 interleaved blocks */
-	.macro		do_block_Nx, enc, rounds, i0, i1, i2, i3
+	/* up to 5 interleaved blocks */
+	.macro		do_block_Nx, enc, rounds, i0, i1, i2, i3, i4
 	cmp		\rounds, #12
 	blo		2222f		/* 128 bits */
 	beq		1111f		/* 192 bits */
-	round_Nx	\enc, v17, \i0, \i1, \i2, \i3
-	round_Nx	\enc, v18, \i0, \i1, \i2, \i3
-1111:	round_Nx	\enc, v19, \i0, \i1, \i2, \i3
-	round_Nx	\enc, v20, \i0, \i1, \i2, \i3
+	round_Nx	\enc, v17, \i0, \i1, \i2, \i3, \i4
+	round_Nx	\enc, v18, \i0, \i1, \i2, \i3, \i4
+1111:	round_Nx	\enc, v19, \i0, \i1, \i2, \i3, \i4
+	round_Nx	\enc, v20, \i0, \i1, \i2, \i3, \i4
 2222:	.irp		key, v21, v22, v23, v24, v25, v26, v27, v28, v29
-	round_Nx	\enc, \key, \i0, \i1, \i2, \i3
+	round_Nx	\enc, \key, \i0, \i1, \i2, \i3, \i4
 	.endr
-	fin_round_Nx	\enc, v30, v31, \i0, \i1, \i2, \i3
+	fin_round_Nx	\enc, v30, v31, \i0, \i1, \i2, \i3, \i4
 	.endm
 
 	.macro		encrypt_block, in, rounds, t0, t1, t2
 	do_block_Nx	e, \rounds, \in
 	.endm
 
-	.macro		encrypt_block2x, i0, i1, rounds, t0, t1, t2
-	do_block_Nx	e, \rounds, \i0, \i1
-	.endm
-
 	.macro		encrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
 	do_block_Nx	e, \rounds, \i0, \i1, \i2, \i3
 	.endm
 
+	.macro		encrypt_block5x, i0, i1, i2, i3, i4, rounds, t0, t1, t2
+	do_block_Nx	e, \rounds, \i0, \i1, \i2, \i3, \i4
+	.endm
+
 	.macro		decrypt_block, in, rounds, t0, t1, t2
 	do_block_Nx	d, \rounds, \in
 	.endm
 
-	.macro		decrypt_block2x, i0, i1, rounds, t0, t1, t2
-	do_block_Nx	d, \rounds, \i0, \i1
-	.endm
-
 	.macro		decrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
 	do_block_Nx	d, \rounds, \i0, \i1, \i2, \i3
 	.endm
 
+	.macro		decrypt_block5x, i0, i1, i2, i3, i4, rounds, t0, t1, t2
+	do_block_Nx	d, \rounds, \i0, \i1, \i2, \i3, \i4
+	.endm
+
+#define MAX_STRIDE	5
+
 #include "aes-modes.S"
@@ -10,6 +10,18 @@
 	.text
 	.align		4
 
+#ifndef MAX_STRIDE
+#define MAX_STRIDE	4
+#endif
+
+#if MAX_STRIDE == 4
+#define ST4(x...) x
+#define ST5(x...)
+#else
+#define ST4(x...)
+#define ST5(x...) x
+#endif
+
 aes_encrypt_block4x:
 	encrypt_block4x	v0, v1, v2, v3, w3, x2, x8, w7
 	ret
@@ -20,6 +32,18 @@ aes_decrypt_block4x:
 	ret
 ENDPROC(aes_decrypt_block4x)
 
+#if MAX_STRIDE == 5
+aes_encrypt_block5x:
+	encrypt_block5x	v0, v1, v2, v3, v4, w3, x2, x8, w7
+	ret
+ENDPROC(aes_encrypt_block5x)
+
+aes_decrypt_block5x:
+	decrypt_block5x	v0, v1, v2, v3, v4, w3, x2, x8, w7
+	ret
+ENDPROC(aes_decrypt_block5x)
+#endif
+
 /*
  * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
  *		   int blocks)
@@ -34,14 +58,17 @@ AES_ENTRY(aes_ecb_encrypt)
 	enc_prepare	w3, x2, x5
 
 .LecbencloopNx:
-	subs		w4, w4, #4
+	subs		w4, w4, #MAX_STRIDE
 	bmi		.Lecbenc1x
 	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 pt blocks */
-	bl		aes_encrypt_block4x
+ST4(	bl		aes_encrypt_block4x		)
+ST5(	ld1		{v4.16b}, [x1], #16		)
+ST5(	bl		aes_encrypt_block5x		)
 	st1		{v0.16b-v3.16b}, [x0], #64
+ST5(	st1		{v4.16b}, [x0], #16		)
 	b		.LecbencloopNx
 .Lecbenc1x:
-	adds		w4, w4, #4
+	adds		w4, w4, #MAX_STRIDE
 	beq		.Lecbencout
 .Lecbencloop:
 	ld1		{v0.16b}, [x1], #16		/* get next pt block */
@@ -62,14 +89,17 @@ AES_ENTRY(aes_ecb_decrypt)
 	dec_prepare	w3, x2, x5
 
 .LecbdecloopNx:
-	subs		w4, w4, #4
+	subs		w4, w4, #MAX_STRIDE
 	bmi		.Lecbdec1x
 	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 ct blocks */
-	bl		aes_decrypt_block4x
+ST4(	bl		aes_decrypt_block4x		)
+ST5(	ld1		{v4.16b}, [x1], #16		)
+ST5(	bl		aes_decrypt_block5x		)
 	st1		{v0.16b-v3.16b}, [x0], #64
+ST5(	st1		{v4.16b}, [x0], #16		)
 	b		.LecbdecloopNx
 .Lecbdec1x:
-	adds		w4, w4, #4
+	adds		w4, w4, #MAX_STRIDE
 	beq		.Lecbdecout
 .Lecbdecloop:
 	ld1		{v0.16b}, [x1], #16		/* get next ct block */
@@ -129,39 +159,56 @@ AES_ENTRY(aes_cbc_decrypt)
 	stp		x29, x30, [sp, #-16]!
 	mov		x29, sp
 
-	ld1		{v7.16b}, [x5]			/* get iv */
+	ld1		{cbciv.16b}, [x5]		/* get iv */
 	dec_prepare	w3, x2, x6
 
.LcbcdecloopNx:
-	subs		w4, w4, #4
+	subs		w4, w4, #MAX_STRIDE
 	bmi		.Lcbcdec1x
 	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 ct blocks */
+#if MAX_STRIDE == 5
+	ld1		{v4.16b}, [x1], #16		/* get 1 ct block */
+	mov		v5.16b, v0.16b
+	mov		v6.16b, v1.16b
+	mov		v7.16b, v2.16b
+	bl		aes_decrypt_block5x
+	sub		x1, x1, #32
+	eor		v0.16b, v0.16b, cbciv.16b
+	eor		v1.16b, v1.16b, v5.16b
+	ld1		{v5.16b}, [x1], #16		/* reload 1 ct block */
+	ld1		{cbciv.16b}, [x1], #16		/* reload 1 ct block */
+	eor		v2.16b, v2.16b, v6.16b
+	eor		v3.16b, v3.16b, v7.16b
+	eor		v4.16b, v4.16b, v5.16b
+#else
 	mov		v4.16b, v0.16b
 	mov		v5.16b, v1.16b
 	mov		v6.16b, v2.16b
 	bl		aes_decrypt_block4x
 	sub		x1, x1, #16
-	eor		v0.16b, v0.16b, v7.16b
+	eor		v0.16b, v0.16b, cbciv.16b
 	eor		v1.16b, v1.16b, v4.16b
-	ld1		{v7.16b}, [x1], #16		/* reload 1 ct block */
+	ld1		{cbciv.16b}, [x1], #16		/* reload 1 ct block */
 	eor		v2.16b, v2.16b, v5.16b
 	eor		v3.16b, v3.16b, v6.16b
+#endif
 	st1		{v0.16b-v3.16b}, [x0], #64
+ST5(	st1		{v4.16b}, [x0], #16		)
 	b		.LcbcdecloopNx
.Lcbcdec1x:
-	adds		w4, w4, #4
+	adds		w4, w4, #MAX_STRIDE
 	beq		.Lcbcdecout
.Lcbcdecloop:
 	ld1		{v1.16b}, [x1], #16		/* get next ct block */
 	mov		v0.16b, v1.16b			/* ...and copy to v0 */
 	decrypt_block	v0, w3, x2, x6, w7
-	eor		v0.16b, v0.16b, v7.16b		/* xor with iv => pt */
-	mov		v7.16b, v1.16b			/* ct is next iv */
+	eor		v0.16b, v0.16b, cbciv.16b	/* xor with iv => pt */
+	mov		cbciv.16b, v1.16b		/* ct is next iv */
 	st1		{v0.16b}, [x0], #16
 	subs		w4, w4, #1
 	bne		.Lcbcdecloop
.Lcbcdecout:
-	st1		{v7.16b}, [x5]			/* return iv */
+	st1		{cbciv.16b}, [x5]		/* return iv */
 	ldp		x29, x30, [sp], #16
 	ret
AES_ENDPROC(aes_cbc_decrypt)
@@ -255,51 +302,60 @@ AES_ENTRY(aes_ctr_encrypt)
 	mov		x29, sp
 
 	enc_prepare	w3, x2, x6
-	ld1		{v4.16b}, [x5]
+	ld1		{vctr.16b}, [x5]
 
-	umov		x6, v4.d[1]		/* keep swabbed ctr in reg */
+	umov		x6, vctr.d[1]		/* keep swabbed ctr in reg */
 	rev		x6, x6
 	cmn		w6, w4			/* 32 bit overflow? */
 	bcs		.Lctrloop
.LctrloopNx:
-	subs		w4, w4, #4
+	subs		w4, w4, #MAX_STRIDE
 	bmi		.Lctr1x
 	add		w7, w6, #1
-	mov		v0.16b, v4.16b
+	mov		v0.16b, vctr.16b
 	add		w8, w6, #2
-	mov		v1.16b, v4.16b
+	mov		v1.16b, vctr.16b
+	add		w9, w6, #3
+	mov		v2.16b, vctr.16b
 	add		w9, w6, #3
-	mov		v2.16b, v4.16b
 	rev		w7, w7
-	mov		v3.16b, v4.16b
+	mov		v3.16b, vctr.16b
 	rev		w8, w8
+ST5(	mov		v4.16b, vctr.16b	)
 	mov		v1.s[3], w7
 	rev		w9, w9
+ST5(	add		w10, w6, #4		)
 	mov		v2.s[3], w8
+ST5(	rev		w10, w10		)
 	mov		v3.s[3], w9
+ST5(	mov		v4.s[3], w10		)
 	ld1		{v5.16b-v7.16b}, [x1], #48	/* get 3 input blocks */
-	bl		aes_encrypt_block4x
+ST4(	bl		aes_encrypt_block4x	)
+ST5(	bl		aes_encrypt_block5x	)
 	eor		v0.16b, v5.16b, v0.16b
-	ld1		{v5.16b}, [x1], #16		/* get 1 input block */
+ST4(	ld1		{v5.16b}, [x1], #16	)
 	eor		v1.16b, v6.16b, v1.16b
+ST5(	ld1		{v5.16b-v6.16b}, [x1], #32	)
 	eor		v2.16b, v7.16b, v2.16b
 	eor		v3.16b, v5.16b, v3.16b
+ST5(	eor		v4.16b, v6.16b, v4.16b	)
 	st1		{v0.16b-v3.16b}, [x0], #64
-	add		x6, x6, #4
+ST5(	st1		{v4.16b}, [x0], #16	)
+	add		x6, x6, #MAX_STRIDE
 	rev		x7, x6
-	ins		v4.d[1], x7
+	ins		vctr.d[1], x7
 	cbz		w4, .Lctrout
 	b		.LctrloopNx
.Lctr1x:
-	adds		w4, w4, #4
+	adds		w4, w4, #MAX_STRIDE
 	beq		.Lctrout
.Lctrloop:
-	mov		v0.16b, v4.16b
+	mov		v0.16b, vctr.16b
 	encrypt_block	v0, w3, x2, x8, w7
 
 	adds		x6, x6, #1		/* increment BE ctr */
 	rev		x7, x6
-	ins		v4.d[1], x7
+	ins		vctr.d[1], x7
 	bcs		.Lctrcarry		/* overflow? */
 
.Lctrcarrydone:
@@ -311,7 +367,7 @@ AES_ENTRY(aes_ctr_encrypt)
 	bne		.Lctrloop
 
.Lctrout:
-	st1		{v4.16b}, [x5]		/* return next CTR value */
+	st1		{vctr.16b}, [x5]	/* return next CTR value */
 	ldp		x29, x30, [sp], #16
 	ret
@@ -320,11 +376,11 @@ AES_ENTRY(aes_ctr_encrypt)
 	b		.Lctrout
 
.Lctrcarry:
-	umov		x7, v4.d[0]		/* load upper word of ctr */
+	umov		x7, vctr.d[0]		/* load upper word of ctr */
 	rev		x7, x7			/* ... to handle the carry */
 	add		x7, x7, #1
 	rev		x7, x7
-	ins		v4.d[0], x7
+	ins		vctr.d[0], x7
 	b		.Lctrcarrydone
AES_ENDPROC(aes_ctr_encrypt)
@@ -12,6 +12,8 @@
 #define AES_ENDPROC(func)	ENDPROC(neon_ ## func)
 
 	xtsmask		.req	v7
+	cbciv		.req	v7
+	vctr		.req	v4
 
 	.macro		xts_reload_mask, tmp
 	xts_load_mask	\tmp
@@ -114,26 +116,9 @@
 
 	/*
 	 * Interleaved versions: functionally equivalent to the
-	 * ones above, but applied to 2 or 4 AES states in parallel.
+	 * ones above, but applied to AES states in parallel.
 	 */
 
-	.macro		sub_bytes_2x, in0, in1
-	sub		v8.16b, \in0\().16b, v15.16b
-	tbl		\in0\().16b, {v16.16b-v19.16b}, \in0\().16b
-	sub		v9.16b, \in1\().16b, v15.16b
-	tbl		\in1\().16b, {v16.16b-v19.16b}, \in1\().16b
-	sub		v10.16b, v8.16b, v15.16b
-	tbx		\in0\().16b, {v20.16b-v23.16b}, v8.16b
-	sub		v11.16b, v9.16b, v15.16b
-	tbx		\in1\().16b, {v20.16b-v23.16b}, v9.16b
-	sub		v8.16b, v10.16b, v15.16b
-	tbx		\in0\().16b, {v24.16b-v27.16b}, v10.16b
-	sub		v9.16b, v11.16b, v15.16b
-	tbx		\in1\().16b, {v24.16b-v27.16b}, v11.16b
-	tbx		\in0\().16b, {v28.16b-v31.16b}, v8.16b
-	tbx		\in1\().16b, {v28.16b-v31.16b}, v9.16b
-	.endm
-
 	.macro		sub_bytes_4x, in0, in1, in2, in3
 	sub		v8.16b, \in0\().16b, v15.16b
 	tbl		\in0\().16b, {v16.16b-v19.16b}, \in0\().16b
@@ -212,25 +197,6 @@
 	eor		\in1\().16b, \in1\().16b, v11.16b
 	.endm
 
-	.macro		do_block_2x, enc, in0, in1, rounds, rk, rkp, i
-	ld1		{v15.4s}, [\rk]
-	add		\rkp, \rk, #16
-	mov		\i, \rounds
-1111:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
-	eor		\in1\().16b, \in1\().16b, v15.16b	/* ^round key */
-	movi		v15.16b, #0x40
-	tbl		\in0\().16b, {\in0\().16b}, v13.16b	/* ShiftRows */
-	tbl		\in1\().16b, {\in1\().16b}, v13.16b	/* ShiftRows */
-	sub_bytes_2x	\in0, \in1
-	subs		\i, \i, #1
-	ld1		{v15.4s}, [\rkp], #16
-	beq		2222f
-	mix_columns_2x	\in0, \in1, \enc
-	b		1111b
-2222:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
-	eor		\in1\().16b, \in1\().16b, v15.16b	/* ^round key */
-	.endm
-
 	.macro		do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
 	ld1		{v15.4s}, [\rk]
 	add		\rkp, \rk, #16
@@ -257,14 +223,6 @@
 	eor		\in3\().16b, \in3\().16b, v15.16b	/* ^round key */
 	.endm
 
-	.macro		encrypt_block2x, in0, in1, rounds, rk, rkp, i
-	do_block_2x	1, \in0, \in1, \rounds, \rk, \rkp, \i
-	.endm
-
-	.macro		decrypt_block2x, in0, in1, rounds, rk, rkp, i
-	do_block_2x	0, \in0, \in1, \rounds, \rk, \rkp, \i
-	.endm
-
 	.macro		encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
 	do_block_4x	1, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
 	.endm
@@ -60,7 +60,7 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
 }
 
 static int chacha_neon_stream_xor(struct skcipher_request *req,
-				  struct chacha_ctx *ctx, u8 *iv)
+				  const struct chacha_ctx *ctx, const u8 *iv)
 {
 	struct skcipher_walk walk;
 	u32 state[16];
@@ -52,7 +52,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 			 unsigned int len, u8 *out)
 {
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
-	bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
+	bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
 
 	if (!crypto_simd_usable())
 		return crypto_sha1_finup(desc, data, len, out);
@@ -57,7 +57,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 			   unsigned int len, u8 *out)
 {
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
-	bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
+	bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
 
 	if (!crypto_simd_usable()) {
 		if (len)
@@ -371,20 +371,6 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	}
 }
 
-static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
-	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
-
-	aesni_enc(ctx, dst, src);
-}
-
-static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
-	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
-
-	aesni_dec(ctx, dst, src);
-}
-
 static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
 				 unsigned int len)
 {
@@ -920,7 +906,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
 }
 #endif
 
-static struct crypto_alg aesni_algs[] = { {
+static struct crypto_alg aesni_cipher_alg = {
 	.cra_name		= "aes",
 	.cra_driver_name	= "aes-aesni",
 	.cra_priority		= 300,
@@ -937,24 +923,7 @@ static struct crypto_alg aesni_algs[] = { {
 			.cia_decrypt		= aes_decrypt
 		}
 	}
-}, {
-	.cra_name		= "__aes",
-	.cra_driver_name	= "__aes-aesni",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
-	.cra_module		= THIS_MODULE,
-	.cra_u	= {
-		.cipher	= {
-			.cia_min_keysize	= AES_MIN_KEY_SIZE,
-			.cia_max_keysize	= AES_MAX_KEY_SIZE,
-			.cia_setkey		= aes_set_key,
-			.cia_encrypt		= __aes_encrypt,
-			.cia_decrypt		= __aes_decrypt
-		}
-	}
-} };
+};
 
 static struct skcipher_alg aesni_skciphers[] = {
 	{
@@ -1150,7 +1119,7 @@ static int __init aesni_init(void)
 #endif
 #endif
 
-	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
+	err = crypto_register_alg(&aesni_cipher_alg);
 	if (err)
 		return err;
@@ -1158,7 +1127,7 @@ static int __init aesni_init(void)
 					     ARRAY_SIZE(aesni_skciphers),
 					     aesni_simd_skciphers);
 	if (err)
-		goto unregister_algs;
+		goto unregister_cipher;
 
 	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
 					 aesni_simd_aeads);
@@ -1170,8 +1139,8 @@ static int __init aesni_init(void)
 unregister_skciphers:
 	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
 				  aesni_simd_skciphers);
-unregister_algs:
-	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
+unregister_cipher:
+	crypto_unregister_alg(&aesni_cipher_alg);
 	return err;
 }
@@ -1181,7 +1150,7 @@ static void __exit aesni_exit(void)
 				  aesni_simd_aeads);
 	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
 				  aesni_simd_skciphers);
-	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
+	crypto_unregister_alg(&aesni_cipher_alg);
 }
 
 late_initcall(aesni_init);
@@ -124,7 +124,7 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
 }
 
 static int chacha_simd_stream_xor(struct skcipher_walk *walk,
-				  struct chacha_ctx *ctx, u8 *iv)
+				  const struct chacha_ctx *ctx, const u8 *iv)
 {
 	u32 *state, state_buf[16 + 2] __aligned(8);
 	int next_yield = 4096; /* bytes until next FPU yield */
@@ -61,7 +61,6 @@ config CRYPTO_BLKCIPHER2
 	tristate
 	select CRYPTO_ALGAPI2
 	select CRYPTO_RNG2
-	select CRYPTO_WORKQUEUE
 
 config CRYPTO_HASH
 	tristate
@@ -137,10 +136,11 @@ config CRYPTO_USER
 	  Userspace configuration for cryptographic instantiations such as
 	  cbc(aes).
 
+if CRYPTO_MANAGER2
+
 config CRYPTO_MANAGER_DISABLE_TESTS
 	bool "Disable run-time self tests"
 	default y
-	depends on CRYPTO_MANAGER2
 	help
 	  Disable run-time self tests that normally take place at
 	  algorithm registration.
@@ -155,14 +155,10 @@ config CRYPTO_MANAGER_EXTRA_TESTS
 	  This is intended for developer use only, as these tests take much
 	  longer to run than the normal self tests.
 
+endif	# if CRYPTO_MANAGER2
+
 config CRYPTO_GF128MUL
-	tristate "GF(2^128) multiplication functions"
-	help
-	  Efficient table driven implementation of multiplications in the
-	  field GF(2^128).  This is needed by some cypher modes.  This
-	  option will be selected automatically if you select such a
-	  cipher mode.  Only select this option by hand if you expect to load
-	  an external module that requires these functions.
+	tristate
 
 config CRYPTO_NULL
 	tristate "Null algorithms"
@@ -186,15 +182,11 @@ config CRYPTO_PCRYPT
 	  This converts an arbitrary crypto algorithm into a parallel
 	  algorithm that executes in kernel threads.
 
-config CRYPTO_WORKQUEUE
-	tristate
-
 config CRYPTO_CRYPTD
 	tristate "Software async crypto daemon"
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_HASH
 	select CRYPTO_MANAGER
-	select CRYPTO_WORKQUEUE
 	help
 	  This is a generic software asynchronous crypto daemon that
 	  converts an arbitrary synchronous software crypto algorithm
@@ -279,6 +271,7 @@ config CRYPTO_CCM
 	select CRYPTO_CTR
 	select CRYPTO_HASH
 	select CRYPTO_AEAD
+	select CRYPTO_MANAGER
 	help
 	  Support for Counter with CBC MAC. Required for IPsec.
@@ -288,6 +281,7 @@ config CRYPTO_GCM
 	select CRYPTO_AEAD
 	select CRYPTO_GHASH
 	select CRYPTO_NULL
+	select CRYPTO_MANAGER
 	help
 	  Support for Galois/Counter Mode (GCM) and Galois Message
 	  Authentication Code (GMAC). Required for IPSec.
@@ -297,6 +291,7 @@ config CRYPTO_CHACHA20POLY1305
 	select CRYPTO_CHACHA20
 	select CRYPTO_POLY1305
 	select CRYPTO_AEAD
+	select CRYPTO_MANAGER
 	help
 	  ChaCha20-Poly1305 AEAD support, RFC7539.
@@ -411,6 +406,7 @@ config CRYPTO_SEQIV
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_NULL
 	select CRYPTO_RNG_DEFAULT
+	select CRYPTO_MANAGER
 	help
 	  This IV generator generates an IV based on a sequence number by
 	  xoring it with a salt.  This algorithm is mainly useful for CTR
@@ -420,7 +416,7 @@ config CRYPTO_ECHAINIV
 	select CRYPTO_AEAD
 	select CRYPTO_NULL
 	select CRYPTO_RNG_DEFAULT
-	default m
+	select CRYPTO_MANAGER
 	help
 	  This IV generator generates an IV based on the encryption of
 	  a sequence number xored with a salt.  This is the default
@@ -456,6 +452,7 @@ config CRYPTO_CTR
 config CRYPTO_CTS
 	tristate "CTS support"
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_MANAGER
 	help
 	  CTS: Cipher Text Stealing
 	  This is the Cipher Text Stealing mode as described by
@@ -521,6 +518,7 @@ config CRYPTO_XTS
 config CRYPTO_KEYWRAP
 	tristate "Key wrapping support"
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_MANAGER
 	help
 	  Support for key wrapping (NIST SP800-38F / RFC3394) without
 	  padding.
@@ -551,6 +549,7 @@ config CRYPTO_ADIANTUM
 	select CRYPTO_CHACHA20
 	select CRYPTO_POLY1305
 	select CRYPTO_NHPOLY1305
+	select CRYPTO_MANAGER
 	help
 	  Adiantum is a tweakable, length-preserving encryption mode
 	  designed for fast and secure disk encryption, especially on
@@ -684,6 +683,14 @@ config CRYPTO_CRC32_MIPS
 	  instructions, when available.
 
+config CRYPTO_XXHASH
+	tristate "xxHash hash algorithm"
+	select CRYPTO_HASH
+	select XXHASH
+	help
+	  xxHash non-cryptographic hash algorithm. Extremely fast, working at
+	  speeds close to RAM limits.
+
 config CRYPTO_CRCT10DIF
 	tristate "CRCT10DIF algorithm"
 	select CRYPTO_HASH
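As a usage illustration for the new option (not part of the patch): assuming the xxhash_generic.c driver added by this series registers the algorithm as "xxhash64" and takes its 8-byte seed through setkey, a one-shot digest over the shash API would look roughly like this.

    #include <crypto/hash.h>
    #include <linux/err.h>

    static int xxhash64_demo(const u8 *buf, unsigned int len, u64 *out)
    {
        u64 seed = 0;                   /* illustrative fixed seed */
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("xxhash64", 0, 0);   /* assumed cra_name */
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        err = crypto_shash_setkey(tfm, (const u8 *)&seed, sizeof(seed));
        if (!err) {
            SHASH_DESC_ON_STACK(desc, tfm);

            desc->tfm = tfm;
            /* Single-shot: init + update + final over the whole buffer. */
            err = crypto_shash_digest(desc, buf, len, (u8 *)out);
        }

        crypto_free_shash(tfm);
        return err;
    }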
@@ -1230,9 +1237,13 @@ config CRYPTO_ANUBIS
 	  <https://www.cosic.esat.kuleuven.be/nessie/reports/>
 	  <http://www.larc.usp.br/~pbarreto/AnubisPage.html>
 
+config CRYPTO_LIB_ARC4
+	tristate
+
 config CRYPTO_ARC4
 	tristate "ARC4 cipher algorithm"
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_LIB_ARC4
 	help
 	  ARC4 cipher algorithm.
@@ -6,8 +6,6 @@
 obj-$(CONFIG_CRYPTO) += crypto.o
 crypto-y := api.o cipher.o compress.o memneq.o
 
-obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
-
 obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o
 obj-$(CONFIG_CRYPTO_FIPS) += fips.o
@@ -131,6 +129,7 @@ obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
 obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o
 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
+obj-$(CONFIG_CRYPTO_XXHASH) += xxhash_generic.o
 obj-$(CONFIG_CRYPTO_842) += 842.o
 obj-$(CONFIG_CRYPTO_RNG2) += rng.o
 obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
@@ -84,6 +84,42 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 }
 EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
 
+int crypto_aead_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct crypto_alg *alg = aead->base.__crt_alg;
+	unsigned int cryptlen = req->cryptlen;
+	int ret;
+
+	crypto_stats_get(alg);
+	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+		ret = -ENOKEY;
+	else
+		ret = crypto_aead_alg(aead)->encrypt(req);
+	crypto_stats_aead_encrypt(cryptlen, alg, ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
+
+int crypto_aead_decrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct crypto_alg *alg = aead->base.__crt_alg;
+	unsigned int cryptlen = req->cryptlen;
+	int ret;
+
+	crypto_stats_get(alg);
+	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+		ret = -ENOKEY;
+	else if (req->cryptlen < crypto_aead_authsize(aead))
+		ret = -EINVAL;
+	else
+		ret = crypto_aead_alg(aead)->decrypt(req);
+	crypto_stats_aead_decrypt(cryptlen, alg, ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
+
 static void crypto_aead_exit_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_aead *aead = __crypto_aead_cast(tfm);
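For callers, nothing changes except that these entry points are now out-of-line functions that enforce the NEED_KEY and authsize checks shown above. A caller's-eye sketch, illustrative only ("gcm(aes)", the buffer sizes and the all-zero key/nonce are assumptions, not part of the patch):

    #include <crypto/aead.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static int aead_demo(void)
    {
        u8 key[16] = { 0 };            /* illustrative AES-128 key */
        u8 iv[12] = { 0 };             /* GCM nonce */
        struct crypto_aead *tfm;
        struct aead_request *req = NULL;
        struct scatterlist sg;
        u8 *buf = NULL;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        /* Without a successful setkey, the wrapper now returns -ENOKEY. */
        err = crypto_aead_setkey(tfm, key, sizeof(key));
        if (err)
            goto out;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        buf = kzalloc(32 + 16, GFP_KERNEL);   /* 32B plaintext + 16B tag */
        if (!req || !buf) {
            err = -ENOMEM;
            goto out;
        }

        sg_init_one(&sg, buf, 32 + 16);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                  crypto_req_done, &wait);
        aead_request_set_ad(req, 0);
        aead_request_set_crypt(req, &sg, &sg, 32, iv);

        err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
    out:
        kfree(buf);
        aead_request_free(req);
        crypto_free_aead(tfm);
        return err;
    }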
@@ -21,23 +21,6 @@
 
 static LIST_HEAD(crypto_template_list);
 
-static inline int crypto_set_driver_name(struct crypto_alg *alg)
-{
-	static const char suffix[] = "-generic";
-	char *driver_name = alg->cra_driver_name;
-	int len;
-
-	if (*driver_name)
-		return 0;
-
-	len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
-	if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	memcpy(driver_name + len, suffix, sizeof(suffix));
-	return 0;
-}
-
 static inline void crypto_check_module_sig(struct module *mod)
 {
 	if (fips_enabled && mod && !module_sig_ok(mod))
@@ -49,6 +32,9 @@ static int crypto_check_alg(struct crypto_alg *alg)
 {
 	crypto_check_module_sig(alg->cra_module);
 
+	if (!alg->cra_name[0] || !alg->cra_driver_name[0])
+		return -EINVAL;
+
 	if (alg->cra_alignmask & (alg->cra_alignmask + 1))
 		return -EINVAL;
@ -74,7 +60,7 @@ static int crypto_check_alg(struct crypto_alg *alg)
|
|||||||
|
|
||||||
refcount_set(&alg->cra_refcnt, 1);
|
refcount_set(&alg->cra_refcnt, 1);
|
||||||
|
|
||||||
return crypto_set_driver_name(alg);
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void crypto_free_instance(struct crypto_instance *inst)
|
static void crypto_free_instance(struct crypto_instance *inst)
|
||||||
@ -947,19 +933,6 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(crypto_dequeue_request);
|
EXPORT_SYMBOL_GPL(crypto_dequeue_request);
|
||||||
|
|
||||||
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm)
|
|
||||||
{
|
|
||||||
struct crypto_async_request *req;
|
|
||||||
|
|
||||||
list_for_each_entry(req, &queue->list, list) {
|
|
||||||
if (req->tfm == tfm)
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(crypto_tfm_in_queue);
|
|
||||||
|
|
||||||
static inline void crypto_inc_byte(u8 *a, unsigned int size)
|
static inline void crypto_inc_byte(u8 *a, unsigned int size)
|
||||||
{
|
{
|
||||||
u8 *b = (a + size);
|
u8 *b = (a + size);
|
||||||
|
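
Aside (illustration only): with crypto_set_driver_name() gone, the
"-generic" suffix is no longer filled in automatically, so every algorithm
must now spell out both names itself or registration fails with -EINVAL.
A hypothetical registration sketch, following the pattern the hunks below
apply to the in-tree generic implementations:

    static struct crypto_alg example_alg = {
        .cra_name        = "example",
        .cra_driver_name = "example-generic", /* now mandatory */
        /* ... */
    };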
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -673,6 +673,7 @@ static void anubis_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 
 static struct crypto_alg anubis_alg = {
     .cra_name        = "anubis",
+    .cra_driver_name = "anubis-generic",
     .cra_flags       = CRYPTO_ALG_TYPE_CIPHER,
     .cra_blocksize   = ANUBIS_BLOCK_SIZE,
     .cra_ctxsize     = sizeof (struct anubis_ctx),
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -13,84 +13,15 @@
 #include <linux/init.h>
 #include <linux/module.h>
 
-struct arc4_ctx {
-    u32 S[256];
-    u32 x, y;
-};
-
-static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-            unsigned int key_len)
+static int crypto_arc4_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
+                  unsigned int key_len)
 {
-    struct arc4_ctx *ctx = crypto_tfm_ctx(tfm);
-    int i, j = 0, k = 0;
-
-    ctx->x = 1;
-    ctx->y = 0;
-
-    for (i = 0; i < 256; i++)
-        ctx->S[i] = i;
-
-    for (i = 0; i < 256; i++) {
-        u32 a = ctx->S[i];
-
-        j = (j + in_key[k] + a) & 0xff;
-        ctx->S[i] = ctx->S[j];
-        ctx->S[j] = a;
-        if (++k >= key_len)
-            k = 0;
-    }
-
-    return 0;
-}
-
-static int arc4_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
-                 unsigned int key_len)
-{
-    return arc4_set_key(&tfm->base, in_key, key_len);
-}
-
-static void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in,
-               unsigned int len)
-{
-    u32 *const S = ctx->S;
-    u32 x, y, a, b;
-    u32 ty, ta, tb;
-
-    if (len == 0)
-        return;
-
-    x = ctx->x;
-    y = ctx->y;
-
-    a = S[x];
-    y = (y + a) & 0xff;
-    b = S[y];
-
-    do {
-        S[y] = a;
-        a = (a + b) & 0xff;
-        S[x] = b;
-        x = (x + 1) & 0xff;
-        ta = S[x];
-        ty = (y + ta) & 0xff;
-        tb = S[ty];
-        *out++ = *in++ ^ S[a];
-        if (--len == 0)
-            break;
-        y = ty;
-        a = ta;
-        b = tb;
-    } while (true);
-
-    ctx->x = x;
-    ctx->y = y;
-}
-
-static void arc4_crypt_one(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
-    arc4_crypt(crypto_tfm_ctx(tfm), out, in, 1);
-}
-
-static int ecb_arc4_crypt(struct skcipher_request *req)
+    struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+    return arc4_setkey(ctx, in_key, key_len);
+}
+
+static int crypto_arc4_crypt(struct skcipher_request *req)
 {
     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
     struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -108,54 +39,32 @@ static int ecb_arc4_crypt(struct skcipher_request *req)
     return err;
 }
 
-static struct crypto_alg arc4_cipher = {
-    .cra_name       = "arc4",
-    .cra_flags      = CRYPTO_ALG_TYPE_CIPHER,
-    .cra_blocksize  = ARC4_BLOCK_SIZE,
-    .cra_ctxsize    = sizeof(struct arc4_ctx),
-    .cra_module     = THIS_MODULE,
-    .cra_u          = {
-        .cipher = {
-            .cia_min_keysize = ARC4_MIN_KEY_SIZE,
-            .cia_max_keysize = ARC4_MAX_KEY_SIZE,
-            .cia_setkey      = arc4_set_key,
-            .cia_encrypt     = arc4_crypt_one,
-            .cia_decrypt     = arc4_crypt_one,
-        },
-    },
-};
-
-static struct skcipher_alg arc4_skcipher = {
+static struct skcipher_alg arc4_alg = {
+    /*
+     * For legacy reasons, this is named "ecb(arc4)", not "arc4".
+     * Nevertheless it's actually a stream cipher, not a block cipher.
+     */
     .base.cra_name        = "ecb(arc4)",
+    .base.cra_driver_name = "ecb(arc4)-generic",
     .base.cra_priority    = 100,
     .base.cra_blocksize   = ARC4_BLOCK_SIZE,
     .base.cra_ctxsize     = sizeof(struct arc4_ctx),
     .base.cra_module      = THIS_MODULE,
     .min_keysize          = ARC4_MIN_KEY_SIZE,
     .max_keysize          = ARC4_MAX_KEY_SIZE,
-    .setkey               = arc4_set_key_skcipher,
-    .encrypt              = ecb_arc4_crypt,
-    .decrypt              = ecb_arc4_crypt,
+    .setkey               = crypto_arc4_setkey,
+    .encrypt              = crypto_arc4_crypt,
+    .decrypt              = crypto_arc4_crypt,
 };
 
 static int __init arc4_init(void)
 {
-    int err;
-
-    err = crypto_register_alg(&arc4_cipher);
-    if (err)
-        return err;
-
-    err = crypto_register_skcipher(&arc4_skcipher);
-    if (err)
-        crypto_unregister_alg(&arc4_cipher);
-    return err;
+    return crypto_register_skcipher(&arc4_alg);
 }
 
 static void __exit arc4_exit(void)
 {
-    crypto_unregister_alg(&arc4_cipher);
-    crypto_unregister_skcipher(&arc4_skcipher);
+    crypto_unregister_skcipher(&arc4_alg);
 }
 
 subsys_initcall(arc4_init);
@@ -164,4 +73,4 @@ module_exit(arc4_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
 MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>");
-MODULE_ALIAS_CRYPTO("arc4");
+MODULE_ALIAS_CRYPTO("ecb(arc4)");
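
Aside (illustration only): the RC4 state handling now lives in the shared
library helpers arc4_setkey() and arc4_crypt() from <crypto/arc4.h>, which
can also be called directly, without allocating a crypto tfm. A minimal
sketch, with illustrative function and buffer names:

    #include <crypto/arc4.h>

    static int example_arc4(const u8 *key, unsigned int keylen,
                            u8 *buf, unsigned int len)
    {
        struct arc4_ctx ctx;
        int err;

        err = arc4_setkey(&ctx, key, keylen); /* checks key length */
        if (err)
            return err;
        arc4_crypt(&ctx, buf, buf, len);      /* in-place keystream XOR */
        memzero_explicit(&ctx, sizeof(ctx));  /* wipe cipher state */
        return 0;
    }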
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -15,6 +15,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
 	select MPILIB
 	select CRYPTO_HASH_INFO
 	select CRYPTO_AKCIPHER
+	select CRYPTO_HASH
 	help
 	  This option provides support for asymmetric public key type handling.
 	  If signature generation and/or verification are to be used,
@@ -65,6 +66,7 @@ config TPM_KEY_PARSER
 config PKCS7_MESSAGE_PARSER
 	tristate "PKCS#7 message parser"
 	depends on X509_CERTIFICATE_PARSER
+	select CRYPTO_HASH
 	select ASN1
 	select OID_REGISTRY
 	help
@@ -87,6 +89,7 @@ config SIGNED_PE_FILE_VERIFICATION
 	bool "Support for PE file signature verification"
 	depends on PKCS7_MESSAGE_PARSER=y
 	depends on SYSTEM_DATA_VERIFICATION
+	select CRYPTO_HASH
 	select ASN1
 	select OID_REGISTRY
 	help
|
@ -61,6 +61,8 @@ struct chachapoly_req_ctx {
|
|||||||
unsigned int cryptlen;
|
unsigned int cryptlen;
|
||||||
/* Actual AD, excluding IV */
|
/* Actual AD, excluding IV */
|
||||||
unsigned int assoclen;
|
unsigned int assoclen;
|
||||||
|
/* request flags, with MAY_SLEEP cleared if needed */
|
||||||
|
u32 flags;
|
||||||
union {
|
union {
|
||||||
struct poly_req poly;
|
struct poly_req poly;
|
||||||
struct chacha_req chacha;
|
struct chacha_req chacha;
|
||||||
@ -70,8 +72,12 @@ struct chachapoly_req_ctx {
|
|||||||
static inline void async_done_continue(struct aead_request *req, int err,
|
static inline void async_done_continue(struct aead_request *req, int err,
|
||||||
int (*cont)(struct aead_request *))
|
int (*cont)(struct aead_request *))
|
||||||
{
|
{
|
||||||
if (!err)
|
if (!err) {
|
||||||
|
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
||||||
|
|
||||||
|
rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||||
err = cont(req);
|
err = cont(req);
|
||||||
|
}
|
||||||
|
|
||||||
if (err != -EINPROGRESS && err != -EBUSY)
|
if (err != -EINPROGRESS && err != -EBUSY)
|
||||||
aead_request_complete(req, err);
|
aead_request_complete(req, err);
|
||||||
@ -129,16 +135,12 @@ static int chacha_decrypt(struct aead_request *req)
|
|||||||
|
|
||||||
chacha_iv(creq->iv, req, 1);
|
chacha_iv(creq->iv, req, 1);
|
||||||
|
|
||||||
sg_init_table(rctx->src, 2);
|
|
||||||
src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
|
src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
|
||||||
dst = src;
|
dst = src;
|
||||||
|
if (req->src != req->dst)
|
||||||
if (req->src != req->dst) {
|
|
||||||
sg_init_table(rctx->dst, 2);
|
|
||||||
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
|
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
|
||||||
}
|
|
||||||
|
|
||||||
skcipher_request_set_callback(&creq->req, aead_request_flags(req),
|
skcipher_request_set_callback(&creq->req, rctx->flags,
|
||||||
chacha_decrypt_done, req);
|
chacha_decrypt_done, req);
|
||||||
skcipher_request_set_tfm(&creq->req, ctx->chacha);
|
skcipher_request_set_tfm(&creq->req, ctx->chacha);
|
||||||
skcipher_request_set_crypt(&creq->req, src, dst,
|
skcipher_request_set_crypt(&creq->req, src, dst,
|
||||||
@ -172,17 +174,13 @@ static int poly_tail(struct aead_request *req)
|
|||||||
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
|
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
|
||||||
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
||||||
struct poly_req *preq = &rctx->u.poly;
|
struct poly_req *preq = &rctx->u.poly;
|
||||||
__le64 len;
|
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
sg_init_table(preq->src, 1);
|
preq->tail.assoclen = cpu_to_le64(rctx->assoclen);
|
||||||
len = cpu_to_le64(rctx->assoclen);
|
preq->tail.cryptlen = cpu_to_le64(rctx->cryptlen);
|
||||||
memcpy(&preq->tail.assoclen, &len, sizeof(len));
|
sg_init_one(preq->src, &preq->tail, sizeof(preq->tail));
|
||||||
len = cpu_to_le64(rctx->cryptlen);
|
|
||||||
memcpy(&preq->tail.cryptlen, &len, sizeof(len));
|
|
||||||
sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail));
|
|
||||||
|
|
||||||
ahash_request_set_callback(&preq->req, aead_request_flags(req),
|
ahash_request_set_callback(&preq->req, rctx->flags,
|
||||||
poly_tail_done, req);
|
poly_tail_done, req);
|
||||||
ahash_request_set_tfm(&preq->req, ctx->poly);
|
ahash_request_set_tfm(&preq->req, ctx->poly);
|
||||||
ahash_request_set_crypt(&preq->req, preq->src,
|
ahash_request_set_crypt(&preq->req, preq->src,
|
||||||
@ -205,15 +203,14 @@ static int poly_cipherpad(struct aead_request *req)
|
|||||||
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
|
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
|
||||||
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
||||||
struct poly_req *preq = &rctx->u.poly;
|
struct poly_req *preq = &rctx->u.poly;
|
||||||
unsigned int padlen, bs = POLY1305_BLOCK_SIZE;
|
unsigned int padlen;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
padlen = (bs - (rctx->cryptlen % bs)) % bs;
|
padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE;
|
||||||
memset(preq->pad, 0, sizeof(preq->pad));
|
memset(preq->pad, 0, sizeof(preq->pad));
|
||||||
sg_init_table(preq->src, 1);
|
sg_init_one(preq->src, preq->pad, padlen);
|
||||||
sg_set_buf(preq->src, &preq->pad, padlen);
|
|
||||||
|
|
||||||
ahash_request_set_callback(&preq->req, aead_request_flags(req),
|
ahash_request_set_callback(&preq->req, rctx->flags,
|
||||||
poly_cipherpad_done, req);
|
poly_cipherpad_done, req);
|
||||||
ahash_request_set_tfm(&preq->req, ctx->poly);
|
ahash_request_set_tfm(&preq->req, ctx->poly);
|
||||||
ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
|
ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
|
||||||
@ -241,10 +238,9 @@ static int poly_cipher(struct aead_request *req)
|
|||||||
if (rctx->cryptlen == req->cryptlen) /* encrypting */
|
if (rctx->cryptlen == req->cryptlen) /* encrypting */
|
||||||
crypt = req->dst;
|
crypt = req->dst;
|
||||||
|
|
||||||
sg_init_table(rctx->src, 2);
|
|
||||||
crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
|
crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
|
||||||
|
|
||||||
ahash_request_set_callback(&preq->req, aead_request_flags(req),
|
ahash_request_set_callback(&preq->req, rctx->flags,
|
||||||
poly_cipher_done, req);
|
poly_cipher_done, req);
|
||||||
ahash_request_set_tfm(&preq->req, ctx->poly);
|
ahash_request_set_tfm(&preq->req, ctx->poly);
|
||||||
ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
|
ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
|
||||||
@ -266,15 +262,14 @@ static int poly_adpad(struct aead_request *req)
|
|||||||
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
|
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
|
||||||
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
||||||
struct poly_req *preq = &rctx->u.poly;
|
struct poly_req *preq = &rctx->u.poly;
|
||||||
unsigned int padlen, bs = POLY1305_BLOCK_SIZE;
|
unsigned int padlen;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
padlen = (bs - (rctx->assoclen % bs)) % bs;
|
padlen = -rctx->assoclen % POLY1305_BLOCK_SIZE;
|
||||||
memset(preq->pad, 0, sizeof(preq->pad));
|
memset(preq->pad, 0, sizeof(preq->pad));
|
||||||
sg_init_table(preq->src, 1);
|
sg_init_one(preq->src, preq->pad, padlen);
|
||||||
sg_set_buf(preq->src, preq->pad, padlen);
|
|
||||||
|
|
||||||
ahash_request_set_callback(&preq->req, aead_request_flags(req),
|
ahash_request_set_callback(&preq->req, rctx->flags,
|
||||||
poly_adpad_done, req);
|
poly_adpad_done, req);
|
||||||
ahash_request_set_tfm(&preq->req, ctx->poly);
|
ahash_request_set_tfm(&preq->req, ctx->poly);
|
||||||
ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
|
ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
|
||||||
@ -298,7 +293,7 @@ static int poly_ad(struct aead_request *req)
|
|||||||
struct poly_req *preq = &rctx->u.poly;
|
struct poly_req *preq = &rctx->u.poly;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
ahash_request_set_callback(&preq->req, aead_request_flags(req),
|
ahash_request_set_callback(&preq->req, rctx->flags,
|
||||||
poly_ad_done, req);
|
poly_ad_done, req);
|
||||||
ahash_request_set_tfm(&preq->req, ctx->poly);
|
ahash_request_set_tfm(&preq->req, ctx->poly);
|
||||||
ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
|
ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
|
||||||
@ -322,10 +317,9 @@ static int poly_setkey(struct aead_request *req)
|
|||||||
struct poly_req *preq = &rctx->u.poly;
|
struct poly_req *preq = &rctx->u.poly;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
sg_init_table(preq->src, 1);
|
sg_init_one(preq->src, rctx->key, sizeof(rctx->key));
|
||||||
sg_set_buf(preq->src, rctx->key, sizeof(rctx->key));
|
|
||||||
|
|
||||||
ahash_request_set_callback(&preq->req, aead_request_flags(req),
|
ahash_request_set_callback(&preq->req, rctx->flags,
|
||||||
poly_setkey_done, req);
|
poly_setkey_done, req);
|
||||||
ahash_request_set_tfm(&preq->req, ctx->poly);
|
ahash_request_set_tfm(&preq->req, ctx->poly);
|
||||||
ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
|
ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
|
||||||
@ -349,7 +343,7 @@ static int poly_init(struct aead_request *req)
|
|||||||
struct poly_req *preq = &rctx->u.poly;
|
struct poly_req *preq = &rctx->u.poly;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
ahash_request_set_callback(&preq->req, aead_request_flags(req),
|
ahash_request_set_callback(&preq->req, rctx->flags,
|
||||||
poly_init_done, req);
|
poly_init_done, req);
|
||||||
ahash_request_set_tfm(&preq->req, ctx->poly);
|
ahash_request_set_tfm(&preq->req, ctx->poly);
|
||||||
|
|
||||||
@ -381,13 +375,12 @@ static int poly_genkey(struct aead_request *req)
|
|||||||
rctx->assoclen -= 8;
|
rctx->assoclen -= 8;
|
||||||
}
|
}
|
||||||
|
|
||||||
sg_init_table(creq->src, 1);
|
|
||||||
memset(rctx->key, 0, sizeof(rctx->key));
|
memset(rctx->key, 0, sizeof(rctx->key));
|
||||||
sg_set_buf(creq->src, rctx->key, sizeof(rctx->key));
|
sg_init_one(creq->src, rctx->key, sizeof(rctx->key));
|
||||||
|
|
||||||
chacha_iv(creq->iv, req, 0);
|
chacha_iv(creq->iv, req, 0);
|
||||||
|
|
||||||
skcipher_request_set_callback(&creq->req, aead_request_flags(req),
|
skcipher_request_set_callback(&creq->req, rctx->flags,
|
||||||
poly_genkey_done, req);
|
poly_genkey_done, req);
|
||||||
skcipher_request_set_tfm(&creq->req, ctx->chacha);
|
skcipher_request_set_tfm(&creq->req, ctx->chacha);
|
||||||
skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
|
skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
|
||||||
@ -418,16 +411,12 @@ static int chacha_encrypt(struct aead_request *req)
|
|||||||
|
|
||||||
chacha_iv(creq->iv, req, 1);
|
chacha_iv(creq->iv, req, 1);
|
||||||
|
|
||||||
sg_init_table(rctx->src, 2);
|
|
||||||
src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
|
src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
|
||||||
dst = src;
|
dst = src;
|
||||||
|
if (req->src != req->dst)
|
||||||
if (req->src != req->dst) {
|
|
||||||
sg_init_table(rctx->dst, 2);
|
|
||||||
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
|
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
|
||||||
}
|
|
||||||
|
|
||||||
skcipher_request_set_callback(&creq->req, aead_request_flags(req),
|
skcipher_request_set_callback(&creq->req, rctx->flags,
|
||||||
chacha_encrypt_done, req);
|
chacha_encrypt_done, req);
|
||||||
skcipher_request_set_tfm(&creq->req, ctx->chacha);
|
skcipher_request_set_tfm(&creq->req, ctx->chacha);
|
||||||
skcipher_request_set_crypt(&creq->req, src, dst,
|
skcipher_request_set_crypt(&creq->req, src, dst,
|
||||||
@ -445,6 +434,7 @@ static int chachapoly_encrypt(struct aead_request *req)
|
|||||||
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
||||||
|
|
||||||
rctx->cryptlen = req->cryptlen;
|
rctx->cryptlen = req->cryptlen;
|
||||||
|
rctx->flags = aead_request_flags(req);
|
||||||
|
|
||||||
/* encrypt call chain:
|
/* encrypt call chain:
|
||||||
* - chacha_encrypt/done()
|
* - chacha_encrypt/done()
|
||||||
@ -466,6 +456,7 @@ static int chachapoly_decrypt(struct aead_request *req)
|
|||||||
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
|
||||||
|
|
||||||
rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
|
rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
|
||||||
|
rctx->flags = aead_request_flags(req);
|
||||||
|
|
||||||
/* decrypt call chain:
|
/* decrypt call chain:
|
||||||
* - poly_genkey/done()
|
* - poly_genkey/done()
|
||||||
|
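
Aside (illustration only): the new padlen computation relies on unsigned
arithmetic: for an unsigned len, -len % bs equals (bs - (len % bs)) % bs
whenever bs divides 2^32, which holds for POLY1305_BLOCK_SIZE (16). A
standalone user-space check of the identity:

    #include <assert.h>

    int main(void)
    {
        const unsigned int bs = 16; /* POLY1305_BLOCK_SIZE */
        unsigned int len;

        /* unsigned negation reduces mod 2^32, and 16 divides 2^32 */
        for (len = 0; len < 1000000; len++)
            assert(-len % bs == (bs - (len % bs)) % bs);
        return 0;
    }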
--- a/crypto/chacha_generic.c
+++ b/crypto/chacha_generic.c
@@ -32,7 +32,7 @@ static void chacha_docrypt(u32 *state, u8 *dst, const u8 *src,
 }
 
 static int chacha_stream_xor(struct skcipher_request *req,
-                 struct chacha_ctx *ctx, u8 *iv)
+                 const struct chacha_ctx *ctx, const u8 *iv)
 {
     struct skcipher_walk walk;
     u32 state[16];
@@ -56,7 +56,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
     return err;
 }
 
-void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv)
+void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv)
 {
     state[0] = 0x61707865; /* "expa" */
     state[1] = 0x3320646e; /* "nd 3" */
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -16,7 +16,6 @@
 #include <crypto/internal/aead.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/cryptd.h>
-#include <crypto/crypto_wq.h>
 #include <linux/atomic.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -26,11 +25,14 @@
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 
 static unsigned int cryptd_max_cpu_qlen = 1000;
 module_param(cryptd_max_cpu_qlen, uint, 0);
 MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
 
+static struct workqueue_struct *cryptd_wq;
+
 struct cryptd_cpu_queue {
     struct crypto_queue queue;
     struct work_struct work;
@@ -136,7 +138,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
     if (err == -ENOSPC)
         goto out_put_cpu;
 
-    queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+    queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
 
     if (!atomic_read(refcnt))
         goto out_put_cpu;
@@ -179,7 +181,7 @@ static void cryptd_queue_worker(struct work_struct *work)
     req->complete(req, 0);
 
     if (cpu_queue->queue.qlen)
-        queue_work(kcrypto_wq, &cpu_queue->work);
+        queue_work(cryptd_wq, &cpu_queue->work);
 }
 
 static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
@@ -919,7 +921,7 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
     switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
     case CRYPTO_ALG_TYPE_BLKCIPHER:
         return cryptd_create_skcipher(tmpl, tb, &queue);
-    case CRYPTO_ALG_TYPE_DIGEST:
+    case CRYPTO_ALG_TYPE_HASH:
         return cryptd_create_hash(tmpl, tb, &queue);
     case CRYPTO_ALG_TYPE_AEAD:
         return cryptd_create_aead(tmpl, tb, &queue);
@@ -1119,19 +1121,31 @@ static int __init cryptd_init(void)
 {
     int err;
 
+    cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
+                    1);
+    if (!cryptd_wq)
+        return -ENOMEM;
+
     err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
     if (err)
-        return err;
+        goto err_destroy_wq;
 
     err = crypto_register_template(&cryptd_tmpl);
     if (err)
-        cryptd_fini_queue(&queue);
+        goto err_fini_queue;
 
+    return 0;
+
+err_fini_queue:
+    cryptd_fini_queue(&queue);
+err_destroy_wq:
+    destroy_workqueue(cryptd_wq);
     return err;
 }
 
 static void __exit cryptd_exit(void)
 {
+    destroy_workqueue(cryptd_wq);
     cryptd_fini_queue(&queue);
     crypto_unregister_template(&cryptd_tmpl);
 }
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -100,6 +100,7 @@ static struct shash_alg digest_null = {
     .final            = null_final,
     .base             = {
         .cra_name        = "digest_null",
+        .cra_driver_name = "digest_null-generic",
         .cra_blocksize   = NULL_BLOCK_SIZE,
         .cra_module      = THIS_MODULE,
     }
@@ -122,6 +123,7 @@ static struct skcipher_alg skcipher_null = {
 
 static struct crypto_alg null_algs[] = { {
     .cra_name        = "cipher_null",
+    .cra_driver_name = "cipher_null-generic",
     .cra_flags       = CRYPTO_ALG_TYPE_CIPHER,
     .cra_blocksize   = NULL_BLOCK_SIZE,
     .cra_ctxsize     = 0,
@@ -134,6 +136,7 @@ static struct crypto_alg null_algs[] = { {
             .cia_decrypt = null_crypt } }
 }, {
     .cra_name        = "compress_null",
+    .cra_driver_name = "compress_null-generic",
     .cra_flags       = CRYPTO_ALG_TYPE_COMPRESS,
     .cra_blocksize   = NULL_BLOCK_SIZE,
     .cra_ctxsize     = 0,
--- a/crypto/crypto_wq.c
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Workqueue for crypto subsystem
- *
- * Copyright (c) 2009 Intel Corp.
- * Author: Huang Ying <ying.huang@intel.com>
- */
-
-#include <linux/workqueue.h>
-#include <linux/module.h>
-#include <crypto/algapi.h>
-#include <crypto/crypto_wq.h>
-
-struct workqueue_struct *kcrypto_wq;
-EXPORT_SYMBOL_GPL(kcrypto_wq);
-
-static int __init crypto_wq_init(void)
-{
-    kcrypto_wq = alloc_workqueue("crypto",
-                     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
-    if (unlikely(!kcrypto_wq))
-        return -ENOMEM;
-    return 0;
-}
-
-static void __exit crypto_wq_exit(void)
-{
-    destroy_workqueue(kcrypto_wq);
-}
-
-subsys_initcall(crypto_wq_init);
-module_exit(crypto_wq_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Workqueue for crypto subsystem");
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -275,6 +275,7 @@ static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src,
 
 static struct crypto_alg alg = {
     .cra_name        = "deflate",
+    .cra_driver_name = "deflate-generic",
     .cra_flags       = CRYPTO_ALG_TYPE_COMPRESS,
     .cra_ctxsize     = sizeof(struct deflate_ctx),
     .cra_module      = THIS_MODULE,
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -219,6 +219,57 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
     }
 }
 
+/*
+ * FIPS 140-2 continuous self test for the noise source
+ * The test is performed on the noise source input data. Thus, the function
+ * implicitly knows the size of the buffer to be equal to the security
+ * strength.
+ *
+ * Note, this function disregards the nonce trailing the entropy data during
+ * initial seeding.
+ *
+ * drbg->drbg_mutex must have been taken.
+ *
+ * @drbg DRBG handle
+ * @entropy buffer of seed data to be checked
+ *
+ * return:
+ *    0 on success
+ *    -EAGAIN on when the CTRNG is not yet primed
+ *    < 0 on error
+ */
+static int drbg_fips_continuous_test(struct drbg_state *drbg,
+                     const unsigned char *entropy)
+{
+    unsigned short entropylen = drbg_sec_strength(drbg->core->flags);
+    int ret = 0;
+
+    if (!IS_ENABLED(CONFIG_CRYPTO_FIPS))
+        return 0;
+
+    /* skip test if we test the overall system */
+    if (list_empty(&drbg->test_data.list))
+        return 0;
+    /* only perform test in FIPS mode */
+    if (!fips_enabled)
+        return 0;
+
+    if (!drbg->fips_primed) {
+        /* Priming of FIPS test */
+        memcpy(drbg->prev, entropy, entropylen);
+        drbg->fips_primed = true;
+        /* priming: another round is needed */
+        return -EAGAIN;
+    }
+    ret = memcmp(drbg->prev, entropy, entropylen);
+    if (!ret)
+        panic("DRBG continuous self test failed\n");
+    memcpy(drbg->prev, entropy, entropylen);
+
+    /* the test shall pass when the two values are not equal */
+    return 0;
+}
+
 /*
  * Convert an integer into a byte representation of this integer.
  * The byte representation is big-endian
@@ -998,6 +1049,22 @@ static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
     return ret;
 }
 
+static inline int drbg_get_random_bytes(struct drbg_state *drbg,
+                    unsigned char *entropy,
+                    unsigned int entropylen)
+{
+    int ret;
+
+    do {
+        get_random_bytes(entropy, entropylen);
+        ret = drbg_fips_continuous_test(drbg, entropy);
+        if (ret && ret != -EAGAIN)
+            return ret;
+    } while (ret);
+
+    return 0;
+}
+
 static void drbg_async_seed(struct work_struct *work)
 {
     struct drbg_string data;
@@ -1006,16 +1073,20 @@ static void drbg_async_seed(struct work_struct *work)
                            seed_work);
     unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
     unsigned char entropy[32];
+    int ret;
 
     BUG_ON(!entropylen);
     BUG_ON(entropylen > sizeof(entropy));
-    get_random_bytes(entropy, entropylen);
 
     drbg_string_fill(&data, entropy, entropylen);
     list_add_tail(&data.list, &seedlist);
 
     mutex_lock(&drbg->drbg_mutex);
 
+    ret = drbg_get_random_bytes(drbg, entropy, entropylen);
+    if (ret)
+        goto unlock;
+
     /* If nonblocking pool is initialized, deactivate Jitter RNG */
     crypto_free_rng(drbg->jent);
     drbg->jent = NULL;
@@ -1030,6 +1101,7 @@ static void drbg_async_seed(struct work_struct *work)
     if (drbg->seeded)
         drbg->reseed_threshold = drbg_max_requests(drbg);
 
+unlock:
     mutex_unlock(&drbg->drbg_mutex);
 
     memzero_explicit(entropy, entropylen);
@@ -1081,7 +1153,9 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
     BUG_ON((entropylen * 2) > sizeof(entropy));
 
     /* Get seed from in-kernel /dev/urandom */
-    get_random_bytes(entropy, entropylen);
+    ret = drbg_get_random_bytes(drbg, entropy, entropylen);
+    if (ret)
+        goto out;
 
     if (!drbg->jent) {
         drbg_string_fill(&data1, entropy, entropylen);
@@ -1094,7 +1168,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
                        entropylen);
         if (ret) {
             pr_devel("DRBG: jent failed with %d\n", ret);
-            return ret;
+            goto out;
         }
 
         drbg_string_fill(&data1, entropy, entropylen * 2);
@@ -1121,6 +1195,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
 
     ret = __drbg_seed(drbg, &seedlist, reseed);
 
+out:
     memzero_explicit(entropy, entropylen * 2);
 
     return ret;
@@ -1142,6 +1217,11 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
     drbg->reseed_ctr = 0;
     drbg->d_ops = NULL;
     drbg->core = NULL;
+    if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
+        kzfree(drbg->prev);
+        drbg->prev = NULL;
+        drbg->fips_primed = false;
+    }
 }
 
 /*
@@ -1211,6 +1291,14 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
         drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
     }
 
+    if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
+        drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags),
+                     GFP_KERNEL);
+        if (!drbg->prev)
+            goto fini;
+        drbg->fips_primed = false;
+    }
+
     return 0;
 
 fini:
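
Aside (illustration only): the core of the continuous test is simply
"remember the previous noise block, reject an identical successor". A
user-space sketch of the same idea (the kernel version instead panics on a
genuine failure, as FIPS 140-2 requires; SEEDLEN and the helper name are
assumptions):

    #include <string.h>

    #define SEEDLEN 32 /* assumed security strength in bytes */

    static unsigned char prev[SEEDLEN];
    static int primed;

    /* Return 0 if the fresh block may be used, -1 if it must be redrawn. */
    static int continuous_test(const unsigned char *fresh)
    {
        int stuck;

        if (!primed) {
            /* The first block only primes the comparison. */
            memcpy(prev, fresh, SEEDLEN);
            primed = 1;
            return -1;
        }
        stuck = !memcmp(prev, fresh, SEEDLEN); /* identical = failure */
        memcpy(prev, fresh, SEEDLEN);
        return stuck ? -1 : 0;
    }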
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -391,6 +391,7 @@ static int fcrypt_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
 
 static struct crypto_alg fcrypt_alg = {
     .cra_name        = "fcrypt",
+    .cra_driver_name = "fcrypt-generic",
     .cra_flags       = CRYPTO_ALG_TYPE_CIPHER,
     .cra_blocksize   = 8,
     .cra_ctxsize     = sizeof(struct fcrypt_ctx),
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -31,6 +31,7 @@ static int ghash_setkey(struct crypto_shash *tfm,
             const u8 *key, unsigned int keylen)
 {
     struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+    be128 k;
 
     if (keylen != GHASH_BLOCK_SIZE) {
         crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -39,7 +40,12 @@ static int ghash_setkey(struct crypto_shash *tfm,
 
     if (ctx->gf128)
         gf128mul_free_4k(ctx->gf128);
-    ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
+
+    BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE);
+    memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */
+    ctx->gf128 = gf128mul_init_4k_lle(&k);
+    memzero_explicit(&k, GHASH_BLOCK_SIZE);
+
     if (!ctx->gf128)
         return -ENOMEM;
 
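
Aside (illustration only): the fix uses a general idiom -- when a raw byte
pointer may be unaligned but an API wants a properly typed object, bounce
the bytes through a local of the right type and wipe it afterwards. A
sketch, with consume() as a placeholder for any alignment-sensitive
function:

    be128 k;

    /* (const be128 *)key would assume alignment the caller never promised */
    memcpy(&k, key, sizeof(k));   /* byte copy is always legal */
    consume(&k);                  /* k is properly aligned */
    memzero_explicit(&k, sizeof(k));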
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -56,11 +56,6 @@ void jent_entropy_collector_free(struct rand_data *entropy_collector);
  * Helper function
  ***************************************************************************/
 
-__u64 jent_rol64(__u64 word, unsigned int shift)
-{
-    return rol64(word, shift);
-}
-
 void *jent_zalloc(unsigned int len)
 {
     return kzalloc(len, GFP_KERNEL);
@ -2,7 +2,7 @@
|
|||||||
* Non-physical true random number generator based on timing jitter --
|
* Non-physical true random number generator based on timing jitter --
|
||||||
* Jitter RNG standalone code.
|
* Jitter RNG standalone code.
|
||||||
*
|
*
|
||||||
* Copyright Stephan Mueller <smueller@chronox.de>, 2015
|
* Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2019
|
||||||
*
|
*
|
||||||
* Design
|
* Design
|
||||||
* ======
|
* ======
|
||||||
@ -47,7 +47,7 @@
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* This Jitterentropy RNG is based on the jitterentropy library
|
* This Jitterentropy RNG is based on the jitterentropy library
|
||||||
* version 1.1.0 provided at http://www.chronox.de/jent.html
|
* version 2.1.2 provided at http://www.chronox.de/jent.html
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#ifdef __OPTIMIZE__
|
#ifdef __OPTIMIZE__
|
||||||
@ -71,10 +71,7 @@ struct rand_data {
|
|||||||
#define DATA_SIZE_BITS ((sizeof(__u64)) * 8)
|
#define DATA_SIZE_BITS ((sizeof(__u64)) * 8)
|
||||||
__u64 last_delta; /* SENSITIVE stuck test */
|
__u64 last_delta; /* SENSITIVE stuck test */
|
||||||
__s64 last_delta2; /* SENSITIVE stuck test */
|
__s64 last_delta2; /* SENSITIVE stuck test */
|
||||||
unsigned int stuck:1; /* Time measurement stuck */
|
|
||||||
unsigned int osr; /* Oversample rate */
|
unsigned int osr; /* Oversample rate */
|
||||||
unsigned int stir:1; /* Post-processing stirring */
|
|
||||||
unsigned int disable_unbias:1; /* Deactivate Von-Neuman unbias */
|
|
||||||
#define JENT_MEMORY_BLOCKS 64
|
#define JENT_MEMORY_BLOCKS 64
|
||||||
#define JENT_MEMORY_BLOCKSIZE 32
|
#define JENT_MEMORY_BLOCKSIZE 32
|
||||||
#define JENT_MEMORY_ACCESSLOOPS 128
|
#define JENT_MEMORY_ACCESSLOOPS 128
|
||||||
@ -89,8 +86,6 @@ struct rand_data {
|
|||||||
};
|
};
|
||||||
|
|
||||||
/* Flags that can be used to initialize the RNG */
|
/* Flags that can be used to initialize the RNG */
|
||||||
#define JENT_DISABLE_STIR (1<<0) /* Disable stirring the entropy pool */
|
|
||||||
#define JENT_DISABLE_UNBIAS (1<<1) /* Disable the Von-Neuman Unbiaser */
|
|
||||||
#define JENT_DISABLE_MEMORY_ACCESS (1<<2) /* Disable memory access for more
|
#define JENT_DISABLE_MEMORY_ACCESS (1<<2) /* Disable memory access for more
|
||||||
* entropy, saves MEMORY_SIZE RAM for
|
* entropy, saves MEMORY_SIZE RAM for
|
||||||
* entropy collector */
|
* entropy collector */
|
||||||
@ -99,19 +94,16 @@ struct rand_data {
|
|||||||
#define JENT_ENOTIME 1 /* Timer service not available */
|
#define JENT_ENOTIME 1 /* Timer service not available */
|
||||||
#define JENT_ECOARSETIME 2 /* Timer too coarse for RNG */
|
#define JENT_ECOARSETIME 2 /* Timer too coarse for RNG */
|
||||||
#define JENT_ENOMONOTONIC 3 /* Timer is not monotonic increasing */
|
#define JENT_ENOMONOTONIC 3 /* Timer is not monotonic increasing */
|
||||||
#define JENT_EMINVARIATION 4 /* Timer variations too small for RNG */
|
|
||||||
#define JENT_EVARVAR 5 /* Timer does not produce variations of
|
#define JENT_EVARVAR 5 /* Timer does not produce variations of
|
||||||
* variations (2nd derivation of time is
|
* variations (2nd derivation of time is
|
||||||
* zero). */
|
* zero). */
|
||||||
#define JENT_EMINVARVAR 6 /* Timer variations of variations is tooi
|
#define JENT_ESTUCK 8 /* Too many stuck results during init. */
|
||||||
* small. */
|
|
||||||
|
|
||||||
/***************************************************************************
|
/***************************************************************************
|
||||||
* Helper functions
|
* Helper functions
|
||||||
***************************************************************************/
|
***************************************************************************/
|
||||||
|
|
||||||
void jent_get_nstime(__u64 *out);
|
void jent_get_nstime(__u64 *out);
|
||||||
__u64 jent_rol64(__u64 word, unsigned int shift);
|
|
||||||
void *jent_zalloc(unsigned int len);
|
void *jent_zalloc(unsigned int len);
|
||||||
void jent_zfree(void *ptr);
|
void jent_zfree(void *ptr);
|
||||||
int jent_fips_enabled(void);
|
int jent_fips_enabled(void);
|
||||||
@ -140,16 +132,16 @@ static __u64 jent_loop_shuffle(struct rand_data *ec,
|
|||||||
|
|
||||||
jent_get_nstime(&time);
|
jent_get_nstime(&time);
|
||||||
/*
|
/*
|
||||||
* mix the current state of the random number into the shuffle
|
* Mix the current state of the random number into the shuffle
|
||||||
* calculation to balance that shuffle a bit more
|
* calculation to balance that shuffle a bit more.
|
||||||
*/
|
*/
|
||||||
if (ec)
|
if (ec)
|
||||||
time ^= ec->data;
|
time ^= ec->data;
|
||||||
/*
|
/*
|
||||||
* we fold the time value as much as possible to ensure that as many
|
* We fold the time value as much as possible to ensure that as many
|
||||||
* bits of the time stamp are included as possible
|
* bits of the time stamp are included as possible.
|
||||||
*/
|
*/
|
||||||
for (i = 0; (DATA_SIZE_BITS / bits) > i; i++) {
|
for (i = 0; ((DATA_SIZE_BITS + bits - 1) / bits) > i; i++) {
|
||||||
shuffle ^= time & mask;
|
shuffle ^= time & mask;
|
||||||
time = time >> bits;
|
time = time >> bits;
|
||||||
}
|
}
|
||||||
@ -169,38 +161,28 @@ static __u64 jent_loop_shuffle(struct rand_data *ec,
|
|||||||
* CPU Jitter noise source -- this is the noise source based on the CPU
|
* CPU Jitter noise source -- this is the noise source based on the CPU
|
||||||
* execution time jitter
|
* execution time jitter
|
||||||
*
|
*
|
||||||
* This function folds the time into one bit units by iterating
|
* This function injects the individual bits of the time value into the
|
||||||
* through the DATA_SIZE_BITS bit time value as follows: assume our time value
|
* entropy pool using an LFSR.
|
||||||
* is 0xabcd
|
|
||||||
* 1st loop, 1st shift generates 0xd000
|
|
||||||
* 1st loop, 2nd shift generates 0x000d
|
|
||||||
* 2nd loop, 1st shift generates 0xcd00
|
|
||||||
* 2nd loop, 2nd shift generates 0x000c
|
|
||||||
* 3rd loop, 1st shift generates 0xbcd0
|
|
||||||
* 3rd loop, 2nd shift generates 0x000b
|
|
||||||
* 4th loop, 1st shift generates 0xabcd
|
|
||||||
* 4th loop, 2nd shift generates 0x000a
|
|
||||||
* Now, the values at the end of the 2nd shifts are XORed together.
|
|
||||||
*
|
*
|
||||||
* The code is deliberately inefficient and shall stay that way. This function
|
* The code is deliberately inefficient with respect to the bit shifting
|
||||||
* is the root cause why the code shall be compiled without optimization. This
|
* and shall stay that way. This function is the root cause why the code
|
||||||
* function not only acts as folding operation, but this function's execution
|
* shall be compiled without optimization. This function not only acts as
|
||||||
* is used to measure the CPU execution time jitter. Any change to the loop in
|
* folding operation, but this function's execution is used to measure
|
||||||
* this function implies that careful retesting must be done.
|
* the CPU execution time jitter. Any change to the loop in this function
|
||||||
|
* implies that careful retesting must be done.
|
||||||
*
|
*
|
||||||
* Input:
|
* Input:
|
||||||
* @ec entropy collector struct -- may be NULL
|
* @ec entropy collector struct -- may be NULL
|
||||||
* @time time stamp to be folded
|
* @time time stamp to be injected
|
||||||
* @loop_cnt if a value not equal to 0 is set, use the given value as number of
|
* @loop_cnt if a value not equal to 0 is set, use the given value as number of
|
||||||
* loops to perform the folding
|
* loops to perform the folding
|
||||||
*
|
*
|
||||||
* Output:
|
* Output:
|
||||||
* @folded result of folding operation
|
* updated ec->data
|
||||||
*
|
*
|
||||||
* @return Number of loops the folding operation is performed
|
* @return Number of loops the folding operation is performed
|
||||||
*/
|
*/
|
||||||
static __u64 jent_fold_time(struct rand_data *ec, __u64 time,
|
static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt)
|
||||||
__u64 *folded, __u64 loop_cnt)
|
|
||||||
{
|
{
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
__u64 j = 0;
|
__u64 j = 0;
|
||||||
@ -217,15 +199,34 @@ static __u64 jent_fold_time(struct rand_data *ec, __u64 time,
|
|||||||
if (loop_cnt)
|
if (loop_cnt)
|
||||||
fold_loop_cnt = loop_cnt;
|
fold_loop_cnt = loop_cnt;
|
||||||
for (j = 0; j < fold_loop_cnt; j++) {
|
for (j = 0; j < fold_loop_cnt; j++) {
|
||||||
new = 0;
|
new = ec->data;
|
||||||
for (i = 1; (DATA_SIZE_BITS) >= i; i++) {
|
for (i = 1; (DATA_SIZE_BITS) >= i; i++) {
|
||||||
__u64 tmp = time << (DATA_SIZE_BITS - i);
|
__u64 tmp = time << (DATA_SIZE_BITS - i);
|
||||||
|
|
||||||
tmp = tmp >> (DATA_SIZE_BITS - 1);
|
tmp = tmp >> (DATA_SIZE_BITS - 1);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Fibonacci LSFR with polynomial of
|
||||||
|
* x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is
|
||||||
|
* primitive according to
|
||||||
|
* http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf
|
||||||
|
* (the shift values are the polynomial values minus one
|
||||||
|
* due to counting bits from 0 to 63). As the current
|
||||||
|
* position is always the LSB, the polynomial only needs
|
||||||
|
* to shift data in from the left without wrap.
|
||||||
|
*/
|
||||||
|
tmp ^= ((new >> 63) & 1);
|
||||||
|
tmp ^= ((new >> 60) & 1);
|
||||||
|
tmp ^= ((new >> 55) & 1);
|
||||||
|
tmp ^= ((new >> 30) & 1);
|
||||||
|
tmp ^= ((new >> 27) & 1);
|
||||||
|
tmp ^= ((new >> 22) & 1);
|
||||||
|
new <<= 1;
|
||||||
new ^= tmp;
|
new ^= tmp;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
*folded = new;
|
ec->data = new;
|
||||||
|
|
||||||
return fold_loop_cnt;
|
return fold_loop_cnt;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -258,7 +259,6 @@ static __u64 jent_fold_time(struct rand_data *ec, __u64 time,
|
|||||||
*/
|
*/
|
||||||
static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
|
static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
|
||||||
{
|
{
|
||||||
unsigned char *tmpval = NULL;
|
|
||||||
unsigned int wrap = 0;
|
unsigned int wrap = 0;
|
||||||
__u64 i = 0;
|
__u64 i = 0;
|
||||||
#define MAX_ACC_LOOP_BIT 7
|
#define MAX_ACC_LOOP_BIT 7
|
||||||
@ -278,7 +278,7 @@ static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
|
|||||||
acc_loop_cnt = loop_cnt;
|
acc_loop_cnt = loop_cnt;
|
||||||
|
|
||||||
for (i = 0; i < (ec->memaccessloops + acc_loop_cnt); i++) {
|
for (i = 0; i < (ec->memaccessloops + acc_loop_cnt); i++) {
|
||||||
tmpval = ec->mem + ec->memlocation;
|
unsigned char *tmpval = ec->mem + ec->memlocation;
|
||||||
/*
|
/*
|
||||||
* memory access: just add 1 to one byte,
|
* memory access: just add 1 to one byte,
|
||||||
* wrap at 255 -- memory access implies read
|
* wrap at 255 -- memory access implies read
|
||||||
@ -316,7 +316,7 @@ static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
|
|||||||
* 0 jitter measurement not stuck (good bit)
|
* 0 jitter measurement not stuck (good bit)
|
||||||
* 1 jitter measurement stuck (reject bit)
|
* 1 jitter measurement stuck (reject bit)
|
||||||
*/
|
*/
|
||||||
static void jent_stuck(struct rand_data *ec, __u64 current_delta)
|
static int jent_stuck(struct rand_data *ec, __u64 current_delta)
|
||||||
{
|
{
|
||||||
__s64 delta2 = ec->last_delta - current_delta;
|
__s64 delta2 = ec->last_delta - current_delta;
|
||||||
__s64 delta3 = delta2 - ec->last_delta2;
|
__s64 delta3 = delta2 - ec->last_delta2;
|
||||||
@ -325,14 +325,15 @@ static void jent_stuck(struct rand_data *ec, __u64 current_delta)
|
|||||||
ec->last_delta2 = delta2;
|
ec->last_delta2 = delta2;
|
||||||
|
|
||||||
if (!current_delta || !delta2 || !delta3)
|
if (!current_delta || !delta2 || !delta3)
|
||||||
ec->stuck = 1;
|
return 1;
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This is the heart of the entropy generation: calculate time deltas and
|
* This is the heart of the entropy generation: calculate time deltas and
|
||||||
* use the CPU jitter in the time deltas. The jitter is folded into one
|
* use the CPU jitter in the time deltas. The jitter is injected into the
|
||||||
* bit. You can call this function the "random bit generator" as it
|
* entropy pool.
|
||||||
* produces one random bit per invocation.
|
|
||||||
*
|
*
|
||||||
* WARNING: ensure that ->prev_time is primed before using the output
|
* WARNING: ensure that ->prev_time is primed before using the output
|
||||||
* of this function! This can be done by calling this function
|
* of this function! This can be done by calling this function
|
||||||
@ -341,12 +342,11 @@ static void jent_stuck(struct rand_data *ec, __u64 current_delta)
|
|||||||
* Input:
|
* Input:
|
||||||
* @entropy_collector Reference to entropy collector
|
* @entropy_collector Reference to entropy collector
|
||||||
*
|
*
|
||||||
* @return One random bit
|
* @return result of stuck test
|
||||||
*/
|
*/
|
||||||
static __u64 jent_measure_jitter(struct rand_data *ec)
|
static int jent_measure_jitter(struct rand_data *ec)
|
||||||
{
|
{
|
||||||
__u64 time = 0;
|
__u64 time = 0;
|
||||||
__u64 data = 0;
|
|
||||||
__u64 current_delta = 0;
|
__u64 current_delta = 0;
|
||||||
|
|
||||||
/* Invoke one noise source before time measurement to add variations */
|
/* Invoke one noise source before time measurement to add variations */
|
||||||
@ -360,109 +360,11 @@ static __u64 jent_measure_jitter(struct rand_data *ec)
|
|||||||
current_delta = time - ec->prev_time;
|
current_delta = time - ec->prev_time;
|
||||||
ec->prev_time = time;
|
ec->prev_time = time;
|
||||||
|
|
||||||
/* Now call the next noise sources which also folds the data */
|
/* Now call the next noise sources which also injects the data */
|
||||||
jent_fold_time(ec, current_delta, &data, 0);
|
jent_lfsr_time(ec, current_delta, 0);
|
||||||
|
|
||||||
/*
|
/* Check whether we have a stuck measurement. */
|
||||||
* Check whether we have a stuck measurement. The enforcement
|
return jent_stuck(ec, current_delta);
|
||||||
* is performed after the stuck value has been mixed into the
|
|
||||||
* entropy pool.
|
|
||||||
*/
|
|
||||||
jent_stuck(ec, current_delta);
|
|
||||||
|
|
||||||
return data;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
- * Von Neumann unbias as explained in RFC 4086 section 4.2. As shown in the
- * documentation of that RNG, the bits from jent_measure_jitter are considered
- * independent, which implies that the Von Neumann unbias operation is
- * applicable.
- * A proof of the Von Neumann unbias operation to remove skews is given in the
- * document "A proposal for: Functionality classes for random number
- * generators", version 2.0 by Werner Schindler, section 5.4.1.
- *
- * Input:
- * @entropy_collector Reference to entropy collector
- *
- * @return One random bit
- */
-static __u64 jent_unbiased_bit(struct rand_data *entropy_collector)
-{
-	do {
-		__u64 a = jent_measure_jitter(entropy_collector);
-		__u64 b = jent_measure_jitter(entropy_collector);
-
-		if (a == b)
-			continue;
-		if (1 == a)
-			return 1;
-		else
-			return 0;
-	} while (1);
-}
-
-/**
- * Shuffle the pool a bit by mixing some value with a bijective function (XOR)
- * into the pool.
- *
- * The function generates a mixer value that depends on the bits set and the
- * location of the set bits in the random number generated by the entropy
- * source. Therefore, based on the generated random number, this mixer value
- * can have 2**64 different values. That mixer value is initialized with the
- * first two SHA-1 constants. After obtaining the mixer value, it is XORed into
- * the random number.
- *
- * The mixer value is not assumed to contain any entropy. But due to the XOR
- * operation, it can also not destroy any entropy present in the entropy pool.
- *
- * Input:
- * @entropy_collector Reference to entropy collector
- */
-static void jent_stir_pool(struct rand_data *entropy_collector)
-{
-	/*
-	 * to shut up GCC on 32 bit, we have to initialize the 64 bit variable
-	 * with two 32 bit variables
-	 */
-	union c {
-		__u64 u64;
-		__u32 u32[2];
-	};
-	/*
-	 * This constant is derived from the first two 32 bit initialization
-	 * vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1
-	 */
-	union c constant;
-	/*
-	 * The start value of the mixer variable is derived from the third
-	 * and fourth 32 bit initialization vector of SHA-1 as defined in
-	 * FIPS 180-4 section 5.3.1
-	 */
-	union c mixer;
-	unsigned int i = 0;
-
-	/*
-	 * Store the SHA-1 constants in reverse order to make up the 64 bit
-	 * value -- this applies to a little endian system; on a big endian
-	 * system, it reverses as expected. But this really does not matter
-	 * as we do not rely on the specific numbers. We just pick the SHA-1
-	 * constants as they have a good mix of set and unset bits.
-	 */
-	constant.u32[1] = 0x67452301;
-	constant.u32[0] = 0xefcdab89;
-	mixer.u32[1] = 0x98badcfe;
-	mixer.u32[0] = 0x10325476;
-
-	for (i = 0; i < DATA_SIZE_BITS; i++) {
-		/*
-		 * get the i-th bit of the input random number and only XOR
-		 * the constant into the mixer value when that bit is set
-		 */
-		if ((entropy_collector->data >> i) & 1)
-			mixer.u64 ^= constant.u64;
-		mixer.u64 = jent_rol64(mixer.u64, 1);
-	}
-	entropy_collector->data ^= mixer.u64;
-}
-
 /**
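For illustration, a standalone user-space sketch of the Von Neumann unbias step removed above; biased_bit() is a hypothetical stand-in for jent_measure_jitter(), not part of the kernel code:

#include <stdio.h>
#include <stdlib.h>

/* hypothetical biased source: returns 1 with probability ~0.75 */
static int biased_bit(void)
{
	return (rand() & 3) != 0;
}

/*
 * Von Neumann unbias (RFC 4086 section 4.2): draw bit pairs, discard
 * equal pairs, emit the first bit of an unequal pair. For independent
 * draws with P(1) = p, P(01) == P(10) == p * (1 - p), so the emitted
 * bit is unbiased.
 */
static int unbiased_bit(void)
{
	for (;;) {
		int a = biased_bit();
		int b = biased_bit();

		if (a != b)
			return a;
	}
}

int main(void)
{
	unsigned long ones = 0, i;

	for (i = 0; i < 1000000; i++)
		ones += unbiased_bit();
	printf("fraction of ones: %f\n", ones / 1e6);
	return 0;
}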
@@ -480,48 +382,9 @@ static void jent_gen_entropy(struct rand_data *ec)
 	jent_measure_jitter(ec);
 
 	while (1) {
-		__u64 data = 0;
-
-		if (ec->disable_unbias == 1)
-			data = jent_measure_jitter(ec);
-		else
-			data = jent_unbiased_bit(ec);
-
-		/* enforcement of the jent_stuck test */
-		if (ec->stuck) {
-			/*
-			 * We only mix in the bit considered not appropriate
-			 * without the LSFR. The reason is that if we apply
-			 * the LSFR and we do not rotate, the 2nd bit with LSFR
-			 * will cancel out the first LSFR application on the
-			 * bad bit.
-			 *
-			 * And we do not rotate as we apply the next bit to the
-			 * current bit location again.
-			 */
-			ec->data ^= data;
-			ec->stuck = 0;
+		/* If a stuck measurement is received, repeat measurement */
+		if (jent_measure_jitter(ec))
 			continue;
-		}
-
-		/*
-		 * Fibonacci LSFR with polynomial of
-		 * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is
-		 * primitive according to
-		 * http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf
-		 * (the shift values are the polynomial values minus one
-		 * due to counting bits from 0 to 63). As the current
-		 * position is always the LSB, the polynomial only needs
-		 * to shift data in from the left without wrap.
-		 */
-		ec->data ^= data;
-		ec->data ^= ((ec->data >> 63) & 1);
-		ec->data ^= ((ec->data >> 60) & 1);
-		ec->data ^= ((ec->data >> 55) & 1);
-		ec->data ^= ((ec->data >> 30) & 1);
-		ec->data ^= ((ec->data >> 27) & 1);
-		ec->data ^= ((ec->data >> 22) & 1);
-		ec->data = jent_rol64(ec->data, 1);
 
 		/*
 		 * We multiply the loop value with ->osr to obtain the
@@ -530,8 +393,6 @@ static void jent_gen_entropy(struct rand_data *ec)
 		if (++k >= (DATA_SIZE_BITS * ec->osr))
 			break;
 	}
-	if (ec->stir)
-		jent_stir_pool(ec);
 }
 
 /**
@@ -639,12 +500,6 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
 		osr = 1; /* minimum sampling rate is 1 */
 	entropy_collector->osr = osr;
 
-	entropy_collector->stir = 1;
-	if (flags & JENT_DISABLE_STIR)
-		entropy_collector->stir = 0;
-	if (flags & JENT_DISABLE_UNBIAS)
-		entropy_collector->disable_unbias = 1;
-
 	/* fill the data pad with non-zero values */
 	jent_gen_entropy(entropy_collector);
 
@@ -656,7 +511,6 @@ void jent_entropy_collector_free(struct rand_data *entropy_collector)
 	jent_zfree(entropy_collector->mem);
 	entropy_collector->mem = NULL;
 	jent_zfree(entropy_collector);
-	entropy_collector = NULL;
 }
 
 int jent_entropy_init(void)
@@ -665,8 +519,9 @@ int jent_entropy_init(void)
 	__u64 delta_sum = 0;
 	__u64 old_delta = 0;
 	int time_backwards = 0;
-	int count_var = 0;
 	int count_mod = 0;
+	int count_stuck = 0;
+	struct rand_data ec = { 0 };
 
 	/* We could perform statistical tests here, but the problem is
 	 * that we only have a few loop counts to do testing. These
@@ -695,12 +550,14 @@ int jent_entropy_init(void)
 	for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) {
 		__u64 time = 0;
 		__u64 time2 = 0;
-		__u64 folded = 0;
 		__u64 delta = 0;
 		unsigned int lowdelta = 0;
+		int stuck;
 
+		/* Invoke core entropy collection logic */
 		jent_get_nstime(&time);
-		jent_fold_time(NULL, time, &folded, 1<<MIN_FOLD_LOOP_BIT);
+		ec.prev_time = time;
+		jent_lfsr_time(&ec, time, 0);
 		jent_get_nstime(&time2);
 
 		/* test whether timer works */
@@ -715,6 +572,8 @@ int jent_entropy_init(void)
 		if (!delta)
 			return JENT_ECOARSETIME;
 
+		stuck = jent_stuck(&ec, delta);
+
 		/*
 		 * up to here we did not modify any variable that will be
 		 * evaluated later, but we already performed some work. Thus we
@@ -725,14 +584,14 @@ int jent_entropy_init(void)
 		if (CLEARCACHE > i)
 			continue;
 
+		if (stuck)
+			count_stuck++;
+
 		/* test whether we have an increasing timer */
 		if (!(time2 > time))
 			time_backwards++;
 
-		/*
-		 * Avoid modulo of 64 bit integer to allow code to compile
-		 * on 32 bit architectures.
-		 */
+		/* use 32 bit value to ensure compilation on 32 bit arches */
 		lowdelta = time2 - time;
 		if (!(lowdelta % 100))
 			count_mod++;
@@ -743,14 +602,10 @@ int jent_entropy_init(void)
 		 * only after the first loop is executed as we need to prime
 		 * the old_data value
 		 */
-		if (i) {
-			if (delta != old_delta)
-				count_var++;
-			if (delta > old_delta)
-				delta_sum += (delta - old_delta);
-			else
-				delta_sum += (old_delta - delta);
-		}
+		if (delta > old_delta)
+			delta_sum += (delta - old_delta);
+		else
+			delta_sum += (old_delta - delta);
 		old_delta = delta;
 	}
 
@@ -763,25 +618,29 @@ int jent_entropy_init(void)
 	 */
 	if (3 < time_backwards)
 		return JENT_ENOMONOTONIC;
-	/* Error if the time variances are always identical */
-	if (!delta_sum)
-		return JENT_EVARVAR;
 
 	/*
 	 * Variations of deltas of time must on average be larger
 	 * than 1 to ensure the entropy estimation
 	 * implied with 1 is preserved
 	 */
-	if (delta_sum <= 1)
-		return JENT_EMINVARVAR;
+	if ((delta_sum) <= 1)
+		return JENT_EVARVAR;
 
 	/*
 	 * Ensure that we have variations in the time stamp below 10 for at
-	 * least 10% of all checks -- on some platforms, the counter
-	 * increments in multiples of 100, but not always
+	 * least 10% of all checks -- on some platforms, the counter increments
+	 * in multiples of 100, but not always
 	 */
 	if ((TESTLOOPCOUNT/10 * 9) < count_mod)
 		return JENT_ECOARSETIME;
 
+	/*
+	 * If we have more than 90% stuck results, then this Jitter RNG is
+	 * likely to not work well.
+	 */
+	if ((TESTLOOPCOUNT/10 * 9) < count_stuck)
+		return JENT_ESTUCK;
+
 	return 0;
 }
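The coarse-timer check in jent_entropy_init() can be mimicked in user space; a minimal sketch, assuming clock_gettime(CLOCK_MONOTONIC) as the time source (hypothetical test program, not kernel code):

#include <stdio.h>
#include <time.h>

#define TESTLOOPCOUNT 1024

static unsigned long long nstime(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	unsigned int count_mod = 0, i;

	for (i = 0; i < TESTLOOPCOUNT; i++) {
		unsigned long long t1 = nstime();
		unsigned long long t2 = nstime();
		unsigned int lowdelta = (unsigned int)(t2 - t1);

		/* same check as jent_entropy_init(): multiples of 100 */
		if (!(lowdelta % 100))
			count_mod++;
	}
	/* reject if more than 90% of deltas are multiples of 100 */
	printf("timer too coarse: %s\n",
	       (TESTLOOPCOUNT / 10 * 9) < count_mod ? "yes" : "no");
	return 0;
}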
crypto/khazad.c
@@ -848,6 +848,7 @@ static void khazad_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 
 static struct crypto_alg khazad_alg = {
 	.cra_name = "khazad",
+	.cra_driver_name = "khazad-generic",
 	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize = KHAZAD_BLOCK_SIZE,
 	.cra_ctxsize = sizeof (struct khazad_ctx),

crypto/lrw.c
@@ -384,7 +384,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
 	inst->alg.base.cra_priority = alg->base.cra_priority;
 	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
 	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
-				       (__alignof__(__be32) - 1);
+				       (__alignof__(be128) - 1);
 
 	inst->alg.ivsize = LRW_BLOCK_SIZE;
 	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +

crypto/lz4.c
@@ -106,6 +106,7 @@ static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
 
 static struct crypto_alg alg_lz4 = {
 	.cra_name = "lz4",
+	.cra_driver_name = "lz4-generic",
 	.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
 	.cra_ctxsize = sizeof(struct lz4_ctx),
 	.cra_module = THIS_MODULE,

crypto/lz4hc.c
@@ -107,6 +107,7 @@ static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
 
 static struct crypto_alg alg_lz4hc = {
 	.cra_name = "lz4hc",
+	.cra_driver_name = "lz4hc-generic",
 	.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
 	.cra_ctxsize = sizeof(struct lz4hc_ctx),
 	.cra_module = THIS_MODULE,

crypto/lzo-rle.c
@@ -109,6 +109,7 @@ static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src,
 
 static struct crypto_alg alg = {
 	.cra_name = "lzo-rle",
+	.cra_driver_name = "lzo-rle-generic",
 	.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
 	.cra_ctxsize = sizeof(struct lzorle_ctx),
 	.cra_module = THIS_MODULE,

crypto/lzo.c
@@ -109,6 +109,7 @@ static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src,
 
 static struct crypto_alg alg = {
 	.cra_name = "lzo",
+	.cra_driver_name = "lzo-generic",
 	.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
 	.cra_ctxsize = sizeof(struct lzo_ctx),
 	.cra_module = THIS_MODULE,
crypto/md4.c
@@ -216,9 +216,10 @@ static struct shash_alg alg = {
 	.final = md4_final,
 	.descsize = sizeof(struct md4_ctx),
 	.base = {
 		.cra_name = "md4",
+		.cra_driver_name = "md4-generic",
 		.cra_blocksize = MD4_HMAC_BLOCK_SIZE,
 		.cra_module = THIS_MODULE,
 	}
 };

crypto/md5.c
@@ -228,9 +228,10 @@ static struct shash_alg alg = {
 	.descsize = sizeof(struct md5_state),
 	.statesize = sizeof(struct md5_state),
 	.base = {
 		.cra_name = "md5",
+		.cra_driver_name = "md5-generic",
 		.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
 		.cra_module = THIS_MODULE,
 	}
 };

crypto/michael_mic.c
@@ -156,6 +156,7 @@ static struct shash_alg alg = {
 	.descsize = sizeof(struct michael_mic_desc_ctx),
 	.base = {
 		.cra_name = "michael_mic",
+		.cra_driver_name = "michael_mic-generic",
 		.cra_blocksize = 8,
 		.cra_alignmask = 3,
 		.cra_ctxsize = sizeof(struct michael_mic_ctx),

crypto/rmd128.c
@@ -298,6 +298,7 @@ static struct shash_alg alg = {
 	.descsize = sizeof(struct rmd128_ctx),
 	.base = {
 		.cra_name = "rmd128",
+		.cra_driver_name = "rmd128-generic",
 		.cra_blocksize = RMD128_BLOCK_SIZE,
 		.cra_module = THIS_MODULE,
 	}

crypto/rmd160.c
@@ -342,6 +342,7 @@ static struct shash_alg alg = {
 	.descsize = sizeof(struct rmd160_ctx),
 	.base = {
 		.cra_name = "rmd160",
+		.cra_driver_name = "rmd160-generic",
 		.cra_blocksize = RMD160_BLOCK_SIZE,
 		.cra_module = THIS_MODULE,
 	}

crypto/rmd256.c
@@ -317,6 +317,7 @@ static struct shash_alg alg = {
 	.descsize = sizeof(struct rmd256_ctx),
 	.base = {
 		.cra_name = "rmd256",
+		.cra_driver_name = "rmd256-generic",
 		.cra_blocksize = RMD256_BLOCK_SIZE,
 		.cra_module = THIS_MODULE,
 	}

crypto/rmd320.c
@@ -366,6 +366,7 @@ static struct shash_alg alg = {
 	.descsize = sizeof(struct rmd320_ctx),
 	.base = {
 		.cra_name = "rmd320",
+		.cra_driver_name = "rmd320-generic",
 		.cra_blocksize = RMD320_BLOCK_SIZE,
 		.cra_module = THIS_MODULE,
 	}
crypto/serpent_generic.c
@@ -225,7 +225,13 @@
 	x4 ^= x2;					\
 	})
 
-static void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, u32 r3, u32 r4, u32 *k)
+/*
+ * both gcc and clang have misoptimized this function in the past,
+ * producing horrible object code from spilling temporary variables
+ * on the stack. Forcing this part out of line avoids that.
+ */
+static noinline void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2,
+					   u32 r3, u32 r4, u32 *k)
 {
 	k += 100;
 	S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24);
@@ -637,6 +643,7 @@ static struct crypto_alg srp_algs[2] = { {
 	.cia_decrypt = serpent_decrypt } }
 }, {
 	.cra_name = "tnepres",
+	.cra_driver_name = "tnepres-generic",
 	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize = SERPENT_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct serpent_ctx),
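The noinline marker used above is the kernel's annotation that forbids the compiler from inlining a function. Outside the kernel, the same effect can be sketched with the underlying GCC/clang attribute (illustration only):

/* GCC/clang equivalent of the kernel's noinline annotation */
#define noinline __attribute__((noinline))

/*
 * keeping a register-heavy helper out of line prevents the compiler
 * from inlining it into every caller and spilling temporaries there
 */
static noinline int heavy_helper(int x)
{
	return x * x + 1;
}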
crypto/skcipher.c
@@ -837,6 +837,40 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	return 0;
 }
 
+int crypto_skcipher_encrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_alg *alg = tfm->base.__crt_alg;
+	unsigned int cryptlen = req->cryptlen;
+	int ret;
+
+	crypto_stats_get(alg);
+	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		ret = -ENOKEY;
+	else
+		ret = tfm->encrypt(req);
+	crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
+
+int crypto_skcipher_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_alg *alg = tfm->base.__crt_alg;
+	unsigned int cryptlen = req->cryptlen;
+	int ret;
+
+	crypto_stats_get(alg);
+	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		ret = -ENOKEY;
+	else
+		ret = tfm->decrypt(req);
+	crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
+
 static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
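One caller-visible effect of the CRYPTO_TFM_NEED_KEY check in these wrappers is that an encrypt or decrypt call on a transform that was never keyed fails fast with -ENOKEY, before the driver is reached. A sketch in a hypothetical kernel-module context (algorithm name assumed available):

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/slab.h>

static int enokey_demo(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm);
		return -ENOMEM;
	}
	skcipher_request_set_crypt(req, NULL, NULL, 0, NULL);

	/* no crypto_skcipher_setkey() was done, so this returns -ENOKEY */
	err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return err;
}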
crypto/tea.c
@@ -216,6 +216,7 @@ static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 
 static struct crypto_alg tea_algs[3] = { {
 	.cra_name = "tea",
+	.cra_driver_name = "tea-generic",
 	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize = TEA_BLOCK_SIZE,
 	.cra_ctxsize = sizeof (struct tea_ctx),
@@ -229,6 +230,7 @@ static struct crypto_alg tea_algs[3] = { {
 	.cia_decrypt = tea_decrypt } }
 }, {
 	.cra_name = "xtea",
+	.cra_driver_name = "xtea-generic",
 	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize = XTEA_BLOCK_SIZE,
 	.cra_ctxsize = sizeof (struct xtea_ctx),
@@ -242,6 +244,7 @@ static struct crypto_alg tea_algs[3] = { {
 	.cia_decrypt = xtea_decrypt } }
 }, {
 	.cra_name = "xeta",
+	.cra_driver_name = "xeta-generic",
 	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize = XTEA_BLOCK_SIZE,
 	.cra_ctxsize = sizeof (struct xtea_ctx),
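The hunks above fill in cra_driver_name for the generic implementations. With that in place, a caller can pin one specific implementation by its driver name instead of taking whichever has the highest priority; a minimal sketch in a hypothetical kernel-module context:

#include <crypto/hash.h>
#include <linux/err.h>

static int pick_generic_md5(void)
{
	/* "md5" selects the best available implementation;
	 * the driver name pins the generic C one */
	struct crypto_shash *tfm = crypto_alloc_shash("md5-generic", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	crypto_free_shash(tfm);
	return 0;
}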
crypto/testmgr.c (478 changed lines)
@@ -1032,6 +1032,205 @@ static void crypto_reenable_simd_for_test(void)
 }
 #endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
 
+static int build_hash_sglist(struct test_sglist *tsgl,
+			     const struct hash_testvec *vec,
+			     const struct testvec_config *cfg,
+			     unsigned int alignmask,
+			     const struct test_sg_division *divs[XBUFSIZE])
+{
+	struct kvec kv;
+	struct iov_iter input;
+
+	kv.iov_base = (void *)vec->plaintext;
+	kv.iov_len = vec->psize;
+	iov_iter_kvec(&input, WRITE, &kv, 1, vec->psize);
+	return build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
+				 &input, divs);
+}
+
+static int check_hash_result(const char *type,
+			     const u8 *result, unsigned int digestsize,
+			     const struct hash_testvec *vec,
+			     const char *vec_name,
+			     const char *driver,
+			     const struct testvec_config *cfg)
+{
+	if (memcmp(result, vec->digest, digestsize) != 0) {
+		pr_err("alg: %s: %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
+		       type, driver, vec_name, cfg->name);
+		return -EINVAL;
+	}
+	if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) {
+		pr_err("alg: %s: %s overran result buffer on test vector %s, cfg=\"%s\"\n",
+		       type, driver, vec_name, cfg->name);
+		return -EOVERFLOW;
+	}
+	return 0;
+}
+
+static inline int check_shash_op(const char *op, int err,
+				 const char *driver, const char *vec_name,
+				 const struct testvec_config *cfg)
+{
+	if (err)
+		pr_err("alg: shash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n",
+		       driver, op, err, vec_name, cfg->name);
+	return err;
+}
+
+static inline const void *sg_data(struct scatterlist *sg)
+{
+	return page_address(sg_page(sg)) + sg->offset;
+}
+
+/* Test one hash test vector in one configuration, using the shash API */
+static int test_shash_vec_cfg(const char *driver,
+			      const struct hash_testvec *vec,
+			      const char *vec_name,
+			      const struct testvec_config *cfg,
+			      struct shash_desc *desc,
+			      struct test_sglist *tsgl,
+			      u8 *hashstate)
+{
+	struct crypto_shash *tfm = desc->tfm;
+	const unsigned int alignmask = crypto_shash_alignmask(tfm);
+	const unsigned int digestsize = crypto_shash_digestsize(tfm);
+	const unsigned int statesize = crypto_shash_statesize(tfm);
+	const struct test_sg_division *divs[XBUFSIZE];
+	unsigned int i;
+	u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN];
+	int err;
+
+	/* Set the key, if specified */
+	if (vec->ksize) {
+		err = crypto_shash_setkey(tfm, vec->key, vec->ksize);
+		if (err) {
+			if (err == vec->setkey_error)
+				return 0;
+			pr_err("alg: shash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
+			       driver, vec_name, vec->setkey_error, err,
+			       crypto_shash_get_flags(tfm));
+			return err;
+		}
+		if (vec->setkey_error) {
+			pr_err("alg: shash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
+			       driver, vec_name, vec->setkey_error);
+			return -EINVAL;
+		}
+	}
+
+	/* Build the scatterlist for the source data */
+	err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs);
+	if (err) {
+		pr_err("alg: shash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
+		       driver, vec_name, cfg->name);
+		return err;
+	}
+
+	/* Do the actual hashing */
+
+	testmgr_poison(desc->__ctx, crypto_shash_descsize(tfm));
+	testmgr_poison(result, digestsize + TESTMGR_POISON_LEN);
+
+	if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST ||
+	    vec->digest_error) {
+		/* Just using digest() */
+		if (tsgl->nents != 1)
+			return 0;
+		if (cfg->nosimd)
+			crypto_disable_simd_for_test();
+		err = crypto_shash_digest(desc, sg_data(&tsgl->sgl[0]),
+					  tsgl->sgl[0].length, result);
+		if (cfg->nosimd)
+			crypto_reenable_simd_for_test();
+		if (err) {
+			if (err == vec->digest_error)
+				return 0;
+			pr_err("alg: shash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
+			       driver, vec_name, vec->digest_error, err,
+			       cfg->name);
+			return err;
+		}
+		if (vec->digest_error) {
+			pr_err("alg: shash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
+			       driver, vec_name, vec->digest_error, cfg->name);
+			return -EINVAL;
+		}
+		goto result_ready;
+	}
+
+	/* Using init(), zero or more update(), then final() or finup() */
+
+	if (cfg->nosimd)
+		crypto_disable_simd_for_test();
+	err = crypto_shash_init(desc);
+	if (cfg->nosimd)
+		crypto_reenable_simd_for_test();
+	err = check_shash_op("init", err, driver, vec_name, cfg);
+	if (err)
+		return err;
+
+	for (i = 0; i < tsgl->nents; i++) {
+		if (i + 1 == tsgl->nents &&
+		    cfg->finalization_type == FINALIZATION_TYPE_FINUP) {
+			if (divs[i]->nosimd)
+				crypto_disable_simd_for_test();
+			err = crypto_shash_finup(desc, sg_data(&tsgl->sgl[i]),
+						 tsgl->sgl[i].length, result);
+			if (divs[i]->nosimd)
+				crypto_reenable_simd_for_test();
+			err = check_shash_op("finup", err, driver, vec_name,
+					     cfg);
+			if (err)
+				return err;
+			goto result_ready;
+		}
+		if (divs[i]->nosimd)
+			crypto_disable_simd_for_test();
+		err = crypto_shash_update(desc, sg_data(&tsgl->sgl[i]),
+					  tsgl->sgl[i].length);
+		if (divs[i]->nosimd)
+			crypto_reenable_simd_for_test();
+		err = check_shash_op("update", err, driver, vec_name, cfg);
+		if (err)
+			return err;
+		if (divs[i]->flush_type == FLUSH_TYPE_REIMPORT) {
+			/* Test ->export() and ->import() */
+			testmgr_poison(hashstate + statesize,
+				       TESTMGR_POISON_LEN);
+			err = crypto_shash_export(desc, hashstate);
+			err = check_shash_op("export", err, driver, vec_name,
+					     cfg);
+			if (err)
+				return err;
+			if (!testmgr_is_poison(hashstate + statesize,
+					       TESTMGR_POISON_LEN)) {
+				pr_err("alg: shash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n",
+				       driver, vec_name, cfg->name);
+				return -EOVERFLOW;
+			}
+			testmgr_poison(desc->__ctx, crypto_shash_descsize(tfm));
+			err = crypto_shash_import(desc, hashstate);
+			err = check_shash_op("import", err, driver, vec_name,
+					     cfg);
+			if (err)
+				return err;
+		}
+	}
+
+	if (cfg->nosimd)
+		crypto_disable_simd_for_test();
+	err = crypto_shash_final(desc, result);
+	if (cfg->nosimd)
+		crypto_reenable_simd_for_test();
+	err = check_shash_op("final", err, driver, vec_name, cfg);
+	if (err)
+		return err;
+result_ready:
+	return check_hash_result("shash", result, digestsize, vec, vec_name,
+				 driver, cfg);
+}
+
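For orientation, the shash interface exercised by test_shash_vec_cfg() above is the synchronous hash API; a minimal one-shot digest in a hypothetical kernel-module context looks like this:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int demo_shash_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* a shash_desc is the tfm pointer plus per-request state */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	/* one-shot; init()/update()/final() also work on the same desc */
	err = crypto_shash_digest(desc, data, len, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}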
 static int do_ahash_op(int (*op)(struct ahash_request *req),
 		       struct ahash_request *req,
 		       struct crypto_wait *wait, bool nosimd)
@@ -1049,31 +1248,32 @@ static int do_ahash_op(int (*op)(struct ahash_request *req),
 	return crypto_wait_req(err, wait);
 }
 
-static int check_nonfinal_hash_op(const char *op, int err,
-				  u8 *result, unsigned int digestsize,
-				  const char *driver, const char *vec_name,
-				  const struct testvec_config *cfg)
+static int check_nonfinal_ahash_op(const char *op, int err,
+				   u8 *result, unsigned int digestsize,
+				   const char *driver, const char *vec_name,
+				   const struct testvec_config *cfg)
 {
 	if (err) {
-		pr_err("alg: hash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n",
+		pr_err("alg: ahash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n",
 		       driver, op, err, vec_name, cfg->name);
 		return err;
 	}
 	if (!testmgr_is_poison(result, digestsize)) {
-		pr_err("alg: hash: %s %s() used result buffer on test vector %s, cfg=\"%s\"\n",
+		pr_err("alg: ahash: %s %s() used result buffer on test vector %s, cfg=\"%s\"\n",
 		       driver, op, vec_name, cfg->name);
 		return -EINVAL;
 	}
 	return 0;
 }
 
-static int test_hash_vec_cfg(const char *driver,
-			     const struct hash_testvec *vec,
-			     const char *vec_name,
-			     const struct testvec_config *cfg,
-			     struct ahash_request *req,
-			     struct test_sglist *tsgl,
-			     u8 *hashstate)
+/* Test one hash test vector in one configuration, using the ahash API */
+static int test_ahash_vec_cfg(const char *driver,
+			      const struct hash_testvec *vec,
+			      const char *vec_name,
+			      const struct testvec_config *cfg,
+			      struct ahash_request *req,
+			      struct test_sglist *tsgl,
+			      u8 *hashstate)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	const unsigned int alignmask = crypto_ahash_alignmask(tfm);
@@ -1082,8 +1282,6 @@ static int test_hash_vec_cfg(const char *driver,
 	const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
 	const struct test_sg_division *divs[XBUFSIZE];
 	DECLARE_CRYPTO_WAIT(wait);
-	struct kvec _input;
-	struct iov_iter input;
 	unsigned int i;
 	struct scatterlist *pending_sgl;
 	unsigned int pending_len;
@@ -1096,26 +1294,22 @@ static int test_hash_vec_cfg(const char *driver,
 		if (err) {
 			if (err == vec->setkey_error)
 				return 0;
-			pr_err("alg: hash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
+			pr_err("alg: ahash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
 			       driver, vec_name, vec->setkey_error, err,
 			       crypto_ahash_get_flags(tfm));
 			return err;
 		}
 		if (vec->setkey_error) {
-			pr_err("alg: hash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
+			pr_err("alg: ahash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
 			       driver, vec_name, vec->setkey_error);
 			return -EINVAL;
 		}
 	}
 
 	/* Build the scatterlist for the source data */
-	_input.iov_base = (void *)vec->plaintext;
-	_input.iov_len = vec->psize;
-	iov_iter_kvec(&input, WRITE, &_input, 1, vec->psize);
-	err = build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
-				&input, divs);
+	err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs);
 	if (err) {
-		pr_err("alg: hash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
+		pr_err("alg: ahash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
 		       driver, vec_name, cfg->name);
 		return err;
 	}
@@ -1135,13 +1329,13 @@ static int test_hash_vec_cfg(const char *driver,
 		if (err) {
 			if (err == vec->digest_error)
 				return 0;
-			pr_err("alg: hash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
+			pr_err("alg: ahash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
 			       driver, vec_name, vec->digest_error, err,
 			       cfg->name);
 			return err;
 		}
 		if (vec->digest_error) {
-			pr_err("alg: hash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
+			pr_err("alg: ahash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
 			       driver, vec_name, vec->digest_error, cfg->name);
 			return -EINVAL;
 		}
@@ -1153,8 +1347,8 @@ static int test_hash_vec_cfg(const char *driver,
 	ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
 	ahash_request_set_crypt(req, NULL, result, 0);
 	err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd);
-	err = check_nonfinal_hash_op("init", err, result, digestsize,
-				     driver, vec_name, cfg);
+	err = check_nonfinal_ahash_op("init", err, result, digestsize,
+				      driver, vec_name, cfg);
 	if (err)
 		return err;
 
@@ -1170,9 +1364,9 @@ static int test_hash_vec_cfg(const char *driver,
 					   pending_len);
 			err = do_ahash_op(crypto_ahash_update, req, &wait,
 					  divs[i]->nosimd);
-			err = check_nonfinal_hash_op("update", err,
-						     result, digestsize,
-						     driver, vec_name, cfg);
+			err = check_nonfinal_ahash_op("update", err,
+						      result, digestsize,
+						      driver, vec_name, cfg);
 			if (err)
 				return err;
 			pending_sgl = NULL;
@@ -1183,23 +1377,23 @@ static int test_hash_vec_cfg(const char *driver,
 			testmgr_poison(hashstate + statesize,
 				       TESTMGR_POISON_LEN);
 			err = crypto_ahash_export(req, hashstate);
-			err = check_nonfinal_hash_op("export", err,
-						     result, digestsize,
-						     driver, vec_name, cfg);
+			err = check_nonfinal_ahash_op("export", err,
+						      result, digestsize,
+						      driver, vec_name, cfg);
 			if (err)
 				return err;
 			if (!testmgr_is_poison(hashstate + statesize,
 					       TESTMGR_POISON_LEN)) {
-				pr_err("alg: hash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n",
+				pr_err("alg: ahash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n",
 				       driver, vec_name, cfg->name);
 				return -EOVERFLOW;
 			}
 
 			testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
 			err = crypto_ahash_import(req, hashstate);
-			err = check_nonfinal_hash_op("import", err,
-						     result, digestsize,
-						     driver, vec_name, cfg);
+			err = check_nonfinal_ahash_op("import", err,
+						      result, digestsize,
+						      driver, vec_name, cfg);
 			if (err)
 				return err;
 		}
@@ -1213,13 +1407,13 @@ static int test_hash_vec_cfg(const char *driver,
 	if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) {
 		/* finish with update() and final() */
 		err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd);
-		err = check_nonfinal_hash_op("update", err, result, digestsize,
-					     driver, vec_name, cfg);
+		err = check_nonfinal_ahash_op("update", err, result, digestsize,
+					      driver, vec_name, cfg);
 		if (err)
 			return err;
 		err = do_ahash_op(crypto_ahash_final, req, &wait, cfg->nosimd);
 		if (err) {
-			pr_err("alg: hash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n",
+			pr_err("alg: ahash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n",
 			       driver, err, vec_name, cfg->name);
 			return err;
 		}
@@ -1227,31 +1421,49 @@ static int test_hash_vec_cfg(const char *driver,
 		/* finish with finup() */
 		err = do_ahash_op(crypto_ahash_finup, req, &wait, cfg->nosimd);
 		if (err) {
-			pr_err("alg: hash: %s finup() failed with err %d on test vector %s, cfg=\"%s\"\n",
+			pr_err("alg: ahash: %s finup() failed with err %d on test vector %s, cfg=\"%s\"\n",
 			       driver, err, vec_name, cfg->name);
 			return err;
 		}
 	}
 
 result_ready:
-	/* Check that the algorithm produced the correct digest */
-	if (memcmp(result, vec->digest, digestsize) != 0) {
-		pr_err("alg: hash: %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
-		       driver, vec_name, cfg->name);
-		return -EINVAL;
-	}
-	if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) {
-		pr_err("alg: hash: %s overran result buffer on test vector %s, cfg=\"%s\"\n",
-		       driver, vec_name, cfg->name);
-		return -EOVERFLOW;
-	}
+	return check_hash_result("ahash", result, digestsize, vec, vec_name,
+				 driver, cfg);
+}
+
+static int test_hash_vec_cfg(const char *driver,
+			     const struct hash_testvec *vec,
+			     const char *vec_name,
+			     const struct testvec_config *cfg,
+			     struct ahash_request *req,
+			     struct shash_desc *desc,
+			     struct test_sglist *tsgl,
+			     u8 *hashstate)
+{
+	int err;
+
+	/*
+	 * For algorithms implemented as "shash", most bugs will be detected by
+	 * both the shash and ahash tests. Test the shash API first so that the
+	 * failures involve less indirection, so are easier to debug.
+	 */
+	if (desc) {
+		err = test_shash_vec_cfg(driver, vec, vec_name, cfg, desc, tsgl,
+					 hashstate);
+		if (err)
+			return err;
+	}
 
-	return 0;
+	return test_ahash_vec_cfg(driver, vec, vec_name, cfg, req, tsgl,
+				  hashstate);
 }
 
 static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
 			 unsigned int vec_num, struct ahash_request *req,
-			 struct test_sglist *tsgl, u8 *hashstate)
+			 struct shash_desc *desc, struct test_sglist *tsgl,
+			 u8 *hashstate)
 {
 	char vec_name[16];
 	unsigned int i;
@@ -1262,7 +1474,7 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
 	for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) {
 		err = test_hash_vec_cfg(driver, vec, vec_name,
 					&default_hash_testvec_configs[i],
-					req, tsgl, hashstate);
+					req, desc, tsgl, hashstate);
 		if (err)
 			return err;
 	}
@@ -1276,9 +1488,10 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
 			generate_random_testvec_config(&cfg, cfgname,
 						       sizeof(cfgname));
 			err = test_hash_vec_cfg(driver, vec, vec_name, &cfg,
-						req, tsgl, hashstate);
+						req, desc, tsgl, hashstate);
 			if (err)
 				return err;
+			cond_resched();
 		}
 	}
 #endif
@@ -1290,14 +1503,12 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
  * Generate a hash test vector from the given implementation.
  * Assumes the buffers in 'vec' were already allocated.
  */
-static void generate_random_hash_testvec(struct crypto_shash *tfm,
+static void generate_random_hash_testvec(struct shash_desc *desc,
 					 struct hash_testvec *vec,
 					 unsigned int maxkeysize,
 					 unsigned int maxdatasize,
 					 char *name, size_t max_namelen)
 {
-	SHASH_DESC_ON_STACK(desc, tfm);
-
 	/* Data */
 	vec->psize = generate_random_length(maxdatasize);
 	generate_random_bytes((u8 *)vec->plaintext, vec->psize);
@@ -1314,7 +1525,7 @@ static void generate_random_hash_testvec(struct crypto_shash *tfm,
 		vec->ksize = 1 + (prandom_u32() % maxkeysize);
 		generate_random_bytes((u8 *)vec->key, vec->ksize);
 
-		vec->setkey_error = crypto_shash_setkey(tfm, vec->key,
+		vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
 							vec->ksize);
 		/* If the key couldn't be set, no need to continue to digest. */
 		if (vec->setkey_error)
@@ -1322,7 +1533,6 @@ static void generate_random_hash_testvec(struct crypto_shash *tfm,
 	}
 
 	/* Digest */
-	desc->tfm = tfm;
 	vec->digest_error = crypto_shash_digest(desc, vec->plaintext,
 						vec->psize, (u8 *)vec->digest);
 done:
@@ -1338,6 +1548,7 @@ static int test_hash_vs_generic_impl(const char *driver,
 				      const char *generic_driver,
 				      unsigned int maxkeysize,
 				      struct ahash_request *req,
+				      struct shash_desc *desc,
 				      struct test_sglist *tsgl,
 				      u8 *hashstate)
 {
@@ -1348,10 +1559,11 @@ static int test_hash_vs_generic_impl(const char *driver,
 	const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
 	char _generic_driver[CRYPTO_MAX_ALG_NAME];
 	struct crypto_shash *generic_tfm = NULL;
+	struct shash_desc *generic_desc = NULL;
 	unsigned int i;
 	struct hash_testvec vec = { 0 };
 	char vec_name[64];
-	struct testvec_config cfg;
+	struct testvec_config *cfg;
 	char cfgname[TESTVEC_CONFIG_NAMELEN];
 	int err;
 
@@ -1381,6 +1593,20 @@ static int test_hash_vs_generic_impl(const char *driver,
 		return err;
 	}
 
+	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+	if (!cfg) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	generic_desc = kzalloc(sizeof(*desc) +
+			       crypto_shash_descsize(generic_tfm), GFP_KERNEL);
+	if (!generic_desc) {
+		err = -ENOMEM;
+		goto out;
+	}
+	generic_desc->tfm = generic_tfm;
+
 	/* Check the algorithm properties for consistency. */
 
 	if (digestsize != crypto_shash_digestsize(generic_tfm)) {
@@ -1412,23 +1638,25 @@ static int test_hash_vs_generic_impl(const char *driver,
 	}
 
 	for (i = 0; i < fuzz_iterations * 8; i++) {
-		generate_random_hash_testvec(generic_tfm, &vec,
+		generate_random_hash_testvec(generic_desc, &vec,
 					     maxkeysize, maxdatasize,
 					     vec_name, sizeof(vec_name));
-		generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+		generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
 
-		err = test_hash_vec_cfg(driver, &vec, vec_name, &cfg,
-					req, tsgl, hashstate);
+		err = test_hash_vec_cfg(driver, &vec, vec_name, cfg,
+					req, desc, tsgl, hashstate);
 		if (err)
 			goto out;
 		cond_resched();
 	}
 	err = 0;
 out:
+	kfree(cfg);
 	kfree(vec.key);
 	kfree(vec.plaintext);
 	kfree(vec.digest);
 	crypto_free_shash(generic_tfm);
+	kzfree(generic_desc);
 	return err;
 }
 #else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
@@ -1436,6 +1664,7 @@ static int test_hash_vs_generic_impl(const char *driver,
 				      const char *generic_driver,
 				      unsigned int maxkeysize,
 				      struct ahash_request *req,
+				      struct shash_desc *desc,
 				      struct test_sglist *tsgl,
 				      u8 *hashstate)
 {
@@ -1443,26 +1672,67 @@ static int test_hash_vs_generic_impl(const char *driver,
 }
 #endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
 
+static int alloc_shash(const char *driver, u32 type, u32 mask,
+		       struct crypto_shash **tfm_ret,
+		       struct shash_desc **desc_ret)
+{
+	struct crypto_shash *tfm;
+	struct shash_desc *desc;
+
+	tfm = crypto_alloc_shash(driver, type, mask);
+	if (IS_ERR(tfm)) {
+		if (PTR_ERR(tfm) == -ENOENT) {
+			/*
+			 * This algorithm is only available through the ahash
+			 * API, not the shash API, so skip the shash tests.
+			 */
+			return 0;
+		}
+		pr_err("alg: hash: failed to allocate shash transform for %s: %ld\n",
+		       driver, PTR_ERR(tfm));
+		return PTR_ERR(tfm);
+	}
+
+	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
+	if (!desc) {
+		crypto_free_shash(tfm);
+		return -ENOMEM;
+	}
+	desc->tfm = tfm;
+
+	*tfm_ret = tfm;
+	*desc_ret = desc;
+	return 0;
+}
+
 static int __alg_test_hash(const struct hash_testvec *vecs,
 			   unsigned int num_vecs, const char *driver,
 			   u32 type, u32 mask,
 			   const char *generic_driver, unsigned int maxkeysize)
 {
-	struct crypto_ahash *tfm;
+	struct crypto_ahash *atfm = NULL;
 	struct ahash_request *req = NULL;
+	struct crypto_shash *stfm = NULL;
+	struct shash_desc *desc = NULL;
 	struct test_sglist *tsgl = NULL;
 	u8 *hashstate = NULL;
+	unsigned int statesize;
 	unsigned int i;
 	int err;
 
-	tfm = crypto_alloc_ahash(driver, type, mask);
-	if (IS_ERR(tfm)) {
+	/*
+	 * Always test the ahash API. This works regardless of whether the
+	 * algorithm is implemented as ahash or shash.
+	 */
+	atfm = crypto_alloc_ahash(driver, type, mask);
+	if (IS_ERR(atfm)) {
 		pr_err("alg: hash: failed to allocate transform for %s: %ld\n",
-		       driver, PTR_ERR(tfm));
-		return PTR_ERR(tfm);
+		       driver, PTR_ERR(atfm));
+		return PTR_ERR(atfm);
 	}
 
-	req = ahash_request_alloc(tfm, GFP_KERNEL);
+	req = ahash_request_alloc(atfm, GFP_KERNEL);
 	if (!req) {
 		pr_err("alg: hash: failed to allocate request for %s\n",
 		       driver);
@@ -1470,6 +1740,14 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
 		goto out;
 	}
 
+	/*
+	 * If available also test the shash API, to cover corner cases that may
+	 * be missed by testing the ahash API only.
+	 */
+	err = alloc_shash(driver, type, mask, &stfm, &desc);
+	if (err)
+		goto out;
+
 	tsgl = kmalloc(sizeof(*tsgl), GFP_KERNEL);
 	if (!tsgl || init_test_sglist(tsgl) != 0) {
 		pr_err("alg: hash: failed to allocate test buffers for %s\n",
@@ -1480,8 +1758,10 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
 		goto out;
 	}
 
-	hashstate = kmalloc(crypto_ahash_statesize(tfm) + TESTMGR_POISON_LEN,
-			    GFP_KERNEL);
+	statesize = crypto_ahash_statesize(atfm);
+	if (stfm)
+		statesize = max(statesize, crypto_shash_statesize(stfm));
+	hashstate = kmalloc(statesize + TESTMGR_POISON_LEN, GFP_KERNEL);
 	if (!hashstate) {
 		pr_err("alg: hash: failed to allocate hash state buffer for %s\n",
 		       driver);
@@ -1490,20 +1770,24 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
 	}
 
 	for (i = 0; i < num_vecs; i++) {
-		err = test_hash_vec(driver, &vecs[i], i, req, tsgl, hashstate);
+		err = test_hash_vec(driver, &vecs[i], i, req, desc, tsgl,
+				    hashstate);
 		if (err)
 			goto out;
+		cond_resched();
 	}
 	err = test_hash_vs_generic_impl(driver, generic_driver, maxkeysize, req,
-					tsgl, hashstate);
+					desc, tsgl, hashstate);
 out:
 	kfree(hashstate);
 	if (tsgl) {
 		destroy_test_sglist(tsgl);
 		kfree(tsgl);
 	}
+	kfree(desc);
+	crypto_free_shash(stfm);
 	ahash_request_free(req);
-	crypto_free_ahash(tfm);
+	crypto_free_ahash(atfm);
 	return err;
 }
@@ -1755,6 +2039,7 @@ static int test_aead_vec(const char *driver, int enc,
 					       &cfg, req, tsgls);
 			if (err)
 				return err;
+			cond_resched();
 		}
 	}
 #endif
@@ -1864,7 +2149,7 @@ static int test_aead_vs_generic_impl(const char *driver,
 	unsigned int i;
 	struct aead_testvec vec = { 0 };
 	char vec_name[64];
-	struct testvec_config cfg;
+	struct testvec_config *cfg;
 	char cfgname[TESTVEC_CONFIG_NAMELEN];
 	int err;
 
@@ -1894,6 +2179,12 @@ static int test_aead_vs_generic_impl(const char *driver,
 		return err;
 	}
 
+	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+	if (!cfg) {
+		err = -ENOMEM;
+		goto out;
+	}
+
 	generic_req = aead_request_alloc(generic_tfm, GFP_KERNEL);
 	if (!generic_req) {
 		err = -ENOMEM;
@@ -1948,13 +2239,13 @@ static int test_aead_vs_generic_impl(const char *driver,
 		generate_random_aead_testvec(generic_req, &vec,
 					     maxkeysize, maxdatasize,
 					     vec_name, sizeof(vec_name));
-		generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+		generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
 
-		err = test_aead_vec_cfg(driver, ENCRYPT, &vec, vec_name, &cfg,
+		err = test_aead_vec_cfg(driver, ENCRYPT, &vec, vec_name, cfg,
 					req, tsgls);
 		if (err)
 			goto out;
-		err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, &cfg,
+		err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, cfg,
 					req, tsgls);
 		if (err)
 			goto out;
@@ -1962,6 +2253,7 @@ static int test_aead_vs_generic_impl(const char *driver,
 	}
 	err = 0;
 out:
+	kfree(cfg);
 	kfree(vec.key);
 	kfree(vec.iv);
 	kfree(vec.assoc);
@@ -1994,6 +2286,7 @@ static int test_aead(const char *driver, int enc,
 				    tsgls);
 		if (err)
 			return err;
+		cond_resched();
 	}
 	return 0;
 }
@@ -2336,6 +2629,7 @@ static int test_skcipher_vec(const char *driver, int enc,
 					       &cfg, req, tsgls);
 			if (err)
 				return err;
+			cond_resched();
 		}
 	}
 #endif
@@ -2409,7 +2703,7 @@ static int test_skcipher_vs_generic_impl(const char *driver,
 	unsigned int i;
 	struct cipher_testvec vec = { 0 };
 	char vec_name[64];
-	struct testvec_config cfg;
+	struct testvec_config *cfg;
 	char cfgname[TESTVEC_CONFIG_NAMELEN];
 	int err;
 
@@ -2443,6 +2737,12 @@ static int test_skcipher_vs_generic_impl(const char *driver,
 		return err;
 	}
 
+	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+	if (!cfg) {
+		err = -ENOMEM;
+		goto out;
+	}
+
 	generic_req = skcipher_request_alloc(generic_tfm, GFP_KERNEL);
 	if (!generic_req) {
 		err = -ENOMEM;
@@ -2490,20 +2790,21 @@ static int test_skcipher_vs_generic_impl(const char *driver,
 	for (i = 0; i < fuzz_iterations * 8; i++) {
 		generate_random_cipher_testvec(generic_req, &vec, maxdatasize,
 					       vec_name, sizeof(vec_name));
-		generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+		generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
 
 		err = test_skcipher_vec_cfg(driver, ENCRYPT, &vec, vec_name,
-					    &cfg, req, tsgls);
+					    cfg, req, tsgls);
 		if (err)
 			goto out;
 		err = test_skcipher_vec_cfg(driver, DECRYPT, &vec, vec_name,
-					    &cfg, req, tsgls);
+					    cfg, req, tsgls);
 		if (err)
 			goto out;
 		cond_resched();
 	}
 	err = 0;
 out:
+	kfree(cfg);
 	kfree(vec.key);
 	kfree(vec.iv);
 	kfree(vec.ptext);
@@ -2535,6 +2836,7 @@ static int test_skcipher(const char *driver, int enc,
 				    tsgls);
 		if (err)
 			return err;
+		cond_resched();
 	}
 	return 0;
 }
@@ -4125,6 +4427,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 	}
 }, {
 	.alg = "ecb(arc4)",
+	.generic_driver = "ecb(arc4)-generic",
 	.test = alg_test_skcipher,
 	.suite = {
 		.cipher = __VECS(arc4_tv_template)
@@ -4789,6 +5092,13 @@ static const struct alg_test_desc alg_test_descs[] = {
 	.alg = "xts512(paes)",
||||||
.test = alg_test_null,
|
.test = alg_test_null,
|
||||||
.fips_allowed = 1,
|
.fips_allowed = 1,
|
||||||
|
}, {
|
||||||
|
.alg = "xxhash64",
|
||||||
|
.test = alg_test_hash,
|
||||||
|
.fips_allowed = 1,
|
||||||
|
.suite = {
|
||||||
|
.hash = __VECS(xxhash64_tv_template)
|
||||||
|
}
|
||||||
}, {
|
}, {
|
||||||
.alg = "zlib-deflate",
|
.alg = "zlib-deflate",
|
||||||
.test = alg_test_comp,
|
.test = alg_test_comp,
|
||||||
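The testmgr.c hunks above repeat one pattern in each test loop: the large testvec_config moves off the kernel stack onto the heap, and cond_resched() is inserted between vectors so long fuzzing runs can yield the CPU. A minimal sketch of that pattern, with a hypothetical test_one_vec() helper standing in for the real testmgr entry points:

    static int test_vectors(unsigned int num_vecs)
    {
            struct testvec_config *cfg;
            unsigned int i;
            int err = 0;

            /* too large for the kernel stack once fuzzing grew the struct */
            cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
            if (!cfg)
                    return -ENOMEM;

            for (i = 0; i < num_vecs; i++) {
                    err = test_one_vec(cfg, i);     /* hypothetical helper */
                    if (err)
                            break;
                    cond_resched();                 /* yield between vectors */
            }

            kfree(cfg);
            return err;
    }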
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -38,7 +38,7 @@ struct hash_testvec {
 	const char *key;
 	const char *plaintext;
 	const char *digest;
-	unsigned short psize;
+	unsigned int psize;
 	unsigned short ksize;
 	int setkey_error;
 	int digest_error;
@@ -69,7 +69,7 @@ struct cipher_testvec {
 	const char *ctext;
 	unsigned char wk; /* weak key flag */
 	unsigned short klen;
-	unsigned short len;
+	unsigned int len;
 	bool fips_skip;
 	bool generates_iv;
 	int setkey_error;
@@ -105,9 +105,9 @@ struct aead_testvec {
 	unsigned char novrfy;
 	unsigned char wk;
 	unsigned char klen;
-	unsigned short plen;
-	unsigned short clen;
-	unsigned short alen;
+	unsigned int plen;
+	unsigned int clen;
+	unsigned int alen;
 	int setkey_error;
 	int setauthsize_error;
 	int crypt_error;
@@ -33382,6 +33382,112 @@ static const struct hash_testvec crc32c_tv_template[] = {
 	}
 };

+static const struct hash_testvec xxhash64_tv_template[] = {
+	{
+		.psize = 0,
+		.digest = "\x99\xe9\xd8\x51\x37\xdb\x46\xef",
+	},
+	{
+		.plaintext = "\x40",
+		.psize = 1,
+		.digest = "\x20\x5c\x91\xaa\x88\xeb\x59\xd0",
+	},
+	{
+		.plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d"
+			     "\x88\xc7\x9a\x09\x1a\x9b",
+		.psize = 14,
+		.digest = "\xa8\xe8\x2b\xa9\x92\xa1\x37\x4a",
+	},
+	{
+		.plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d"
+			     "\x88\xc7\x9a\x09\x1a\x9b\x42\xe0"
+			     "\xd4\x38\xa5\x2a\x26\xa5\x19\x4b"
+			     "\x57\x65\x7f\xad\xc3\x7d\xca\x40"
+			     "\x31\x65\x05\xbb\x31\xae\x51\x11"
+			     "\xa8\xc0\xb3\x28\x42\xeb\x3c\x46"
+			     "\xc8\xed\xed\x0f\x8d\x0b\xfa\x6e"
+			     "\xbc\xe3\x88\x53\xca\x8f\xc8\xd9"
+			     "\x41\x26\x7a\x3d\x21\xdb\x1a\x3c"
+			     "\x01\x1d\xc9\xe9\xb7\x3a\x78\x67"
+			     "\x57\x20\x94\xf1\x1e\xfd\xce\x39"
+			     "\x99\x57\x69\x39\xa5\xd0\x8d\xd9"
+			     "\x43\xfe\x1d\x66\x04\x3c\x27\x6a"
+			     "\xe1\x0d\xe7\xc9\xfa\xc9\x07\x56"
+			     "\xa5\xb3\xec\xd9\x1f\x42\x65\x66"
+			     "\xaa\xbf\x87\x9b\xc5\x41\x9c\x27"
+			     "\x3f\x2f\xa9\x55\x93\x01\x27\x33"
+			     "\x43\x99\x4d\x81\x85\xae\x82\x00"
+			     "\x6c\xd0\xd1\xa3\x57\x18\x06\xcc"
+			     "\xec\x72\xf7\x8e\x87\x2d\x1f\x5e"
+			     "\xd7\x5b\x1f\x36\x4c\xfa\xfd\x18"
+			     "\x89\x76\xd3\x5e\xb5\x5a\xc0\x01"
+			     "\xd2\xa1\x9a\x50\xe6\x08\xb4\x76"
+			     "\x56\x4f\x0e\xbc\x54\xfc\x67\xe6"
+			     "\xb9\xc0\x28\x4b\xb5\xc3\xff\x79"
+			     "\x52\xea\xa1\x90\xc3\xaf\x08\x70"
+			     "\x12\x02\x0c\xdb\x94\x00\x38\x95"
+			     "\xed\xfd\x08\xf7\xe8\x04",
+		.psize = 222,
+		.digest = "\x41\xfc\xd4\x29\xfe\xe7\x85\x17",
+	},
+	{
+		.psize = 0,
+		.key = "\xb1\x79\x37\x9e\x00\x00\x00\x00",
+		.ksize = 8,
+		.digest = "\xef\x17\x9b\x92\xa2\xfd\x75\xac",
+	},
+
+	{
+		.plaintext = "\x40",
+		.psize = 1,
+		.key = "\xb1\x79\x37\x9e\x00\x00\x00\x00",
+		.ksize = 8,
+		.digest = "\xd1\x70\x4f\x14\x02\xc4\x9e\x71",
+	},
+	{
+		.plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d"
+			     "\x88\xc7\x9a\x09\x1a\x9b",
+		.psize = 14,
+		.key = "\xb1\x79\x37\x9e\x00\x00\x00\x00",
+		.ksize = 8,
+		.digest = "\xa4\xcd\xfe\x8e\x37\xe2\x1c\x64"
+	},
+	{
+		.plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d"
+			     "\x88\xc7\x9a\x09\x1a\x9b\x42\xe0"
+			     "\xd4\x38\xa5\x2a\x26\xa5\x19\x4b"
+			     "\x57\x65\x7f\xad\xc3\x7d\xca\x40"
+			     "\x31\x65\x05\xbb\x31\xae\x51\x11"
+			     "\xa8\xc0\xb3\x28\x42\xeb\x3c\x46"
+			     "\xc8\xed\xed\x0f\x8d\x0b\xfa\x6e"
+			     "\xbc\xe3\x88\x53\xca\x8f\xc8\xd9"
+			     "\x41\x26\x7a\x3d\x21\xdb\x1a\x3c"
+			     "\x01\x1d\xc9\xe9\xb7\x3a\x78\x67"
+			     "\x57\x20\x94\xf1\x1e\xfd\xce\x39"
+			     "\x99\x57\x69\x39\xa5\xd0\x8d\xd9"
+			     "\x43\xfe\x1d\x66\x04\x3c\x27\x6a"
+			     "\xe1\x0d\xe7\xc9\xfa\xc9\x07\x56"
+			     "\xa5\xb3\xec\xd9\x1f\x42\x65\x66"
+			     "\xaa\xbf\x87\x9b\xc5\x41\x9c\x27"
+			     "\x3f\x2f\xa9\x55\x93\x01\x27\x33"
+			     "\x43\x99\x4d\x81\x85\xae\x82\x00"
+			     "\x6c\xd0\xd1\xa3\x57\x18\x06\xcc"
+			     "\xec\x72\xf7\x8e\x87\x2d\x1f\x5e"
+			     "\xd7\x5b\x1f\x36\x4c\xfa\xfd\x18"
+			     "\x89\x76\xd3\x5e\xb5\x5a\xc0\x01"
+			     "\xd2\xa1\x9a\x50\xe6\x08\xb4\x76"
+			     "\x56\x4f\x0e\xbc\x54\xfc\x67\xe6"
+			     "\xb9\xc0\x28\x4b\xb5\xc3\xff\x79"
+			     "\x52\xea\xa1\x90\xc3\xaf\x08\x70"
+			     "\x12\x02\x0c\xdb\x94\x00\x38\x95"
+			     "\xed\xfd\x08\xf7\xe8\x04",
+		.psize = 222,
+		.key = "\xb1\x79\x37\x9e\x00\x00\x00\x00",
+		.ksize = 8,
+		.digest = "\x58\xbc\x55\xf2\x42\x81\x5c\xf0"
+	},
+};
+
 static const struct comp_testvec lz4_comp_tv_template[] = {
 	{
 		.inlen = 255,
@@ -630,9 +630,10 @@ static struct shash_alg tgr_algs[3] = { {
 	.final = tgr192_final,
 	.descsize = sizeof(struct tgr192_ctx),
 	.base = {
 		.cra_name = "tgr192",
+		.cra_driver_name = "tgr192-generic",
 		.cra_blocksize = TGR192_BLOCK_SIZE,
 		.cra_module = THIS_MODULE,
 	}
 }, {
 	.digestsize = TGR160_DIGEST_SIZE,
@@ -641,9 +642,10 @@ static struct shash_alg tgr_algs[3] = { {
 	.final = tgr160_final,
 	.descsize = sizeof(struct tgr192_ctx),
 	.base = {
 		.cra_name = "tgr160",
+		.cra_driver_name = "tgr160-generic",
 		.cra_blocksize = TGR192_BLOCK_SIZE,
 		.cra_module = THIS_MODULE,
 	}
 }, {
 	.digestsize = TGR128_DIGEST_SIZE,
@@ -652,9 +654,10 @@ static struct shash_alg tgr_algs[3] = { {
 	.final = tgr128_final,
 	.descsize = sizeof(struct tgr192_ctx),
 	.base = {
 		.cra_name = "tgr128",
+		.cra_driver_name = "tgr128-generic",
 		.cra_blocksize = TGR192_BLOCK_SIZE,
 		.cra_module = THIS_MODULE,
 	}
 } };

@@ -1126,9 +1126,10 @@ static struct shash_alg wp_algs[3] = { {
 	.final = wp512_final,
 	.descsize = sizeof(struct wp512_ctx),
 	.base = {
 		.cra_name = "wp512",
+		.cra_driver_name = "wp512-generic",
 		.cra_blocksize = WP512_BLOCK_SIZE,
 		.cra_module = THIS_MODULE,
 	}
 }, {
 	.digestsize = WP384_DIGEST_SIZE,
@@ -1137,9 +1138,10 @@ static struct shash_alg wp_algs[3] = { {
 	.final = wp384_final,
 	.descsize = sizeof(struct wp512_ctx),
 	.base = {
 		.cra_name = "wp384",
+		.cra_driver_name = "wp384-generic",
 		.cra_blocksize = WP512_BLOCK_SIZE,
 		.cra_module = THIS_MODULE,
 	}
 }, {
 	.digestsize = WP256_DIGEST_SIZE,
@@ -1148,9 +1150,10 @@ static struct shash_alg wp_algs[3] = { {
 	.final = wp256_final,
 	.descsize = sizeof(struct wp512_ctx),
 	.base = {
 		.cra_name = "wp256",
+		.cra_driver_name = "wp256-generic",
 		.cra_blocksize = WP512_BLOCK_SIZE,
 		.cra_module = THIS_MODULE,
 	}
 } };
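The tgr192/tgr160/tgr128 and wp512/wp384/wp256 hunks above do nothing but give each generic shash a cra_driver_name. A brief, hedged illustration of why that matters (the demo function is editorial, not part of the patch): with a driver name registered, a caller, or the self-tests, can pin a specific implementation instead of taking whichever provider wins priority resolution for the algorithm name:

    static int wp512_generic_demo(void)
    {
            struct crypto_shash *tfm;

            /* by driver name: pin the generic C implementation; plain
             * "wp512" would pick the highest-priority provider instead */
            tfm = crypto_alloc_shash("wp512-generic", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* ... use the hash via the crypto_shash_*() calls ... */

            crypto_free_shash(tfm);
            return 0;
    }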
--- /dev/null
+++ b/crypto/xxhash_generic.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/xxhash.h>
+#include <asm/unaligned.h>
+
+#define XXHASH64_BLOCK_SIZE	32
+#define XXHASH64_DIGEST_SIZE	8
+
+struct xxhash64_tfm_ctx {
+	u64 seed;
+};
+
+struct xxhash64_desc_ctx {
+	struct xxh64_state xxhstate;
+};
+
+static int xxhash64_setkey(struct crypto_shash *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+
+	if (keylen != sizeof(tctx->seed)) {
+		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	tctx->seed = get_unaligned_le64(key);
+	return 0;
+}
+
+static int xxhash64_init(struct shash_desc *desc)
+{
+	struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+	struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc);
+
+	xxh64_reset(&dctx->xxhstate, tctx->seed);
+
+	return 0;
+}
+
+static int xxhash64_update(struct shash_desc *desc, const u8 *data,
+			   unsigned int length)
+{
+	struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc);
+
+	xxh64_update(&dctx->xxhstate, data, length);
+
+	return 0;
+}
+
+static int xxhash64_final(struct shash_desc *desc, u8 *out)
+{
+	struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc);
+
+	put_unaligned_le64(xxh64_digest(&dctx->xxhstate), out);
+
+	return 0;
+}
+
+static int xxhash64_digest(struct shash_desc *desc, const u8 *data,
+			   unsigned int length, u8 *out)
+{
+	struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+
+	put_unaligned_le64(xxh64(data, length, tctx->seed), out);
+
+	return 0;
+}
+
+static struct shash_alg alg = {
+	.digestsize	= XXHASH64_DIGEST_SIZE,
+	.setkey		= xxhash64_setkey,
+	.init		= xxhash64_init,
+	.update		= xxhash64_update,
+	.final		= xxhash64_final,
+	.digest		= xxhash64_digest,
+	.descsize	= sizeof(struct xxhash64_desc_ctx),
+	.base		= {
+		.cra_name	 = "xxhash64",
+		.cra_driver_name = "xxhash64-generic",
+		.cra_priority	 = 100,
+		.cra_flags	 = CRYPTO_ALG_OPTIONAL_KEY,
+		.cra_blocksize	 = XXHASH64_BLOCK_SIZE,
+		.cra_ctxsize	 = sizeof(struct xxhash64_tfm_ctx),
+		.cra_module	 = THIS_MODULE,
+	}
+};
+
+static int __init xxhash_mod_init(void)
+{
+	return crypto_register_shash(&alg);
+}
+
+static void __exit xxhash_mod_fini(void)
+{
+	crypto_unregister_shash(&alg);
+}
+
+subsys_initcall(xxhash_mod_init);
+module_exit(xxhash_mod_fini);
+
+MODULE_AUTHOR("Nikolay Borisov <nborisov@suse.com>");
+MODULE_DESCRIPTION("xxhash calculations wrapper for lib/xxhash.c");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("xxhash64");
+MODULE_ALIAS_CRYPTO("xxhash64-generic");
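For context, a hedged sketch of how other kernel code could consume the new transform; the demo function and its names are illustrative, not part of the patch. The optional 8-byte key is the little-endian xxh64 seed, as implemented by xxhash64_setkey() above:

    #include <crypto/hash.h>
    #include <linux/err.h>

    static int xxhash64_demo(const u8 *data, unsigned int len, u8 digest[8])
    {
            struct crypto_shash *tfm;
            int ret;

            tfm = crypto_alloc_shash("xxhash64", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            {
                    /* one-shot digest; SHASH_DESC_ON_STACK needs tfm first */
                    SHASH_DESC_ON_STACK(desc, tfm);

                    desc->tfm = tfm;
                    ret = crypto_shash_digest(desc, data, len, digest);
            }

            crypto_free_shash(tfm);
            return ret;
    }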
@@ -206,6 +206,7 @@ static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src,

 static struct crypto_alg alg = {
 	.cra_name		= "zstd",
+	.cra_driver_name	= "zstd-generic",
 	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
 	.cra_ctxsize		= sizeof(struct zstd_ctx),
 	.cra_module		= THIS_MODULE,
@@ -220,6 +220,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
 }

 static const struct of_device_id iproc_rng200_of_match[] = {
+	{ .compatible = "brcm,bcm7211-rng200", },
 	{ .compatible = "brcm,bcm7278-rng200", },
 	{ .compatible = "brcm,iproc-rng200", },
 	{},
@@ -1,58 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
  * Copyright (c) 2016 BayLibre, SAS.
  * Author: Neil Armstrong <narmstrong@baylibre.com>
  * Copyright (C) 2014 Amlogic, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * BSD LICENSE
- *
- * Copyright (c) 2016 BayLibre, SAS.
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- * Copyright (C) 2014 Amlogic, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 #include <linux/err.h>
 #include <linux/module.h>
@@ -520,10 +520,13 @@ config CRYPTO_DEV_ATMEL_SHA
 	  To compile this driver as a module, choose M here: the module
 	  will be called atmel-sha.

+config CRYPTO_DEV_ATMEL_I2C
+	tristate
+
 config CRYPTO_DEV_ATMEL_ECC
 	tristate "Support for Microchip / Atmel ECC hw accelerator"
-	depends on ARCH_AT91 || COMPILE_TEST
 	depends on I2C
+	select CRYPTO_DEV_ATMEL_I2C
 	select CRYPTO_ECDH
 	select CRC16
 	help
@@ -534,6 +537,21 @@ config CRYPTO_DEV_ATMEL_ECC
 	  To compile this driver as a module, choose M here: the module
 	  will be called atmel-ecc.

+config CRYPTO_DEV_ATMEL_SHA204A
+	tristate "Support for Microchip / Atmel SHA accelerator and RNG"
+	depends on I2C
+	select CRYPTO_DEV_ATMEL_I2C
+	select HW_RANDOM
+	select CRC16
+	help
+	  Microchip / Atmel SHA accelerator and RNG.
+	  Select this if you want to use the Microchip / Atmel SHA204A
+	  module as a random number generator. (Other functions of the
+	  chip are currently not exposed by this driver)
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called atmel-sha204a.
+
 config CRYPTO_DEV_CCP
 	bool "Support for AMD Secure Processor"
 	depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
@@ -2,7 +2,9 @@
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o
 obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
 obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
 obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
@@ -67,12 +67,16 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
 }

 static inline int crypto4xx_crypt(struct skcipher_request *req,
-				  const unsigned int ivlen, bool decrypt)
+				  const unsigned int ivlen, bool decrypt,
+				  bool check_blocksize)
 {
 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
 	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
 	__le32 iv[AES_IV_SIZE];

+	if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+		return -EINVAL;
+
 	if (ivlen)
 		crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);

@@ -81,24 +85,34 @@ static inline int crypto4xx_crypt(struct skcipher_request *req,
 				  ctx->sa_len, 0, NULL);
 }

-int crypto4xx_encrypt_noiv(struct skcipher_request *req)
+int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
 {
-	return crypto4xx_crypt(req, 0, false);
+	return crypto4xx_crypt(req, 0, false, true);
 }

-int crypto4xx_encrypt_iv(struct skcipher_request *req)
+int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
 {
-	return crypto4xx_crypt(req, AES_IV_SIZE, false);
+	return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
 }

-int crypto4xx_decrypt_noiv(struct skcipher_request *req)
+int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
 {
-	return crypto4xx_crypt(req, 0, true);
+	return crypto4xx_crypt(req, 0, true, true);
 }

-int crypto4xx_decrypt_iv(struct skcipher_request *req)
+int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
 {
-	return crypto4xx_crypt(req, AES_IV_SIZE, true);
+	return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
+}
+
+int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
+{
+	return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
+}
+
+int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
+{
+	return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
 }

 /**
@@ -269,8 +283,8 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
 		return ret;
 	}

-	return encrypt ? crypto4xx_encrypt_iv(req)
-		       : crypto4xx_decrypt_iv(req);
+	return encrypt ? crypto4xx_encrypt_iv_stream(req)
+		       : crypto4xx_decrypt_iv_stream(req);
 }

 static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
@@ -182,7 +182,6 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
 				  dev->pdr_pa);
 		return -ENOMEM;
 	}
-	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
 	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
 				   sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
 				   &dev->shadow_sa_pool_pa,
@@ -1210,8 +1209,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
 		.max_keysize = AES_MAX_KEY_SIZE,
 		.ivsize	= AES_IV_SIZE,
 		.setkey = crypto4xx_setkey_aes_cbc,
-		.encrypt = crypto4xx_encrypt_iv,
-		.decrypt = crypto4xx_decrypt_iv,
+		.encrypt = crypto4xx_encrypt_iv_block,
+		.decrypt = crypto4xx_decrypt_iv_block,
 		.init = crypto4xx_sk_init,
 		.exit = crypto4xx_sk_exit,
 	} },
@@ -1222,7 +1221,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
 			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
 			.cra_flags = CRYPTO_ALG_ASYNC |
 				CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_blocksize = 1,
 			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
 			.cra_module = THIS_MODULE,
 		},
@@ -1230,8 +1229,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
 		.max_keysize = AES_MAX_KEY_SIZE,
 		.ivsize	= AES_IV_SIZE,
 		.setkey = crypto4xx_setkey_aes_cfb,
-		.encrypt = crypto4xx_encrypt_iv,
-		.decrypt = crypto4xx_decrypt_iv,
+		.encrypt = crypto4xx_encrypt_iv_stream,
+		.decrypt = crypto4xx_decrypt_iv_stream,
 		.init = crypto4xx_sk_init,
 		.exit = crypto4xx_sk_exit,
 	} },
@@ -1243,7 +1242,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
 				CRYPTO_ALG_ASYNC |
 				CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_blocksize = 1,
 			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
 			.cra_module = THIS_MODULE,
 		},
@@ -1263,7 +1262,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
 			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
 			.cra_flags = CRYPTO_ALG_ASYNC |
 				CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_blocksize = 1,
 			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
 			.cra_module = THIS_MODULE,
 		},
@@ -1290,8 +1289,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
 		.min_keysize = AES_MIN_KEY_SIZE,
 		.max_keysize = AES_MAX_KEY_SIZE,
 		.setkey = crypto4xx_setkey_aes_ecb,
-		.encrypt = crypto4xx_encrypt_noiv,
-		.decrypt = crypto4xx_decrypt_noiv,
+		.encrypt = crypto4xx_encrypt_noiv_block,
+		.decrypt = crypto4xx_decrypt_noiv_block,
 		.init = crypto4xx_sk_init,
 		.exit = crypto4xx_sk_exit,
 	} },
@@ -1302,7 +1301,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
 			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
 			.cra_flags = CRYPTO_ALG_ASYNC |
 				CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_blocksize = 1,
 			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
 			.cra_module = THIS_MODULE,
 		},
@@ -1310,8 +1309,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
 		.max_keysize = AES_MAX_KEY_SIZE,
 		.ivsize	= AES_IV_SIZE,
 		.setkey = crypto4xx_setkey_aes_ofb,
-		.encrypt = crypto4xx_encrypt_iv,
-		.decrypt = crypto4xx_decrypt_iv,
+		.encrypt = crypto4xx_encrypt_iv_stream,
+		.decrypt = crypto4xx_decrypt_iv_stream,
 		.init = crypto4xx_sk_init,
 		.exit = crypto4xx_sk_exit,
 	} },
@@ -173,10 +173,12 @@ int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
 			     const u8 *key, unsigned int keylen);
 int crypto4xx_encrypt_ctr(struct skcipher_request *req);
 int crypto4xx_decrypt_ctr(struct skcipher_request *req);
-int crypto4xx_encrypt_iv(struct skcipher_request *req);
-int crypto4xx_decrypt_iv(struct skcipher_request *req);
-int crypto4xx_encrypt_noiv(struct skcipher_request *req);
-int crypto4xx_decrypt_noiv(struct skcipher_request *req);
+int crypto4xx_encrypt_iv_stream(struct skcipher_request *req);
+int crypto4xx_decrypt_iv_stream(struct skcipher_request *req);
+int crypto4xx_encrypt_iv_block(struct skcipher_request *req);
+int crypto4xx_decrypt_iv_block(struct skcipher_request *req);
+int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
+int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
 int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
 int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
 int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
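A hedged summary of the crypto4xx split above (editorial, derived from the hunks rather than stated in the patch): the *_stream entry points back the stream-like modes, which accept any request length and now advertise a block size of 1, while the *_block entry points pass check_blocksize = true so crypto4xx_crypt() rejects partial AES blocks:

    /* mode         entry points (from the diff)        cra_blocksize    length check */
    /* ECB       -> crypto4xx_*crypt_noiv_block          AES_BLOCK_SIZE   enforced    */
    /* CBC       -> crypto4xx_*crypt_iv_block            AES_BLOCK_SIZE   enforced    */
    /* CFB/OFB/CTR -> crypto4xx_*crypt_iv_stream         1                none        */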
@@ -6,8 +6,6 @@
  * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
  */

-#include <linux/bitrev.h>
-#include <linux/crc16.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -23,41 +21,10 @@
 #include <crypto/internal/kpp.h>
 #include <crypto/ecdh.h>
 #include <crypto/kpp.h>
-#include "atmel-ecc.h"
+#include "atmel-i2c.h"

-/* Used for binding tfm objects to i2c clients. */
-struct atmel_ecc_driver_data {
-	struct list_head i2c_client_list;
-	spinlock_t i2c_list_lock;
-} ____cacheline_aligned;
-
 static struct atmel_ecc_driver_data driver_data;

-/**
- * atmel_ecc_i2c_client_priv - i2c_client private data
- * @client              : pointer to i2c client device
- * @i2c_client_list_node: part of i2c_client_list
- * @lock                : lock for sending i2c commands
- * @wake_token          : wake token array of zeros
- * @wake_token_sz       : size in bytes of the wake_token
- * @tfm_count           : number of active crypto transformations on i2c client
- *
- * Reads and writes from/to the i2c client are sequential. The first byte
- * transmitted to the device is treated as the byte size. Any attempt to send
- * more than this number of bytes will cause the device to not ACK those bytes.
- * After the host writes a single command byte to the input buffer, reads are
- * prohibited until after the device completes command execution. Use a mutex
- * when sending i2c commands.
- */
-struct atmel_ecc_i2c_client_priv {
-	struct i2c_client *client;
-	struct list_head i2c_client_list_node;
-	struct mutex lock;
-	u8 wake_token[WAKE_TOKEN_MAX_SIZE];
-	size_t wake_token_sz;
-	atomic_t tfm_count ____cacheline_aligned;
-};
-
 /**
  * atmel_ecdh_ctx - transformation context
  * @client     : pointer to i2c client device
@@ -80,188 +47,12 @@ struct atmel_ecdh_ctx {
 	bool do_fallback;
 };

-/**
- * atmel_ecc_work_data - data structure representing the work
- * @ctx : transformation context.
- * @cbk : pointer to a callback function to be invoked upon completion of this
- *        request. This has the form:
- *        callback(struct atmel_ecc_work_data *work_data, void *areq, u8 status)
- *        where:
- *        @work_data: data structure representing the work
- *        @areq     : optional pointer to an argument passed with the original
- *                    request.
- *        @status   : status returned from the i2c client device or i2c error.
- * @areq: optional pointer to a user argument for use at callback time.
- * @work: describes the task to be executed.
- * @cmd : structure used for communicating with the device.
- */
-struct atmel_ecc_work_data {
-	struct atmel_ecdh_ctx *ctx;
-	void (*cbk)(struct atmel_ecc_work_data *work_data, void *areq,
-		    int status);
-	void *areq;
-	struct work_struct work;
-	struct atmel_ecc_cmd cmd;
-};
-
-static u16 atmel_ecc_crc16(u16 crc, const u8 *buffer, size_t len)
-{
-	return cpu_to_le16(bitrev16(crc16(crc, buffer, len)));
-}
-
-/**
- * atmel_ecc_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
- * CRC16 verification of the count, opcode, param1, param2 and data bytes.
- * The checksum is saved in little-endian format in the least significant
- * two bytes of the command. CRC polynomial is 0x8005 and the initial register
- * value should be zero.
- *
- * @cmd : structure used for communicating with the device.
- */
-static void atmel_ecc_checksum(struct atmel_ecc_cmd *cmd)
-{
-	u8 *data = &cmd->count;
-	size_t len = cmd->count - CRC_SIZE;
-	u16 *crc16 = (u16 *)(data + len);
-
-	*crc16 = atmel_ecc_crc16(0, data, len);
-}
-
-static void atmel_ecc_init_read_cmd(struct atmel_ecc_cmd *cmd)
-{
-	cmd->word_addr = COMMAND;
-	cmd->opcode = OPCODE_READ;
-	/*
-	 * Read the word from Configuration zone that contains the lock bytes
-	 * (UserExtra, Selector, LockValue, LockConfig).
-	 */
-	cmd->param1 = CONFIG_ZONE;
-	cmd->param2 = DEVICE_LOCK_ADDR;
-	cmd->count = READ_COUNT;
-
-	atmel_ecc_checksum(cmd);
-
-	cmd->msecs = MAX_EXEC_TIME_READ;
-	cmd->rxsize = READ_RSP_SIZE;
-}
-
-static void atmel_ecc_init_genkey_cmd(struct atmel_ecc_cmd *cmd, u16 keyid)
-{
-	cmd->word_addr = COMMAND;
-	cmd->count = GENKEY_COUNT;
-	cmd->opcode = OPCODE_GENKEY;
-	cmd->param1 = GENKEY_MODE_PRIVATE;
-	/* a random private key will be generated and stored in slot keyID */
-	cmd->param2 = cpu_to_le16(keyid);
-
-	atmel_ecc_checksum(cmd);
-
-	cmd->msecs = MAX_EXEC_TIME_GENKEY;
-	cmd->rxsize = GENKEY_RSP_SIZE;
-}
-
-static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd,
-				   struct scatterlist *pubkey)
-{
-	size_t copied;
-
-	cmd->word_addr = COMMAND;
-	cmd->count = ECDH_COUNT;
-	cmd->opcode = OPCODE_ECDH;
-	cmd->param1 = ECDH_PREFIX_MODE;
-	/* private key slot */
-	cmd->param2 = cpu_to_le16(DATA_SLOT_2);
-
-	/*
-	 * The device only supports NIST P256 ECC keys. The public key size will
-	 * always be the same. Use a macro for the key size to avoid unnecessary
-	 * computations.
-	 */
-	copied = sg_copy_to_buffer(pubkey,
-				   sg_nents_for_len(pubkey,
-						    ATMEL_ECC_PUBKEY_SIZE),
-				   cmd->data, ATMEL_ECC_PUBKEY_SIZE);
-	if (copied != ATMEL_ECC_PUBKEY_SIZE)
-		return -EINVAL;
-
-	atmel_ecc_checksum(cmd);
-
-	cmd->msecs = MAX_EXEC_TIME_ECDH;
-	cmd->rxsize = ECDH_RSP_SIZE;
-
-	return 0;
-}
-
-/*
- * After wake and after execution of a command, there will be error, status, or
- * result bytes in the device's output register that can be retrieved by the
- * system. When the length of that group is four bytes, the codes returned are
- * detailed in error_list.
- */
-static int atmel_ecc_status(struct device *dev, u8 *status)
-{
-	size_t err_list_len = ARRAY_SIZE(error_list);
-	int i;
-	u8 err_id = status[1];
-
-	if (*status != STATUS_SIZE)
-		return 0;
-
-	if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR)
-		return 0;
-
-	for (i = 0; i < err_list_len; i++)
-		if (error_list[i].value == err_id)
-			break;
-
-	/* if err_id is not in the error_list then ignore it */
-	if (i != err_list_len) {
-		dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text);
-		return err_id;
-	}
-
-	return 0;
-}
-
-static int atmel_ecc_wakeup(struct i2c_client *client)
-{
-	struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
-	u8 status[STATUS_RSP_SIZE];
-	int ret;
-
-	/*
-	 * The device ignores any levels or transitions on the SCL pin when the
-	 * device is idle, asleep or during waking up. Don't check for error
-	 * when waking up the device.
-	 */
-	i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
-
-	/*
-	 * Wait to wake the device. Typical execution times for ecdh and genkey
-	 * are around tens of milliseconds. Delta is chosen to 50 microseconds.
-	 */
-	usleep_range(TWHI_MIN, TWHI_MAX);
-
-	ret = i2c_master_recv(client, status, STATUS_SIZE);
-	if (ret < 0)
-		return ret;
-
-	return atmel_ecc_status(&client->dev, status);
-}
-
-static int atmel_ecc_sleep(struct i2c_client *client)
-{
-	u8 sleep = SLEEP_TOKEN;
-
-	return i2c_master_send(client, &sleep, 1);
-}
-
-static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
+static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
 			    int status)
 {
 	struct kpp_request *req = areq;
 	struct atmel_ecdh_ctx *ctx = work_data->ctx;
-	struct atmel_ecc_cmd *cmd = &work_data->cmd;
+	struct atmel_i2c_cmd *cmd = &work_data->cmd;
 	size_t copied, n_sz;

 	if (status)
@@ -282,82 +73,6 @@ static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
 	kpp_request_complete(req, status);
 }

-/*
- * atmel_ecc_send_receive() - send a command to the device and receive its
- * response.
- * @client: i2c client device
- * @cmd   : structure used to communicate with the device
- *
- * After the device receives a Wake token, a watchdog counter starts within the
- * device. After the watchdog timer expires, the device enters sleep mode
- * regardless of whether some I/O transmission or command execution is in
- * progress. If a command is attempted when insufficient time remains prior to
- * watchdog timer execution, the device will return the watchdog timeout error
- * code without attempting to execute the command. There is no way to reset the
- * counter other than to put the device into sleep or idle mode and then
- * wake it up again.
- */
-static int atmel_ecc_send_receive(struct i2c_client *client,
-				  struct atmel_ecc_cmd *cmd)
-{
-	struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
-	int ret;
-
-	mutex_lock(&i2c_priv->lock);
-
-	ret = atmel_ecc_wakeup(client);
-	if (ret)
-		goto err;
-
-	/* send the command */
-	ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE);
-	if (ret < 0)
-		goto err;
-
-	/* delay the appropriate amount of time for command to execute */
-	msleep(cmd->msecs);
-
-	/* receive the response */
-	ret = i2c_master_recv(client, cmd->data, cmd->rxsize);
-	if (ret < 0)
-		goto err;
-
-	/* put the device into low-power mode */
-	ret = atmel_ecc_sleep(client);
-	if (ret < 0)
-		goto err;
-
-	mutex_unlock(&i2c_priv->lock);
-	return atmel_ecc_status(&client->dev, cmd->data);
-err:
-	mutex_unlock(&i2c_priv->lock);
-	return ret;
-}
-
-static void atmel_ecc_work_handler(struct work_struct *work)
-{
-	struct atmel_ecc_work_data *work_data =
-			container_of(work, struct atmel_ecc_work_data, work);
-	struct atmel_ecc_cmd *cmd = &work_data->cmd;
-	struct i2c_client *client = work_data->ctx->client;
-	int status;
-
-	status = atmel_ecc_send_receive(client, cmd);
-	work_data->cbk(work_data, work_data->areq, status);
-}
-
-static void atmel_ecc_enqueue(struct atmel_ecc_work_data *work_data,
-			      void (*cbk)(struct atmel_ecc_work_data *work_data,
-					  void *areq, int status),
-			      void *areq)
-{
-	work_data->cbk = (void *)cbk;
-	work_data->areq = areq;
-
-	INIT_WORK(&work_data->work, atmel_ecc_work_handler);
-	schedule_work(&work_data->work);
-}
-
 static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
 {
 	if (curve_id == ECC_CURVE_NIST_P256)
@@ -374,7 +89,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
 				 unsigned int len)
 {
 	struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
-	struct atmel_ecc_cmd *cmd;
+	struct atmel_i2c_cmd *cmd;
 	void *public_key;
 	struct ecdh params;
 	int ret = -ENOMEM;
@@ -412,9 +127,9 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
 	ctx->do_fallback = false;
 	ctx->curve_id = params.curve_id;

-	atmel_ecc_init_genkey_cmd(cmd, DATA_SLOT_2);
+	atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2);

-	ret = atmel_ecc_send_receive(ctx->client, cmd);
+	ret = atmel_i2c_send_receive(ctx->client, cmd);
 	if (ret)
 		goto free_public_key;

@@ -444,6 +159,9 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
 		return crypto_kpp_generate_public_key(req);
 	}

+	if (!ctx->public_key)
+		return -EINVAL;
+
 	/* might want less than we've got */
 	nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len);

@@ -461,7 +179,7 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
 {
 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
 	struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
-	struct atmel_ecc_work_data *work_data;
+	struct atmel_i2c_work_data *work_data;
 	gfp_t gfp;
 	int ret;

@@ -482,12 +200,13 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
 		return -ENOMEM;

 	work_data->ctx = ctx;
+	work_data->client = ctx->client;

-	ret = atmel_ecc_init_ecdh_cmd(&work_data->cmd, req->src);
+	ret = atmel_i2c_init_ecdh_cmd(&work_data->cmd, req->src);
 	if (ret)
 		goto free_work_data;

-	atmel_ecc_enqueue(work_data, atmel_ecdh_done, req);
+	atmel_i2c_enqueue(work_data, atmel_ecdh_done, req);

 	return -EINPROGRESS;

@@ -498,7 +217,7 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)

 static struct i2c_client *atmel_ecc_i2c_client_alloc(void)
 {
-	struct atmel_ecc_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL;
+	struct atmel_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL;
 	struct i2c_client *client = ERR_PTR(-ENODEV);
 	int min_tfm_cnt = INT_MAX;
 	int tfm_cnt;
@@ -533,7 +252,7 @@ static struct i2c_client *atmel_ecc_i2c_client_alloc(void)

 static void atmel_ecc_i2c_client_free(struct i2c_client *client)
 {
-	struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+	struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);

 	atomic_dec(&i2c_priv->tfm_count);
 }
@@ -604,96 +323,18 @@ static struct kpp_alg atmel_ecdh = {
 	},
 };

-static inline size_t atmel_ecc_wake_token_sz(u32 bus_clk_rate)
-{
-	u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);
-
-	/* return the size of the wake_token in bytes */
-	return DIV_ROUND_UP(no_of_bits, 8);
-}
-
-static int device_sanity_check(struct i2c_client *client)
-{
-	struct atmel_ecc_cmd *cmd;
-	int ret;
-
-	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	atmel_ecc_init_read_cmd(cmd);
-
-	ret = atmel_ecc_send_receive(client, cmd);
-	if (ret)
-		goto free_cmd;
-
-	/*
-	 * It is vital that the Configuration, Data and OTP zones be locked
-	 * prior to release into the field of the system containing the device.
-	 * Failure to lock these zones may permit modification of any secret
-	 * keys and may lead to other security problems.
-	 */
-	if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) {
-		dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n");
-		ret = -ENOTSUPP;
-	}
-
-	/* fall through */
-free_cmd:
-	kfree(cmd);
-	return ret;
-}
-
 static int atmel_ecc_probe(struct i2c_client *client,
 			   const struct i2c_device_id *id)
 {
-	struct atmel_ecc_i2c_client_priv *i2c_priv;
-	struct device *dev = &client->dev;
+	struct atmel_i2c_client_priv *i2c_priv;
 	int ret;
-	u32 bus_clk_rate;

-	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
-		dev_err(dev, "I2C_FUNC_I2C not supported\n");
-		return -ENODEV;
-	}
-
-	ret = of_property_read_u32(client->adapter->dev.of_node,
-				   "clock-frequency", &bus_clk_rate);
-	if (ret) {
-		dev_err(dev, "of: failed to read clock-frequency property\n");
-		return ret;
-	}
-
-	if (bus_clk_rate > 1000000L) {
-		dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
-			bus_clk_rate);
-		return -EINVAL;
-	}
-
-	i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL);
-	if (!i2c_priv)
-		return -ENOMEM;
-
-	i2c_priv->client = client;
-	mutex_init(&i2c_priv->lock);
-
-	/*
-	 * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate -
-	 * 1MHz. The previous bus_clk_rate check ensures us that wake_token_sz
-	 * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE.
-	 */
-	i2c_priv->wake_token_sz = atmel_ecc_wake_token_sz(bus_clk_rate);
-
-	memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token));
-
-	atomic_set(&i2c_priv->tfm_count, 0);
-
-	i2c_set_clientdata(client, i2c_priv);
-
-	ret = device_sanity_check(client);
+	ret = atmel_i2c_probe(client, id);
 	if (ret)
 		return ret;

+	i2c_priv = i2c_get_clientdata(client);
+
 	spin_lock(&driver_data.i2c_list_lock);
 	list_add_tail(&i2c_priv->i2c_client_list_node,
 		      &driver_data.i2c_client_list);
@@ -705,10 +346,10 @@ static int atmel_ecc_probe(struct i2c_client *client,
 		list_del(&i2c_priv->i2c_client_list_node);
 		spin_unlock(&driver_data.i2c_list_lock);

-		dev_err(dev, "%s alg registration failed\n",
+		dev_err(&client->dev, "%s alg registration failed\n",
 			atmel_ecdh.base.cra_driver_name);
 	} else {
-		dev_info(dev, "atmel ecc algorithms registered in /proc/crypto\n");
+		dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n");
 	}

 	return ret;
@@ -716,7 +357,7 @@ static int atmel_ecc_probe(struct i2c_client *client,

 static int atmel_ecc_remove(struct i2c_client *client)
 {
-	struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+	struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);

 	/* Return EBUSY if i2c client already allocated. */
 	if (atomic_read(&i2c_priv->tfm_count)) {
|
drivers/crypto/atmel-ecc.h (deleted)
@@ -1,116 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017, Microchip Technology Inc.
 * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
 */

#ifndef __ATMEL_ECC_H__
#define __ATMEL_ECC_H__

#define ATMEL_ECC_PRIORITY		300

#define COMMAND				0x03 /* packet function */
#define SLEEP_TOKEN			0x01
#define WAKE_TOKEN_MAX_SIZE		8

/* Definitions of Data and Command sizes */
#define WORD_ADDR_SIZE			1
#define COUNT_SIZE			1
#define CRC_SIZE			2
#define CMD_OVERHEAD_SIZE		(COUNT_SIZE + CRC_SIZE)

/* size in bytes of the n prime */
#define ATMEL_ECC_NIST_P256_N_SIZE	32
#define ATMEL_ECC_PUBKEY_SIZE		(2 * ATMEL_ECC_NIST_P256_N_SIZE)

#define STATUS_RSP_SIZE			4
#define ECDH_RSP_SIZE			(32 + CMD_OVERHEAD_SIZE)
#define GENKEY_RSP_SIZE			(ATMEL_ECC_PUBKEY_SIZE + \
					 CMD_OVERHEAD_SIZE)
#define READ_RSP_SIZE			(4 + CMD_OVERHEAD_SIZE)
#define MAX_RSP_SIZE			GENKEY_RSP_SIZE

/**
 * atmel_ecc_cmd - structure used for communicating with the device.
 * @word_addr: indicates the function of the packet sent to the device. This
 *             byte should have a value of COMMAND for normal operation.
 * @count    : number of bytes to be transferred to (or from) the device.
 * @opcode   : the command code.
 * @param1   : the first parameter; always present.
 * @param2   : the second parameter; always present.
 * @data     : optional remaining input data. Includes a 2-byte CRC.
 * @rxsize   : size of the data received from i2c client.
 * @msecs    : command execution time in milliseconds
 */
struct atmel_ecc_cmd {
	u8 word_addr;
	u8 count;
	u8 opcode;
	u8 param1;
	u16 param2;
	u8 data[MAX_RSP_SIZE];
	u8 msecs;
	u16 rxsize;
} __packed;

/* Status/Error codes */
#define STATUS_SIZE			0x04
#define STATUS_NOERR			0x00
#define STATUS_WAKE_SUCCESSFUL		0x11

static const struct {
	u8 value;
	const char *error_text;
} error_list[] = {
	{ 0x01, "CheckMac or Verify miscompare" },
	{ 0x03, "Parse Error" },
	{ 0x05, "ECC Fault" },
	{ 0x0F, "Execution Error" },
	{ 0xEE, "Watchdog about to expire" },
	{ 0xFF, "CRC or other communication error" },
};

/* Definitions for eeprom organization */
#define CONFIG_ZONE			0

/* Definitions for Indexes common to all commands */
#define RSP_DATA_IDX			1 /* buffer index of data in response */
#define DATA_SLOT_2			2 /* used for ECDH private key */

/* Definitions for the device lock state */
#define DEVICE_LOCK_ADDR		0x15
#define LOCK_VALUE_IDX			(RSP_DATA_IDX + 2)
#define LOCK_CONFIG_IDX			(RSP_DATA_IDX + 3)

/*
 * Wake High delay to data communication (microseconds). SDA should be stable
 * high for this entire duration.
 */
#define TWHI_MIN			1500
#define TWHI_MAX			1550

/* Wake Low duration */
#define TWLO_USEC			60

/* Command execution time (milliseconds) */
#define MAX_EXEC_TIME_ECDH		58
#define MAX_EXEC_TIME_GENKEY		115
#define MAX_EXEC_TIME_READ		1

/* Command opcode */
#define OPCODE_ECDH			0x43
#define OPCODE_GENKEY			0x40
#define OPCODE_READ			0x02

/* Definitions for the READ Command */
#define READ_COUNT			7

/* Definitions for the GenKey Command */
#define GENKEY_COUNT			7
#define GENKEY_MODE_PRIVATE		0x04

/* Definitions for the ECDH Command */
#define ECDH_COUNT			71
#define ECDH_PREFIX_MODE		0x00

#endif /* __ATMEL_ECC_H__ */
drivers/crypto/atmel-i2c.c (new file, 364 lines)
@@ -0,0 +1,364 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip / Atmel ECC (I2C) driver.
 *
 * Copyright (c) 2017, Microchip Technology Inc.
 * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
 */

#include <linux/bitrev.h>
#include <linux/crc16.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "atmel-i2c.h"

/**
 * atmel_i2c_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
 * CRC16 verification of the count, opcode, param1, param2 and data bytes.
 * The checksum is saved in little-endian format in the least significant
 * two bytes of the command. CRC polynomial is 0x8005 and the initial register
 * value should be zero.
 *
 * @cmd : structure used for communicating with the device.
 */
static void atmel_i2c_checksum(struct atmel_i2c_cmd *cmd)
{
	u8 *data = &cmd->count;
	size_t len = cmd->count - CRC_SIZE;
	__le16 *__crc16 = (__le16 *)(data + len);

	*__crc16 = cpu_to_le16(bitrev16(crc16(0, data, len)));
}

void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd)
{
	cmd->word_addr = COMMAND;
	cmd->opcode = OPCODE_READ;
	/*
	 * Read the word from Configuration zone that contains the lock bytes
	 * (UserExtra, Selector, LockValue, LockConfig).
	 */
	cmd->param1 = CONFIG_ZONE;
	cmd->param2 = cpu_to_le16(DEVICE_LOCK_ADDR);
	cmd->count = READ_COUNT;

	atmel_i2c_checksum(cmd);

	cmd->msecs = MAX_EXEC_TIME_READ;
	cmd->rxsize = READ_RSP_SIZE;
}
EXPORT_SYMBOL(atmel_i2c_init_read_cmd);

void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd)
{
	cmd->word_addr = COMMAND;
	cmd->opcode = OPCODE_RANDOM;
	cmd->param1 = 0;
	cmd->param2 = 0;
	cmd->count = RANDOM_COUNT;

	atmel_i2c_checksum(cmd);

	cmd->msecs = MAX_EXEC_TIME_RANDOM;
	cmd->rxsize = RANDOM_RSP_SIZE;
}
EXPORT_SYMBOL(atmel_i2c_init_random_cmd);

void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid)
{
	cmd->word_addr = COMMAND;
	cmd->count = GENKEY_COUNT;
	cmd->opcode = OPCODE_GENKEY;
	cmd->param1 = GENKEY_MODE_PRIVATE;
	/* a random private key will be generated and stored in slot keyID */
	cmd->param2 = cpu_to_le16(keyid);

	atmel_i2c_checksum(cmd);

	cmd->msecs = MAX_EXEC_TIME_GENKEY;
	cmd->rxsize = GENKEY_RSP_SIZE;
}
EXPORT_SYMBOL(atmel_i2c_init_genkey_cmd);

int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd,
			    struct scatterlist *pubkey)
{
	size_t copied;

	cmd->word_addr = COMMAND;
	cmd->count = ECDH_COUNT;
	cmd->opcode = OPCODE_ECDH;
	cmd->param1 = ECDH_PREFIX_MODE;
	/* private key slot */
	cmd->param2 = cpu_to_le16(DATA_SLOT_2);

	/*
	 * The device only supports NIST P256 ECC keys. The public key size will
	 * always be the same. Use a macro for the key size to avoid unnecessary
	 * computations.
	 */
	copied = sg_copy_to_buffer(pubkey,
				   sg_nents_for_len(pubkey,
						    ATMEL_ECC_PUBKEY_SIZE),
				   cmd->data, ATMEL_ECC_PUBKEY_SIZE);
	if (copied != ATMEL_ECC_PUBKEY_SIZE)
		return -EINVAL;

	atmel_i2c_checksum(cmd);

	cmd->msecs = MAX_EXEC_TIME_ECDH;
	cmd->rxsize = ECDH_RSP_SIZE;

	return 0;
}
EXPORT_SYMBOL(atmel_i2c_init_ecdh_cmd);

/*
 * After wake and after execution of a command, there will be error, status, or
 * result bytes in the device's output register that can be retrieved by the
 * system. When the length of that group is four bytes, the codes returned are
 * detailed in error_list.
 */
static int atmel_i2c_status(struct device *dev, u8 *status)
{
	size_t err_list_len = ARRAY_SIZE(error_list);
	int i;
	u8 err_id = status[1];

	if (*status != STATUS_SIZE)
		return 0;

	if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR)
		return 0;

	for (i = 0; i < err_list_len; i++)
		if (error_list[i].value == err_id)
			break;

	/* if err_id is not in the error_list then ignore it */
	if (i != err_list_len) {
		dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text);
		return err_id;
	}

	return 0;
}

static int atmel_i2c_wakeup(struct i2c_client *client)
{
	struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
	u8 status[STATUS_RSP_SIZE];
	int ret;

	/*
	 * The device ignores any levels or transitions on the SCL pin when the
	 * device is idle, asleep or during waking up. Don't check for error
	 * when waking up the device.
	 */
	i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);

	/*
	 * Wait to wake the device. Typical execution times for ecdh and genkey
	 * are around tens of milliseconds. Delta is chosen to 50 microseconds.
	 */
	usleep_range(TWHI_MIN, TWHI_MAX);

	ret = i2c_master_recv(client, status, STATUS_SIZE);
	if (ret < 0)
		return ret;

	return atmel_i2c_status(&client->dev, status);
}

static int atmel_i2c_sleep(struct i2c_client *client)
{
	u8 sleep = SLEEP_TOKEN;

	return i2c_master_send(client, &sleep, 1);
}

/*
 * atmel_i2c_send_receive() - send a command to the device and receive its
 *                            response.
 * @client: i2c client device
 * @cmd   : structure used to communicate with the device
 *
 * After the device receives a Wake token, a watchdog counter starts within the
 * device. After the watchdog timer expires, the device enters sleep mode
 * regardless of whether some I/O transmission or command execution is in
 * progress. If a command is attempted when insufficient time remains prior to
 * watchdog timer execution, the device will return the watchdog timeout error
 * code without attempting to execute the command. There is no way to reset the
 * counter other than to put the device into sleep or idle mode and then
 * wake it up again.
 */
int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd)
{
	struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
	int ret;

	mutex_lock(&i2c_priv->lock);

	ret = atmel_i2c_wakeup(client);
	if (ret)
		goto err;

	/* send the command */
	ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE);
	if (ret < 0)
		goto err;

	/* delay the appropriate amount of time for command to execute */
	msleep(cmd->msecs);

	/* receive the response */
	ret = i2c_master_recv(client, cmd->data, cmd->rxsize);
	if (ret < 0)
		goto err;

	/* put the device into low-power mode */
	ret = atmel_i2c_sleep(client);
	if (ret < 0)
		goto err;

	mutex_unlock(&i2c_priv->lock);
	return atmel_i2c_status(&client->dev, cmd->data);
err:
	mutex_unlock(&i2c_priv->lock);
	return ret;
}
EXPORT_SYMBOL(atmel_i2c_send_receive);

static void atmel_i2c_work_handler(struct work_struct *work)
{
	struct atmel_i2c_work_data *work_data =
			container_of(work, struct atmel_i2c_work_data, work);
	struct atmel_i2c_cmd *cmd = &work_data->cmd;
	struct i2c_client *client = work_data->client;
	int status;

	status = atmel_i2c_send_receive(client, cmd);
	work_data->cbk(work_data, work_data->areq, status);
}

void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
		       void (*cbk)(struct atmel_i2c_work_data *work_data,
				   void *areq, int status),
		       void *areq)
{
	work_data->cbk = (void *)cbk;
	work_data->areq = areq;

	INIT_WORK(&work_data->work, atmel_i2c_work_handler);
	schedule_work(&work_data->work);
}
EXPORT_SYMBOL(atmel_i2c_enqueue);

static inline size_t atmel_i2c_wake_token_sz(u32 bus_clk_rate)
{
	u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);

	/* return the size of the wake_token in bytes */
	return DIV_ROUND_UP(no_of_bits, 8);
}

static int device_sanity_check(struct i2c_client *client)
{
	struct atmel_i2c_cmd *cmd;
	int ret;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	atmel_i2c_init_read_cmd(cmd);

	ret = atmel_i2c_send_receive(client, cmd);
	if (ret)
		goto free_cmd;

	/*
	 * It is vital that the Configuration, Data and OTP zones be locked
	 * prior to release into the field of the system containing the device.
	 * Failure to lock these zones may permit modification of any secret
	 * keys and may lead to other security problems.
	 */
	if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) {
		dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n");
		ret = -ENOTSUPP;
	}

	/* fall through */
free_cmd:
	kfree(cmd);
	return ret;
}

int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct atmel_i2c_client_priv *i2c_priv;
	struct device *dev = &client->dev;
	int ret;
	u32 bus_clk_rate;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		dev_err(dev, "I2C_FUNC_I2C not supported\n");
		return -ENODEV;
	}

	bus_clk_rate = i2c_acpi_find_bus_speed(&client->adapter->dev);
	if (!bus_clk_rate) {
		ret = device_property_read_u32(&client->adapter->dev,
					       "clock-frequency", &bus_clk_rate);
		if (ret) {
			dev_err(dev, "failed to read clock-frequency property\n");
			return ret;
		}
	}

	if (bus_clk_rate > 1000000L) {
		dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
			bus_clk_rate);
		return -EINVAL;
	}

	i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL);
	if (!i2c_priv)
		return -ENOMEM;

	i2c_priv->client = client;
	mutex_init(&i2c_priv->lock);

	/*
	 * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate -
	 * 1MHz. The previous bus_clk_rate check ensures us that wake_token_sz
	 * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE.
	 */
	i2c_priv->wake_token_sz = atmel_i2c_wake_token_sz(bus_clk_rate);

	memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token));

	atomic_set(&i2c_priv->tfm_count, 0);

	i2c_set_clientdata(client, i2c_priv);

	ret = device_sanity_check(client);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(atmel_i2c_probe);

MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
MODULE_LICENSE("GPL v2");
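
A worked example of the atmel_i2c_wake_token_sz() arithmetic above (a
standalone, illustrative C program; the constants mirror the driver):

::

	#include <stdio.h>

	#define TWLO_USEC    60		/* Wake Low duration, microseconds */
	#define USEC_PER_SEC 1000000
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long rates[] = { 100000, 400000, 1000000 };

		for (int i = 0; i < 3; i++) {
			/* bits needed to hold SDA low for TWLO at this rate */
			unsigned long bits = DIV_ROUND_UP(TWLO_USEC * rates[i],
						(unsigned long)USEC_PER_SEC);

			printf("%7lu Hz -> %2lu bits -> %lu byte(s)\n",
			       rates[i], bits, DIV_ROUND_UP(bits, 8UL));
		}
		/* the 1 MHz maximum yields 8 bytes = WAKE_TOKEN_MAX_SIZE */
		return 0;
	}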
drivers/crypto/atmel-i2c.h (new file, 197 lines)
@@ -0,0 +1,197 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017, Microchip Technology Inc.
 * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
 */

#ifndef __ATMEL_I2C_H__
#define __ATMEL_I2C_H__

#include <linux/hw_random.h>
#include <linux/types.h>

#define ATMEL_ECC_PRIORITY		300

#define COMMAND				0x03 /* packet function */
#define SLEEP_TOKEN			0x01
#define WAKE_TOKEN_MAX_SIZE		8

/* Definitions of Data and Command sizes */
#define WORD_ADDR_SIZE			1
#define COUNT_SIZE			1
#define CRC_SIZE			2
#define CMD_OVERHEAD_SIZE		(COUNT_SIZE + CRC_SIZE)

/* size in bytes of the n prime */
#define ATMEL_ECC_NIST_P256_N_SIZE	32
#define ATMEL_ECC_PUBKEY_SIZE		(2 * ATMEL_ECC_NIST_P256_N_SIZE)

#define STATUS_RSP_SIZE			4
#define ECDH_RSP_SIZE			(32 + CMD_OVERHEAD_SIZE)
#define GENKEY_RSP_SIZE			(ATMEL_ECC_PUBKEY_SIZE + \
					 CMD_OVERHEAD_SIZE)
#define READ_RSP_SIZE			(4 + CMD_OVERHEAD_SIZE)
#define RANDOM_RSP_SIZE			(32 + CMD_OVERHEAD_SIZE)
#define MAX_RSP_SIZE			GENKEY_RSP_SIZE

/**
 * atmel_i2c_cmd - structure used for communicating with the device.
 * @word_addr: indicates the function of the packet sent to the device. This
 *             byte should have a value of COMMAND for normal operation.
 * @count    : number of bytes to be transferred to (or from) the device.
 * @opcode   : the command code.
 * @param1   : the first parameter; always present.
 * @param2   : the second parameter; always present.
 * @data     : optional remaining input data. Includes a 2-byte CRC.
 * @rxsize   : size of the data received from i2c client.
 * @msecs    : command execution time in milliseconds
 */
struct atmel_i2c_cmd {
	u8 word_addr;
	u8 count;
	u8 opcode;
	u8 param1;
	__le16 param2;
	u8 data[MAX_RSP_SIZE];
	u8 msecs;
	u16 rxsize;
} __packed;

/* Status/Error codes */
#define STATUS_SIZE			0x04
#define STATUS_NOERR			0x00
#define STATUS_WAKE_SUCCESSFUL		0x11

static const struct {
	u8 value;
	const char *error_text;
} error_list[] = {
	{ 0x01, "CheckMac or Verify miscompare" },
	{ 0x03, "Parse Error" },
	{ 0x05, "ECC Fault" },
	{ 0x0F, "Execution Error" },
	{ 0xEE, "Watchdog about to expire" },
	{ 0xFF, "CRC or other communication error" },
};

/* Definitions for eeprom organization */
#define CONFIG_ZONE			0

/* Definitions for Indexes common to all commands */
#define RSP_DATA_IDX			1 /* buffer index of data in response */
#define DATA_SLOT_2			2 /* used for ECDH private key */

/* Definitions for the device lock state */
#define DEVICE_LOCK_ADDR		0x15
#define LOCK_VALUE_IDX			(RSP_DATA_IDX + 2)
#define LOCK_CONFIG_IDX			(RSP_DATA_IDX + 3)

/*
 * Wake High delay to data communication (microseconds). SDA should be stable
 * high for this entire duration.
 */
#define TWHI_MIN			1500
#define TWHI_MAX			1550

/* Wake Low duration */
#define TWLO_USEC			60

/* Command execution time (milliseconds) */
#define MAX_EXEC_TIME_ECDH		58
#define MAX_EXEC_TIME_GENKEY		115
#define MAX_EXEC_TIME_READ		1
#define MAX_EXEC_TIME_RANDOM		50

/* Command opcode */
#define OPCODE_ECDH			0x43
#define OPCODE_GENKEY			0x40
#define OPCODE_READ			0x02
#define OPCODE_RANDOM			0x1b

/* Definitions for the READ Command */
#define READ_COUNT			7

/* Definitions for the RANDOM Command */
#define RANDOM_COUNT			7

/* Definitions for the GenKey Command */
#define GENKEY_COUNT			7
#define GENKEY_MODE_PRIVATE		0x04

/* Definitions for the ECDH Command */
#define ECDH_COUNT			71
#define ECDH_PREFIX_MODE		0x00

/* Used for binding tfm objects to i2c clients. */
struct atmel_ecc_driver_data {
	struct list_head i2c_client_list;
	spinlock_t i2c_list_lock;
} ____cacheline_aligned;

/**
 * atmel_i2c_client_priv - i2c_client private data
 * @client              : pointer to i2c client device
 * @i2c_client_list_node: part of i2c_client_list
 * @lock                : lock for sending i2c commands
 * @wake_token          : wake token array of zeros
 * @wake_token_sz       : size in bytes of the wake_token
 * @tfm_count           : number of active crypto transformations on i2c client
 *
 * Reads and writes from/to the i2c client are sequential. The first byte
 * transmitted to the device is treated as the byte size. Any attempt to send
 * more than this number of bytes will cause the device to not ACK those bytes.
 * After the host writes a single command byte to the input buffer, reads are
 * prohibited until after the device completes command execution. Use a mutex
 * when sending i2c commands.
 */
struct atmel_i2c_client_priv {
	struct i2c_client *client;
	struct list_head i2c_client_list_node;
	struct mutex lock;
	u8 wake_token[WAKE_TOKEN_MAX_SIZE];
	size_t wake_token_sz;
	atomic_t tfm_count ____cacheline_aligned;
	struct hwrng hwrng;
};

/**
 * atmel_i2c_work_data - data structure representing the work
 * @ctx : transformation context.
 * @cbk : pointer to a callback function to be invoked upon completion of this
 *        request. This has the form:
 *        callback(struct atmel_i2c_work_data *work_data, void *areq, u8 status)
 *        where:
 *        @work_data: data structure representing the work
 *        @areq     : optional pointer to an argument passed with the original
 *                    request.
 *        @status   : status returned from the i2c client device or i2c error.
 * @areq: optional pointer to a user argument for use at callback time.
 * @work: describes the task to be executed.
 * @cmd : structure used for communicating with the device.
 */
struct atmel_i2c_work_data {
	void *ctx;
	struct i2c_client *client;
	void (*cbk)(struct atmel_i2c_work_data *work_data, void *areq,
		    int status);
	void *areq;
	struct work_struct work;
	struct atmel_i2c_cmd cmd;
};

int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id);

void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
		       void (*cbk)(struct atmel_i2c_work_data *work_data,
				   void *areq, int status),
		       void *areq);

int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd);

void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd);
void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd);
void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid);
int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd,
			    struct scatterlist *pubkey);

#endif /* __ATMEL_I2C_H__ */
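
The command framing above (CRC-16, polynomial 0x8005, bit-reversed and stored
little-endian after the payload) can be reproduced outside the kernel. A
minimal sketch, assuming the READ command layout from this header;
crc16_reflected() stands in for the kernel's crc16():

::

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	/* same algorithm as the kernel's crc16(): poly 0x8005, reflected */
	static uint16_t crc16_reflected(uint16_t crc, const uint8_t *buf,
					size_t len)
	{
		while (len--) {
			crc ^= *buf++;
			for (int i = 0; i < 8; i++)
				crc = (crc & 1) ? (crc >> 1) ^ 0xA001
						: crc >> 1;
		}
		return crc;
	}

	static uint16_t bitrev16(uint16_t x)
	{
		uint16_t r = 0;

		for (int i = 0; i < 16; i++)
			r |= ((x >> i) & 1) << (15 - i);
		return r;
	}

	int main(void)
	{
		/* READ command body: count, opcode, param1, param2 (LE) */
		uint8_t cmd[7] = { 0x07, 0x02, 0x00, 0x15, 0x00 };
		/* the CRC covers count..param2: count - CRC_SIZE bytes */
		uint16_t crc = bitrev16(crc16_reflected(0, cmd, 5));

		cmd[5] = crc & 0xff;	/* little-endian, as in atmel_i2c_checksum() */
		cmd[6] = crc >> 8;
		printf("crc = %02x %02x\n", cmd[5], cmd[6]);
		return 0;
	}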
drivers/crypto/atmel-sha204a.c (new file, 171 lines)
@@ -0,0 +1,171 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip / Atmel SHA204A (I2C) driver.
 *
 * Copyright (c) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "atmel-i2c.h"

static void atmel_sha204a_rng_done(struct atmel_i2c_work_data *work_data,
				   void *areq, int status)
{
	struct atmel_i2c_client_priv *i2c_priv = work_data->ctx;
	struct hwrng *rng = areq;

	if (status)
		dev_warn_ratelimited(&i2c_priv->client->dev,
				     "i2c transaction failed (%d)\n",
				     status);

	rng->priv = (unsigned long)work_data;
	atomic_dec(&i2c_priv->tfm_count);
}

static int atmel_sha204a_rng_read_nonblocking(struct hwrng *rng, void *data,
					      size_t max)
{
	struct atmel_i2c_client_priv *i2c_priv;
	struct atmel_i2c_work_data *work_data;

	i2c_priv = container_of(rng, struct atmel_i2c_client_priv, hwrng);

	/* keep maximum 1 asynchronous read in flight at any time */
	if (!atomic_add_unless(&i2c_priv->tfm_count, 1, 1))
		return 0;

	if (rng->priv) {
		work_data = (struct atmel_i2c_work_data *)rng->priv;
		max = min(sizeof(work_data->cmd.data), max);
		memcpy(data, &work_data->cmd.data, max);
		rng->priv = 0;
	} else {
		work_data = kmalloc(sizeof(*work_data), GFP_ATOMIC);
		if (!work_data)
			return -ENOMEM;

		work_data->ctx = i2c_priv;
		work_data->client = i2c_priv->client;

		max = 0;
	}

	atmel_i2c_init_random_cmd(&work_data->cmd);
	atmel_i2c_enqueue(work_data, atmel_sha204a_rng_done, rng);

	return max;
}

static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max,
				  bool wait)
{
	struct atmel_i2c_client_priv *i2c_priv;
	struct atmel_i2c_cmd cmd;
	int ret;

	if (!wait)
		return atmel_sha204a_rng_read_nonblocking(rng, data, max);

	i2c_priv = container_of(rng, struct atmel_i2c_client_priv, hwrng);

	atmel_i2c_init_random_cmd(&cmd);

	ret = atmel_i2c_send_receive(i2c_priv->client, &cmd);
	if (ret)
		return ret;

	max = min(sizeof(cmd.data), max);
	memcpy(data, cmd.data, max);

	return max;
}

static int atmel_sha204a_probe(struct i2c_client *client,
			       const struct i2c_device_id *id)
{
	struct atmel_i2c_client_priv *i2c_priv;
	int ret;

	ret = atmel_i2c_probe(client, id);
	if (ret)
		return ret;

	i2c_priv = i2c_get_clientdata(client);

	memset(&i2c_priv->hwrng, 0, sizeof(i2c_priv->hwrng));

	i2c_priv->hwrng.name = dev_name(&client->dev);
	i2c_priv->hwrng.read = atmel_sha204a_rng_read;
	i2c_priv->hwrng.quality = 1024;

	ret = hwrng_register(&i2c_priv->hwrng);
	if (ret)
		dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);

	return ret;
}

static int atmel_sha204a_remove(struct i2c_client *client)
{
	struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);

	if (atomic_read(&i2c_priv->tfm_count)) {
		dev_err(&client->dev, "Device is busy\n");
		return -EBUSY;
	}

	if (i2c_priv->hwrng.priv)
		kfree((void *)i2c_priv->hwrng.priv);
	hwrng_unregister(&i2c_priv->hwrng);

	return 0;
}

static const struct of_device_id atmel_sha204a_dt_ids[] = {
	{ .compatible = "atmel,atsha204a", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_sha204a_dt_ids);

static const struct i2c_device_id atmel_sha204a_id[] = {
	{ "atsha204a", 0 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id);

static struct i2c_driver atmel_sha204a_driver = {
	.probe			= atmel_sha204a_probe,
	.remove			= atmel_sha204a_remove,
	.id_table		= atmel_sha204a_id,

	.driver.name		= "atmel-sha204a",
	.driver.of_match_table	= of_match_ptr(atmel_sha204a_dt_ids),
};

static int __init atmel_sha204a_init(void)
{
	return i2c_add_driver(&atmel_sha204a_driver);
}

static void __exit atmel_sha204a_exit(void)
{
	flush_scheduled_work();
	i2c_del_driver(&atmel_sha204a_driver);
}

module_init(atmel_sha204a_init);
module_exit(atmel_sha204a_exit);

MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
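
Once the driver is bound and its rng is the current one (see
/sys/class/misc/hw_random/rng_current), the device can be exercised from
userspace through the hw_random character device. A hedged, minimal check:

::

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[32];
		ssize_t n;
		int fd = open("/dev/hwrng", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/hwrng");
			return 1;
		}
		/* a blocking read maps to rng->read(..., wait = true) */
		n = read(fd, buf, sizeof(buf));
		for (ssize_t i = 0; i < n; i++)
			printf("%02x", buf[i]);
		putchar('\n');
		close(fd);
		return n > 0 ? 0 : 1;
	}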
@@ -85,7 +85,7 @@ MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
  * 0x70 - ring 2
  * 0x78 - ring 3
  */
-char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
+static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
 /*
  * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
  * is set dynamically after reading SPU type from device tree.
@@ -2083,7 +2083,7 @@ static int __ahash_init(struct ahash_request *req)
  * Return: true if incremental hashing is not supported
  *         false otherwise
  */
-bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
+static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
 {
 	struct spu_hw *spu = &iproc_priv.spu;
 
@@ -4809,7 +4809,7 @@ static int spu_dt_read(struct platform_device *pdev)
 	return 0;
 }
 
-int bcm_spu_probe(struct platform_device *pdev)
+static int bcm_spu_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct spu_hw *spu = &iproc_priv.spu;
@@ -4853,7 +4853,7 @@ int bcm_spu_probe(struct platform_device *pdev)
 	return err;
 }
 
-int bcm_spu_remove(struct platform_device *pdev)
+static int bcm_spu_remove(struct platform_device *pdev)
 {
 	int i;
 	struct device *dev = &pdev->dev;

@@ -38,21 +38,21 @@ enum spu2_proto_sel {
 	SPU2_DTLS_AEAD = 10
 };
 
-char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
+static char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
 	"DES", "3DES"
 };
 
-char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB", "XTS",
-	"CCM", "GCM"
+static char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB",
+	"XTS", "CCM", "GCM"
 };
 
-char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
+static char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
 	"Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384",
 	"SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256",
 	"SHA3-384", "SHA3-512"
 };
 
-char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
+static char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
 	"Rabin", "CCM", "GCM", "Reserved"
 };
@@ -2,6 +2,12 @@
 config CRYPTO_DEV_FSL_CAAM_COMMON
 	tristate
 
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+	tristate
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+	tristate
+
 config CRYPTO_DEV_FSL_CAAM
 	tristate "Freescale CAAM-Multicore platform driver backend"
 	depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
@@ -25,7 +31,7 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
 	  Selecting this will enable printing of various debug
 	  information in the CAAM driver.
 
-config CRYPTO_DEV_FSL_CAAM_JR
+menuconfig CRYPTO_DEV_FSL_CAAM_JR
 	tristate "Freescale CAAM Job Ring driver backend"
 	default y
 	help
@@ -86,8 +92,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
 	  threshold. Range is 1-65535.
 
 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
-	tristate "Register algorithm implementations with the Crypto API"
+	bool "Register algorithm implementations with the Crypto API"
 	default y
+	select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
 	select CRYPTO_AEAD
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
@@ -97,13 +104,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
 	  scatterlist crypto API (such as the linux native IPSec
 	  stack) to the SEC4 via job ring.
 
-	  To compile this as a module, choose M here: the module
-	  will be called caamalg.
-
 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
-	tristate "Queue Interface as Crypto API backend"
+	bool "Queue Interface as Crypto API backend"
 	depends on FSL_DPAA && NET
 	default y
+	select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
 	help
@@ -114,33 +119,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
 	  assigned to the kernel should also be more than the number of
 	  job rings.
 
-	  To compile this as a module, choose M here: the module
-	  will be called caamalg_qi.
-
 config CRYPTO_DEV_FSL_CAAM_AHASH_API
-	tristate "Register hash algorithm implementations with Crypto API"
+	bool "Register hash algorithm implementations with Crypto API"
 	default y
+	select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
 	select CRYPTO_HASH
 	help
 	  Selecting this will offload ahash for users of the
 	  scatterlist crypto API to the SEC4 via job ring.
 
-	  To compile this as a module, choose M here: the module
-	  will be called caamhash.
-
 config CRYPTO_DEV_FSL_CAAM_PKC_API
-	tristate "Register public key cryptography implementations with Crypto API"
+	bool "Register public key cryptography implementations with Crypto API"
 	default y
 	select CRYPTO_RSA
 	help
 	  Selecting this will allow SEC Public key support for RSA.
 	  Supported cryptographic primitives: encryption, decryption,
 	  signature and verification.
-	  To compile this as a module, choose M here: the module
-	  will be called caam_pkc.
 
 config CRYPTO_DEV_FSL_CAAM_RNG_API
-	tristate "Register caam device for hwrng API"
+	bool "Register caam device for hwrng API"
 	default y
 	select CRYPTO_RNG
 	select HW_RANDOM
@@ -148,9 +146,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
 	  Selecting this will register the SEC4 hardware rng to
 	  the hw_random API for suppying the kernel entropy pool.
 
-	  To compile this as a module, choose M here: the module
-	  will be called caamrng.
-
 endif # CRYPTO_DEV_FSL_CAAM_JR
 
 endif # CRYPTO_DEV_FSL_CAAM
@@ -160,6 +155,8 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
 	depends on FSL_MC_DPIO
 	depends on NETDEVICES
 	select CRYPTO_DEV_FSL_CAAM_COMMON
+	select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+	select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_AUTHENC
 	select CRYPTO_AEAD
@@ -171,12 +168,3 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
 
 	  To compile this as a module, choose M here: the module
 	  will be called dpaa2_caam.
-
-config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
-	def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
-		      CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
-		      CRYPTO_DEV_FSL_DPAA2_CAAM)
-
-config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
-	def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
-		      CRYPTO_DEV_FSL_DPAA2_CAAM)
@@ -11,20 +11,20 @@ ccflags-y += -DVERSION=\"\"
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
 
-caam-objs := ctrl.o
-caam_jr-objs := jr.o key_gen.o
-caam_pkc-y := caampkc.o pkc_desc.o
+caam-y := ctrl.o
+caam_jr-y := jr.o key_gen.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
 
+caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
 ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
 	ccflags-y += -DCONFIG_CAAM_QI
-	caam-objs += qi.o
 endif
 
 obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
@ -77,13 +77,6 @@
|
|||||||
#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
|
#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
|
||||||
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
|
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
|
||||||
|
|
||||||
#ifdef DEBUG
|
|
||||||
/* for print_hex_dumps with line references */
|
|
||||||
#define debug(format, arg...) printk(format, arg)
|
|
||||||
#else
|
|
||||||
#define debug(format, arg...)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
struct caam_alg_entry {
|
struct caam_alg_entry {
|
||||||
int class1_alg_type;
|
int class1_alg_type;
|
||||||
int class2_alg_type;
|
int class2_alg_type;
|
||||||
@ -583,13 +576,11 @@ static int aead_setkey(struct crypto_aead *aead,
|
|||||||
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
|
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
|
||||||
goto badkey;
|
goto badkey;
|
||||||
|
|
||||||
#ifdef DEBUG
|
dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
|
||||||
printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
|
|
||||||
keys.authkeylen + keys.enckeylen, keys.enckeylen,
|
keys.authkeylen + keys.enckeylen, keys.enckeylen,
|
||||||
keys.authkeylen);
|
keys.authkeylen);
|
||||||
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
|
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
|
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If DKP is supported, use it in the shared descriptor to generate
|
* If DKP is supported, use it in the shared descriptor to generate
|
||||||
@ -623,11 +614,10 @@ static int aead_setkey(struct crypto_aead *aead,
|
|||||||
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
|
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
|
||||||
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
|
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
|
||||||
keys.enckeylen, ctx->dir);
|
keys.enckeylen, ctx->dir);
|
||||||
#ifdef DEBUG
|
|
||||||
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
|
print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
|
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
|
||||||
ctx->adata.keylen_pad + keys.enckeylen, 1);
|
ctx->adata.keylen_pad + keys.enckeylen, 1);
|
||||||
#endif
|
|
||||||
|
|
||||||
skip_split_key:
|
skip_split_key:
|
||||||
ctx->cdata.keylen = keys.enckeylen;
|
ctx->cdata.keylen = keys.enckeylen;
|
||||||
@ -678,10 +668,8 @@ static int gcm_setkey(struct crypto_aead *aead,
|
|||||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||||
struct device *jrdev = ctx->jrdev;
|
struct device *jrdev = ctx->jrdev;
|
||||||
|
|
||||||
#ifdef DEBUG
|
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
|
||||||
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
|
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
memcpy(ctx->key, key, keylen);
|
memcpy(ctx->key, key, keylen);
|
||||||
dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
|
dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
|
||||||
@ -699,10 +687,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
|
|||||||
if (keylen < 4)
|
if (keylen < 4)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
#ifdef DEBUG
|
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
|
||||||
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
|
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
memcpy(ctx->key, key, keylen);
|
memcpy(ctx->key, key, keylen);
|
||||||
|
|
||||||
@ -725,10 +711,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
|
|||||||
if (keylen < 4)
|
if (keylen < 4)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
#ifdef DEBUG
|
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
|
||||||
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
|
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
memcpy(ctx->key, key, keylen);
|
memcpy(ctx->key, key, keylen);
|
||||||
|
|
||||||
@ -757,10 +741,8 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
|
|||||||
OP_ALG_AAI_CTR_MOD128);
|
OP_ALG_AAI_CTR_MOD128);
|
||||||
const bool is_rfc3686 = alg->caam.rfc3686;
|
const bool is_rfc3686 = alg->caam.rfc3686;
|
||||||
|
|
||||||
#ifdef DEBUG
|
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
|
||||||
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
|
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
|
|
||||||
#endif
|
|
||||||
/*
|
/*
|
||||||
* AES-CTR needs to load IV in CONTEXT1 reg
|
* AES-CTR needs to load IV in CONTEXT1 reg
|
||||||
* at an offset of 128bits (16bytes)
|
* at an offset of 128bits (16bytes)
|
||||||
@ -916,7 +898,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (iv_dma)
|
if (iv_dma)
|
||||||
dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
|
dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
|
||||||
if (sec4_sg_bytes)
|
if (sec4_sg_bytes)
|
||||||
dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
|
dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
|
||||||
DMA_TO_DEVICE);
|
DMA_TO_DEVICE);
|
||||||
@ -949,9 +931,7 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
|
|||||||
struct aead_request *req = context;
|
struct aead_request *req = context;
|
||||||
struct aead_edesc *edesc;
|
struct aead_edesc *edesc;
|
||||||
|
|
||||||
#ifdef DEBUG
|
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||||
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
|
edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
|
||||||
|
|
||||||
@ -971,9 +951,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
|
|||||||
struct aead_request *req = context;
|
struct aead_request *req = context;
|
||||||
struct aead_edesc *edesc;
|
struct aead_edesc *edesc;
|
||||||
|
|
||||||
#ifdef DEBUG
|
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||||
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
|
edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
|
||||||
|
|
||||||
@ -1001,33 +979,32 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
|
|||||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||||
int ivsize = crypto_skcipher_ivsize(skcipher);
|
int ivsize = crypto_skcipher_ivsize(skcipher);
|
||||||
|
|
||||||
#ifdef DEBUG
|
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||||
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
|
edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
|
||||||
|
|
||||||
if (err)
|
if (err)
|
||||||
caam_jr_strstatus(jrdev, err);
|
caam_jr_strstatus(jrdev, err);
|
||||||
|
|
||||||
#ifdef DEBUG
|
|
||||||
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
|
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
|
|
||||||
edesc->src_nents > 1 ? 100 : ivsize, 1);
|
|
||||||
#endif
|
|
||||||
caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
|
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
|
|
||||||
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
|
|
||||||
|
|
||||||
skcipher_unmap(jrdev, edesc, req);
|
skcipher_unmap(jrdev, edesc, req);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The crypto API expects us to set the IV (req->iv) to the last
|
* The crypto API expects us to set the IV (req->iv) to the last
|
||||||
* ciphertext block. This is used e.g. by the CTS mode.
|
* ciphertext block (CBC mode) or last counter (CTR mode).
|
||||||
|
* This is used e.g. by the CTS mode.
|
||||||
*/
|
*/
|
||||||
if (ivsize)
|
if (ivsize) {
|
||||||
scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
|
memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
|
||||||
ivsize, ivsize, 0);
|
ivsize);
|
||||||
|
|
||||||
|
print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ",
|
||||||
|
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
|
||||||
|
edesc->src_nents > 1 ? 100 : ivsize, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
caam_dump_sg("dst @" __stringify(__LINE__)": ",
|
||||||
|
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
|
||||||
|
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
|
||||||
|
|
||||||
kfree(edesc);
|
kfree(edesc);
|
||||||
|
|
||||||
@ -1039,26 +1016,35 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
|
|||||||
{
|
{
|
||||||
struct skcipher_request *req = context;
|
struct skcipher_request *req = context;
|
||||||
struct skcipher_edesc *edesc;
|
struct skcipher_edesc *edesc;
|
||||||
#ifdef DEBUG
|
|
||||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||||
int ivsize = crypto_skcipher_ivsize(skcipher);
|
int ivsize = crypto_skcipher_ivsize(skcipher);
|
||||||
|
|
||||||
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||||
#endif
|
|
||||||
|
|
||||||
edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
|
edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
|
||||||
if (err)
|
if (err)
|
||||||
caam_jr_strstatus(jrdev, err);
|
caam_jr_strstatus(jrdev, err);
|
||||||
|
|
||||||
#ifdef DEBUG
|
skcipher_unmap(jrdev, edesc, req);
|
||||||
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
|
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
|
/*
|
||||||
#endif
|
* The crypto API expects us to set the IV (req->iv) to the last
|
||||||
caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
|
* ciphertext block (CBC mode) or last counter (CTR mode).
|
||||||
|
* This is used e.g. by the CTS mode.
|
||||||
|
*/
|
||||||
|
if (ivsize) {
|
||||||
|
memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
|
||||||
|
ivsize);
|
||||||
|
|
||||||
|
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
|
||||||
|
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
|
||||||
|
ivsize, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
caam_dump_sg("dst @" __stringify(__LINE__)": ",
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
|
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
|
||||||
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
|
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
|
||||||
|
|
||||||
skcipher_unmap(jrdev, edesc, req);
|
|
||||||
kfree(edesc);
|
kfree(edesc);
|
||||||
|
|
||||||
skcipher_request_complete(req, err);
|
skcipher_request_complete(req, err);
|
||||||
@@ -1106,6 +1092,7 @@ static void init_aead_job(struct aead_request *req,
 	if (unlikely(req->src != req->dst)) {
 		if (!edesc->mapped_dst_nents) {
 			dst_dma = 0;
+			out_options = 0;
 		} else if (edesc->mapped_dst_nents == 1) {
 			dst_dma = sg_dma_address(req->dst);
 			out_options = 0;
@@ -1249,6 +1236,7 @@ static void init_skcipher_job(struct skcipher_request *req,
 {
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct device *jrdev = ctx->jrdev;
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 	u32 *desc = edesc->hw_desc;
 	u32 *sh_desc;
@@ -1256,13 +1244,12 @@ static void init_skcipher_job(struct skcipher_request *req,
 	dma_addr_t src_dma, dst_dma, ptr;
 	int len, sec4_sg_index = 0;
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
-	pr_err("asked=%d, cryptlen%d\n",
+	print_hex_dump_debug("presciv@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+	dev_dbg(jrdev, "asked=%d, cryptlen%d\n",
 	       (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
-#endif
-	caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
+
+	caam_dump_sg("src @" __stringify(__LINE__)": ",
 		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
 		     edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
 
@@ -1285,7 +1272,7 @@ static void init_skcipher_job(struct skcipher_request *req,
 	if (likely(req->src == req->dst)) {
 		dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
 		out_options = in_options;
-	} else if (edesc->mapped_dst_nents == 1) {
+	} else if (!ivsize && edesc->mapped_dst_nents == 1) {
 		dst_dma = sg_dma_address(req->dst);
 	} else {
 		dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
@@ -1293,7 +1280,7 @@ static void init_skcipher_job(struct skcipher_request *req,
 		out_options = LDST_SGF;
 	}
 
-	append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+	append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
 }
 
 /*
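
append_seq_out_ptr() now sizes the output sequence as req->cryptlen + ivsize
because the shared descriptor (see the caamalg_desc.c hunks further down)
stores the next IV right after the ciphertext. A worked sketch of the
arithmetic, not taken from the patch::

    /* AES-CBC example: ivsize = 16 */
    unsigned int cryptlen = 64;
    unsigned int out_len  = cryptlen + ivsize;      /* 80 bytes */
    /*
     * 64 bytes of ciphertext land in req->dst; the trailing 16 bytes go
     * through the extra IV entry at the end of the output S/G table.
     */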
@@ -1309,37 +1296,36 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	int src_len, dst_len = 0;
 	struct aead_edesc *edesc;
 	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
 	unsigned int authsize = ctx->authsize;
 
 	if (unlikely(req->dst != req->src)) {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen);
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen);
+				src_len);
 			return ERR_PTR(src_nents);
 		}
 
-		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize :
-					     (-authsize)));
+		dst_nents = sg_nents_for_len(req->dst, dst_len);
 		if (unlikely(dst_nents < 0)) {
 			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : (-authsize)));
+				dst_len);
 			return ERR_PTR(dst_nents);
 		}
 	} else {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize : 0));
+		src_len = req->assoclen + req->cryptlen +
+			  (encrypt ? authsize : 0);
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : 0));
+				src_len);
 			return ERR_PTR(src_nents);
 		}
 	}
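
The refactor computes each sequence length once instead of repeating the
expression at every use. For AEAD the authentication tag accounts for the
difference between the two sides; a worked example under assumed sizes::

    /*
     * GCM with assoclen = 16, cryptlen = 48, authsize = 16, src != dst:
     *
     *   src_len = assoclen + cryptlen    = 64
     *   dst_len = src_len + authsize     = 80   (encrypt: tag appended)
     *   dst_len = src_len - authsize     = 48   (decrypt: tag stripped)
     */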
@@ -1380,8 +1366,16 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		}
 	}
 
+	/*
+	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+	 * the end of the table by allocating more S/G entries.
+	 */
 	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
-	sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+	if (mapped_dst_nents > 1)
+		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
+	else
+		sec4_sg_len = pad_sg_nents(sec4_sg_len);
+
 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
@@ -1403,12 +1397,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 
 	sec4_sg_index = 0;
 	if (mapped_src_nents > 1) {
-		sg_to_sec4_sg_last(req->src, mapped_src_nents,
+		sg_to_sec4_sg_last(req->src, src_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 		sec4_sg_index += mapped_src_nents;
 	}
 	if (mapped_dst_nents > 1) {
-		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+		sg_to_sec4_sg_last(req->dst, dst_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
 
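
pad_sg_nents() itself is not shown in this diff. Based on the "HW reads 4
S/G entries at a time" comment, a plausible definition rounds the entry
count up to a multiple of four -- treat this as an assumption, not the
patch's verbatim helper::

    #include <linux/kernel.h>       /* ALIGN() */

    /* Assumed shape of the helper referenced above */
    static inline int pad_sg_nents(int nents)
    {
            return ALIGN(nents, 4);
    }

    /* e.g. pad_sg_nents(1) == 4, pad_sg_nents(5) == 8 */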
@@ -1446,11 +1440,10 @@ static int gcm_encrypt(struct aead_request *req)
 
 	/* Create and submit job descriptor */
 	init_gcm_job(req, edesc, all_contig, true);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+
+	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 			     desc_bytes(edesc->hw_desc), 1);
-#endif
 
 	desc = edesc->hw_desc;
 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
@@ -1556,11 +1549,10 @@ static int aead_encrypt(struct aead_request *req)
 
 	/* Create and submit job descriptor */
 	init_authenc_job(req, edesc, all_contig, true);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+
+	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 			     desc_bytes(edesc->hw_desc), 1);
-#endif
 
 	desc = edesc->hw_desc;
 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
@@ -1591,11 +1583,10 @@ static int gcm_decrypt(struct aead_request *req)
 
 	/* Create and submit job descriptor*/
 	init_gcm_job(req, edesc, all_contig, false);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+
+	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 			     desc_bytes(edesc->hw_desc), 1);
-#endif
 
 	desc = edesc->hw_desc;
 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
@@ -1627,7 +1618,7 @@ static int aead_decrypt(struct aead_request *req)
 	u32 *desc;
 	int ret = 0;
 
-	caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
+	caam_dump_sg("dec src@" __stringify(__LINE__)": ",
 		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
 		     req->assoclen + req->cryptlen, 1);
 
@@ -1639,11 +1630,10 @@ static int aead_decrypt(struct aead_request *req)
 
 	/* Create and submit job descriptor*/
 	init_authenc_job(req, edesc, all_contig, false);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+
+	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 			     desc_bytes(edesc->hw_desc), 1);
-#endif
 
 	desc = edesc->hw_desc;
 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
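
The hunks above repeat one mechanical conversion: compile-time
`#ifdef DEBUG` hex dumps become print_hex_dump_debug(), which is always
built and gated at run time (via dynamic debug, or unconditionally when
DEBUG is defined). A sketch of the before/after shape with placeholder
arguments::

    /* before: the dump only exists in DEBUG builds */
    #ifdef DEBUG
            print_hex_dump(KERN_ERR, "jobdesc@: ", DUMP_PREFIX_ADDRESS,
                           16, 4, desc, desc_bytes(desc), 1);
    #endif

    /*
     * after: always compiled, off by default, and can be switched on per
     * call site via CONFIG_DYNAMIC_DEBUG without rebuilding the driver
     */
    print_hex_dump_debug("jobdesc@: ", DUMP_PREFIX_ADDRESS,
                         16, 4, desc, desc_bytes(desc), 1);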
@@ -1719,7 +1709,29 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	else
 		sec4_sg_ents = mapped_src_nents + !!ivsize;
 	dst_sg_idx = sec4_sg_ents;
-	sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+
+	/*
+	 * Input, output HW S/G tables: [IV, src][dst, IV]
+	 * IV entries point to the same buffer
+	 * If src == dst, S/G entries are reused (S/G tables overlap)
+	 *
+	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+	 * the end of the table by allocating more S/G entries. Logic:
+	 * if (output S/G)
+	 *	pad output S/G, if needed
+	 * else if (input S/G) ...
+	 *	pad input S/G, if needed
+	 */
+	if (ivsize || mapped_dst_nents > 1) {
+		if (req->src == req->dst)
+			sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
+		else
+			sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
+						     !!ivsize);
+	} else {
+		sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
+	}
+
 	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
 
 	/*
@@ -1744,10 +1756,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 
 	/* Make sure IV is located in a DMAable area */
 	if (ivsize) {
-		iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+		iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
 		memcpy(iv, req->iv, ivsize);
 
-		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(jrdev, iv_dma)) {
 			dev_err(jrdev, "unable to map IV\n");
 			caam_unmap(jrdev, req->src, req->dst, src_nents,
@@ -1759,13 +1771,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
 	}
 	if (dst_sg_idx)
-		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg +
+		sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
 			      !!ivsize, 0);
 
-	if (mapped_dst_nents > 1) {
-		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
-				   edesc->sec4_sg + dst_sg_idx, 0);
-	}
+	if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
+		sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
+			      dst_sg_idx, 0);
+
+	if (ivsize)
+		dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
+				   mapped_dst_nents, iv_dma, ivsize, 0);
+
+	if (ivsize || mapped_dst_nents > 1)
+		sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
+				    mapped_dst_nents);
 
 	if (sec4_sg_bytes) {
 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1782,11 +1801,9 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 
 	edesc->iv_dma = iv_dma;
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
-		       sec4_sg_bytes, 1);
-#endif
+	print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+			     sec4_sg_bytes, 1);
 
 	return edesc;
 }
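
The comment's `[IV, src][dst, IV]` layout is worth spelling out. Both IV
entries reference the single buffer that now sits directly after the S/G
table inside the extended descriptor, which is why it is mapped
DMA_BIDIRECTIONAL: the device reads the input IV from it and writes the
next IV back into it. A sketch of the memory layout, derived from the hunk
above::

    /*
     * extended descriptor memory (sketch):
     *
     *   [ hw_desc | sec4_sg table | iv buffer ]
     *                  |               ^   ^
     *   input  S/G: [ IV | src ... ]---+   |
     *   output S/G: [ dst ... | IV ]-------+
     */
    u8 *iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;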
@@ -1807,11 +1824,11 @@ static int skcipher_encrypt(struct skcipher_request *req)
 
 	/* Create and submit job descriptor*/
 	init_skcipher_job(req, edesc, true);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
+
+	print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 			     desc_bytes(edesc->hw_desc), 1);
-#endif
+
 	desc = edesc->hw_desc;
 	ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
 
@@ -1830,7 +1847,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	struct skcipher_edesc *edesc;
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
-	int ivsize = crypto_skcipher_ivsize(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	u32 *desc;
 	int ret = 0;
@@ -1840,22 +1856,13 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
-	/*
-	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block.
-	 */
-	if (ivsize)
-		scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
-					 ivsize, ivsize, 0);
-
 	/* Create and submit job descriptor*/
 	init_skcipher_job(req, edesc, false);
 	desc = edesc->hw_desc;
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
+
+	print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 			     desc_bytes(edesc->hw_desc), 1);
-#endif
 
 	ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
 	if (!ret) {
@@ -3444,7 +3451,7 @@ static void caam_aead_exit(struct crypto_aead *tfm)
 	caam_exit_common(crypto_aead_ctx(tfm));
 }
 
-static void __exit caam_algapi_exit(void)
+void caam_algapi_exit(void)
 {
 	int i;
 
@@ -3489,43 +3496,15 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
 	alg->exit = caam_aead_exit;
 }
 
-static int __init caam_algapi_init(void)
+int caam_algapi_init(struct device *ctrldev)
 {
-	struct device_node *dev_node;
-	struct platform_device *pdev;
-	struct caam_drv_private *priv;
+	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
 	int i = 0, err = 0;
 	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
 	u32 arc4_inst;
 	unsigned int md_limit = SHA512_DIGEST_SIZE;
 	bool registered = false, gcm_support;
 
-	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node) {
-		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-		if (!dev_node)
-			return -ENODEV;
-	}
-
-	pdev = of_find_device_by_node(dev_node);
-	if (!pdev) {
-		of_node_put(dev_node);
-		return -ENODEV;
-	}
-
-	priv = dev_get_drvdata(&pdev->dev);
-	of_node_put(dev_node);
-
-	/*
-	 * If priv is NULL, it's probably because the caam driver wasn't
-	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
-	 */
-	if (!priv) {
-		err = -ENODEV;
-		goto out_put_dev;
-	}
-
 	/*
 	 * Register crypto algorithms the device supports.
 	 * First, detect presence and attributes of DES, AES, and MD blocks.
@@ -3668,14 +3647,5 @@ static int __init caam_algapi_init(void)
 	if (registered)
 		pr_info("caam algorithms registered in /proc/crypto\n");
 
-out_put_dev:
-	put_device(&pdev->dev);
 	return err;
 }
-
-module_init(caam_algapi_init);
-module_exit(caam_algapi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("FSL CAAM support for crypto API");
-MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
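
These hunks stop registering the algorithms from a standalone module:
instead of looking up the controller through the device tree at
module_init() time, plain init/exit entry points now take the controller
device directly. The caller side is outside this diff; the following is a
speculative sketch of how a controller probe path could drive
registration (the function name and placement are assumptions, only the
caam_algapi_init()/caam_algapi_exit() signatures come from the hunks)::

    /* Hypothetical caller, for illustration only */
    static int caam_probe_register_apis(struct device *ctrldev)
    {
            int err;

            err = caam_algapi_init(ctrldev);        /* was module_init() */
            if (err)
                    return err;

            /* ... and on teardown: caam_algapi_exit(); */
            return 0;
    }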
@@ -33,12 +33,11 @@ static inline void append_dec_op1(u32 *desc, u32 type)
 	}
 
 	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
-	append_operation(desc, type | OP_ALG_AS_INITFINAL |
-			 OP_ALG_DECRYPT);
+	append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT);
 	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
 	set_jump_tgt_here(desc, jump_cmd);
-	append_operation(desc, type | OP_ALG_AS_INITFINAL |
-			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+	append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT |
+			 OP_ALG_AAI_DK);
 	set_jump_tgt_here(desc, uncond_jump_cmd);
 }
 
@@ -115,11 +114,9 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
 	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
 			 LDST_SRCDST_BYTE_CONTEXT);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "aead null enc shdesc@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("aead null enc shdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
 
@@ -204,11 +201,9 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
 	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "aead null dec shdesc@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("aead null dec shdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
 
@@ -358,10 +353,9 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
 	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
 			 LDST_SRCDST_BYTE_CONTEXT);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("aead enc shdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
 
@@ -475,10 +469,9 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
 	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("aead dec shdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
 
@@ -613,11 +606,9 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
 	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
 			 LDST_SRCDST_BYTE_CONTEXT);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "aead givenc shdesc@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("aead givenc shdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
 
@@ -742,10 +733,9 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
 	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
 			 LDST_SRCDST_BYTE_CONTEXT);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("gcm enc shdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
 
@@ -838,10 +828,9 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
 	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
 			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("gcm dec shdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
 
@@ -933,11 +922,9 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
 	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
 			 LDST_SRCDST_BYTE_CONTEXT);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("rfc4106 enc shdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
 
@@ -1030,11 +1017,9 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
 	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
 			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("rfc4106 dec shdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
 
@@ -1115,11 +1100,9 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
 	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
 			 LDST_SRCDST_BYTE_CONTEXT);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("rfc4543 enc shdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
 
@@ -1205,11 +1188,9 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
 	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
 			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("rfc4543 dec shdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
 
@@ -1410,17 +1391,21 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
 			  LDST_OFFSET_SHIFT));
 
 	/* Load operation */
-	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
 			 OP_ALG_ENCRYPT);
 
 	/* Perform operation */
 	skcipher_append_src_dst(desc);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "skcipher enc shdesc@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	/* Store IV */
+	if (ivsize)
+		append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+				 LDST_CLASS_1_CCB | (ctx1_iv_off <<
+				 LDST_OFFSET_SHIFT));
+
+	print_hex_dump_debug("skcipher enc shdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);
 
@@ -1479,7 +1464,7 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
 
 	/* Choose operation */
 	if (ctx1_iv_off)
-		append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+		append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
 				 OP_ALG_DECRYPT);
 	else
 		append_dec_op1(desc, cdata->algtype);
@@ -1487,11 +1472,15 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
 	/* Perform operation */
 	skcipher_append_src_dst(desc);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "skcipher dec shdesc@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	/* Store IV */
+	if (ivsize)
+		append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+				 LDST_CLASS_1_CCB | (ctx1_iv_off <<
+				 LDST_OFFSET_SHIFT));
+
+	print_hex_dump_debug("skcipher dec shdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);
 
@@ -1538,11 +1527,13 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
 	/* Perform operation */
 	skcipher_append_src_dst(desc);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "xts skcipher enc shdesc@" __stringify(__LINE__) ": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	/* Store upper 8B of IV */
+	append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			 (0x20 << LDST_OFFSET_SHIFT));
+
+	print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__)
+			     ": ", DUMP_PREFIX_ADDRESS, 16, 4,
+			     desc, desc_bytes(desc), 1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);
 
@@ -1588,11 +1579,13 @@ void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
 	/* Perform operation */
 	skcipher_append_src_dst(desc);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "xts skcipher dec shdesc@" __stringify(__LINE__) ": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	/* Store upper 8B of IV */
+	append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			 (0x20 << LDST_OFFSET_SHIFT));
+
+	print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__)
+			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
 }
 EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap);
 
@@ -44,9 +44,9 @@
 
 #define DESC_SKCIPHER_BASE		(3 * CAAM_CMD_SZ)
 #define DESC_SKCIPHER_ENC_LEN		(DESC_SKCIPHER_BASE + \
-					 20 * CAAM_CMD_SZ)
+					 21 * CAAM_CMD_SZ)
 #define DESC_SKCIPHER_DEC_LEN		(DESC_SKCIPHER_BASE + \
-					 15 * CAAM_CMD_SZ)
+					 16 * CAAM_CMD_SZ)
 
 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
 				 unsigned int icvsize, int era);
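
The header bump in the last hunk follows directly from the new SEQ STORE
command appended to each shared descriptor; every appended command costs
one command word of descriptor budget. Worked arithmetic (a sketch of the
reasoning, not part of the patch)::

    /*
     *   ENC: 20 * CAAM_CMD_SZ  ->  21 * CAAM_CMD_SZ   (+1: store IV)
     *   DEC: 15 * CAAM_CMD_SZ  ->  16 * CAAM_CMD_SZ   (+1: store IV)
     */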
@@ -4,7 +4,7 @@
  * Based on caamalg.c
  *
  * Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
  */
 
 #include "compat.h"
@@ -214,13 +214,11 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 		goto badkey;
 
-#ifdef DEBUG
-	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
 		keys.authkeylen + keys.enckeylen, keys.enckeylen,
 		keys.authkeylen);
-	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 
 	/*
 	 * If DKP is supported, use it in the shared descriptor to generate
@@ -237,7 +235,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
 		memcpy(ctx->key, keys.authkey, keys.authkeylen);
 		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
 		       keys.enckeylen);
-		dma_sync_single_for_device(jrdev, ctx->key_dma,
+		dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
 					   ctx->adata.keylen_pad +
 					   keys.enckeylen, ctx->dir);
 		goto skip_split_key;
@@ -251,8 +249,9 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
 
 	/* postpend encryption key to auth split key */
 	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
-	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-				   keys.enckeylen, ctx->dir);
+	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
+				   ctx->adata.keylen_pad + keys.enckeylen,
+				   ctx->dir);
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
@@ -386,13 +385,12 @@ static int gcm_setkey(struct crypto_aead *aead,
 	struct device *jrdev = ctx->jrdev;
 	int ret;
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 
 	memcpy(ctx->key, key, keylen);
-	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+	dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
+				   ctx->dir);
 	ctx->cdata.keylen = keylen;
 
 	ret = gcm_set_sh_desc(aead);
@@ -485,10 +483,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
 	if (keylen < 4)
 		return -EINVAL;
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 
 	memcpy(ctx->key, key, keylen);
 	/*
@@ -496,8 +492,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
 	 * in the nonce. Update the AES key length.
 	 */
 	ctx->cdata.keylen = keylen - 4;
-	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
-				   ctx->dir);
+	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);
 
 	ret = rfc4106_set_sh_desc(aead);
 	if (ret)
@@ -589,10 +585,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
 	if (keylen < 4)
 		return -EINVAL;
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 
 	memcpy(ctx->key, key, keylen);
 	/*
@@ -600,8 +594,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
 	 * in the nonce. Update the AES key length.
 	 */
 	ctx->cdata.keylen = keylen - 4;
-	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
-				   ctx->dir);
+	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
+				   ctx->cdata.keylen, ctx->dir);
 
 	ret = rfc4543_set_sh_desc(aead);
 	if (ret)
@@ -644,10 +638,9 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	const bool is_rfc3686 = alg->caam.rfc3686;
 	int ret = 0;
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
 	/*
 	 * AES-CTR needs to load IV in CONTEXT1 reg
 	 * at an offset of 128bits (16bytes)
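
All the setkey paths above now sync ctx->key_dma against jrdev->parent,
i.e. the controller device, which suggests the key buffer is mapped on the
controller rather than on the job-ring device. The DMA API requires that
map, sync, and unmap all use the same struct device; a sketch of the
assumed pairing (the mapping site itself is outside this diff)::

    /* Assumption: mapping and syncing use the same device */
    ctx->key_dma = dma_map_single(jrdev->parent, ctx->key,
                                  sizeof(ctx->key), ctx->dir);
    /* ... later, after rewriting ctx->key ... */
    dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
                               ctx->dir);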
@@ -838,7 +831,8 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
 static void caam_unmap(struct device *dev, struct scatterlist *src,
 		       struct scatterlist *dst, int src_nents,
 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
-		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
+		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
+		       int qm_sg_bytes)
 {
 	if (dst != src) {
 		if (src_nents)
@@ -850,7 +844,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
 	}
 
 	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
 	if (qm_sg_bytes)
 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
 }
@@ -863,7 +857,8 @@ static void aead_unmap(struct device *dev,
 	int ivsize = crypto_aead_ivsize(aead);
 
 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
+		   edesc->qm_sg_bytes);
 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 }
 
@@ -874,7 +869,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 
 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
+		   edesc->qm_sg_bytes);
 }
 
 static void aead_done(struct caam_drv_req *drv_req, u32 status)
@@ -924,6 +920,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	int src_len, dst_len = 0;
 	struct aead_edesc *edesc;
 	dma_addr_t qm_sg_dma, iv_dma = 0;
 	int ivsize = 0;
@@ -945,13 +942,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	}
 
 	if (likely(req->src == req->dst)) {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize : 0));
+		src_len = req->assoclen + req->cryptlen +
+			  (encrypt ? authsize : 0);
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : 0));
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
@@ -964,23 +961,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			return ERR_PTR(-ENOMEM);
 		}
 	} else {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen);
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen);
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
 
-		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize :
-					     (-authsize)));
+		dst_nents = sg_nents_for_len(req->dst, dst_len);
 		if (unlikely(dst_nents < 0)) {
 			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : (-authsize)));
+				dst_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(dst_nents);
 		}
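
caam_unmap() now takes the IV's DMA direction explicitly instead of
hard-coding DMA_TO_DEVICE, because the direction passed at unmap time must
match the one used at map time. The per-path directions, as visible in
these hunks::

    /*
     *   AEAD IV:      DMA_TO_DEVICE       (device only reads the IV)
     *   skcipher IV:  DMA_BIDIRECTIONAL   (device also writes the next
     *                                      IV back for chaining)
     *   early errors: DMA_NONE            (no IV mapped yet, iv_dma == 0)
     */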
@@ -1019,9 +1014,24 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	/*
 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
 	 * Input is not contiguous.
+	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+	 * the end of the table by allocating more S/G entries. Logic:
+	 * if (src != dst && output S/G)
+	 *	pad output S/G, if needed
+	 * else if (src == dst && S/G)
+	 *	overlapping S/Gs; pad one of them
+	 * else if (input S/G) ...
+	 *	pad input S/G, if needed
 	 */
-	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
-		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+	qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
+	if (mapped_dst_nents > 1)
+		qm_sg_ents += pad_sg_nents(mapped_dst_nents);
+	else if ((req->src == req->dst) && (mapped_src_nents > 1))
+		qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
+				 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
+	else
+		qm_sg_ents = pad_sg_nents(qm_sg_ents);
+
 	sg_table = &edesc->sgt[0];
 	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
 	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
@@ -1029,7 +1039,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
 			qm_sg_ents, ivsize);
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1044,7 +1054,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	if (dma_mapping_error(qidev, iv_dma)) {
 		dev_err(qidev, "unable to map IV\n");
 		caam_unmap(qidev, req->src, req->dst, src_nents,
-			   dst_nents, 0, 0, 0, 0);
+			   dst_nents, 0, 0, DMA_NONE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1063,7 +1073,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
 		dev_err(qidev, "unable to map assoclen\n");
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1074,19 +1084,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
 		qm_sg_index++;
 	}
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
 	qm_sg_index += mapped_src_nents;
 
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 qm_sg_index, 0);
+		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
 
 	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
 	if (dma_mapping_error(qidev, qm_sg_dma)) {
 		dev_err(qidev, "unable to map S/G table\n");
 		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1109,7 +1118,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
 				     (1 + !!ivsize) * sizeof(*sg_table),
 				     out_len, 0);
-	} else if (mapped_dst_nents == 1) {
+	} else if (mapped_dst_nents <= 1) {
 		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
 				 0);
 	} else {
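
The max() in the overlapping (src == dst) case deserves a worked example.
Assuming pad_sg_nents() rounds up to a multiple of 4 (see the earlier
sketch), the allocation must satisfy whichever padded table is larger::

    /*
     * mapped_src_nents = 5, ivsize > 0, req->src == req->dst:
     *
     *   qm_sg_ents               = 1 + 1 + 5      = 7
     *   pad_sg_nents(qm_sg_ents) = 8
     *   1 + 1 + pad_sg_nents(5)  = 1 + 1 + 8      = 10
     *   allocated                = max(8, 10)     = 10 entries
     *
     * i.e. the overlapped output table, which starts two entries in,
     * must still end on a padded boundary.
     */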
@@ -1182,33 +1191,28 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
 	struct device *qidev = caam_ctx->qidev;
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 
-#ifdef DEBUG
-	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
-#endif
+	dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
 
 	edesc = container_of(drv_req, typeof(*edesc), drv_req);
 
 	if (status)
 		caam_jr_strstatus(qidev, status);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
-		       edesc->src_nents > 1 ? 100 : ivsize, 1);
-	caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+			     edesc->src_nents > 1 ? 100 : ivsize, 1);
+	caam_dump_sg("dst @" __stringify(__LINE__)": ",
 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
-#endif
 
 	skcipher_unmap(qidev, edesc, req);
 
 	/*
 	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block. This is used e.g. by the CTS mode.
+	 * ciphertext block (CBC mode) or last counter (CTR mode).
+	 * This is used e.g. by the CTS mode.
 	 */
-	if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
-		scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
-					 ivsize, ivsize, 0);
+	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
 
 	qi_cache_free(edesc);
 	skcipher_request_complete(req, status);
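
As in the job-ring driver, the encrypt-only scatterwalk copy is gone: for
both directions the next IV is now read from the buffer that follows the
HW S/G table, where the output table's trailing IV entry pointed. A sketch
of the qi extended-descriptor layout this relies on::

    /*
     *   [ skcipher_edesc | sgt[0 .. qm_sg_ents-1] | iv ]
     *                                               ^
     *   memcpy(req->iv,
     *          (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize)
     *   reads from here
     */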
@ -1276,14 +1280,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
|
|||||||
qm_sg_ents = 1 + mapped_src_nents;
|
qm_sg_ents = 1 + mapped_src_nents;
|
||||||
dst_sg_idx = qm_sg_ents;
|
dst_sg_idx = qm_sg_ents;
|
||||||
|
|
||||||
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
|
/*
|
||||||
|
* Input, output HW S/G tables: [IV, src][dst, IV]
|
||||||
|
* IV entries point to the same buffer
|
||||||
|
* If src == dst, S/G entries are reused (S/G tables overlap)
|
||||||
|
*
|
||||||
|
* HW reads 4 S/G entries at a time; make sure the reads don't go beyond
|
||||||
|
* the end of the table by allocating more S/G entries.
|
||||||
|
*/
|
||||||
|
if (req->src != req->dst)
|
||||||
|
qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
|
||||||
|
else
|
||||||
|
qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
|
||||||
|
|
||||||
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
|
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
|
||||||
if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
|
if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
|
||||||
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
|
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
|
||||||
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
|
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
|
||||||
qm_sg_ents, ivsize);
|
qm_sg_ents, ivsize);
|
||||||
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
|
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
|
||||||
0, 0, 0);
|
0, DMA_NONE, 0, 0);
|
||||||
return ERR_PTR(-ENOMEM);
|
return ERR_PTR(-ENOMEM);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1292,7 +1308,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	if (unlikely(!edesc)) {
 		dev_err(qidev, "could not allocate extended descriptor\n");
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1301,11 +1317,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	iv = (u8 *)(sg_table + qm_sg_ents);
 	memcpy(iv, req->iv, ivsize);
 
-	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(qidev, iv_dma)) {
 		dev_err(qidev, "unable to map IV\n");
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1319,18 +1335,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	edesc->drv_req.drv_ctx = drv_ctx;
 
 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
 
-	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 dst_sg_idx, 0);
+	if (req->src != req->dst)
+		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
+
+	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
+			 ivsize, 0);
 
 	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
 					  DMA_TO_DEVICE);
 	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
 		dev_err(qidev, "unable to map S/G table\n");
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1340,16 +1358,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
 				  ivsize + req->cryptlen, 0);
 
-	if (req->src == req->dst) {
+	if (req->src == req->dst)
 		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
-				     sizeof(*sg_table), req->cryptlen, 0);
-	} else if (mapped_dst_nents > 1) {
+				     sizeof(*sg_table), req->cryptlen + ivsize,
+				     0);
+	else
 		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
-				     sizeof(*sg_table), req->cryptlen, 0);
-	} else {
-		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
-				 req->cryptlen, 0);
-	}
+				     sizeof(*sg_table), req->cryptlen + ivsize,
+				     0);
 
 	return edesc;
 }
@@ -1359,7 +1375,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 	struct skcipher_edesc *edesc;
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
-	int ivsize = crypto_skcipher_ivsize(skcipher);
 	int ret;
 
 	if (unlikely(caam_congested))
@@ -1370,14 +1385,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
-	/*
-	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block.
-	 */
-	if (!encrypt)
-		scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
-					 ivsize, ivsize, 0);
-
 	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
 	if (!ret) {
 		ret = -EINPROGRESS;
@@ -2382,6 +2389,7 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
 			    bool uses_dkp)
 {
 	struct caam_drv_private *priv;
+	struct device *dev;
 
 	/*
 	 * distribute tfms across job rings to ensure in-order
@@ -2393,16 +2401,17 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
 		return PTR_ERR(ctx->jrdev);
 	}
 
-	priv = dev_get_drvdata(ctx->jrdev->parent);
+	dev = ctx->jrdev->parent;
+	priv = dev_get_drvdata(dev);
 	if (priv->era >= 6 && uses_dkp)
 		ctx->dir = DMA_BIDIRECTIONAL;
 	else
 		ctx->dir = DMA_TO_DEVICE;
 
-	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
+	ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
 				      ctx->dir);
-	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
-		dev_err(ctx->jrdev, "unable to map key\n");
+	if (dma_mapping_error(dev, ctx->key_dma)) {
+		dev_err(dev, "unable to map key\n");
 		caam_jr_free(ctx->jrdev);
 		return -ENOMEM;
 	}
@@ -2411,7 +2420,7 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
 
-	ctx->qidev = priv->qidev;
+	ctx->qidev = dev;
 
 	spin_lock_init(&ctx->lock);
 	ctx->drv_ctx[ENCRYPT] = NULL;
@@ -2445,7 +2454,8 @@ static void caam_exit_common(struct caam_ctx *ctx)
 	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
 	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
 
-	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
+	dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key),
+			 ctx->dir);
 
 	caam_jr_free(ctx->jrdev);
 }
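
Both hunks above retarget the key mapping from the job-ring device to its parent controller, and caam_exit_common() now unmaps on that same parent. The rule being honored is that dma_map_single() and dma_unmap_single() must be given the same struct device, otherwise the IOMMU or bounce-buffer state goes out of sync. A condensed sketch of the pairing, using names from the diff:

    /* Sketch: the DMA API requires map and unmap on the same device. */
    static int map_key_on_ctrl(struct caam_ctx *ctx)
    {
        struct device *dev = ctx->jrdev->parent;    /* controller device */

        ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
                                      ctx->dir);
        if (dma_mapping_error(dev, ctx->key_dma))
            return -ENOMEM;
        return 0;
    }

    /* ...and later, mirrored exactly on the same device: */
    /* dma_unmap_single(ctx->jrdev->parent, ctx->key_dma,
                        sizeof(ctx->key), ctx->dir); */
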
@@ -2460,7 +2470,7 @@ static void caam_aead_exit(struct crypto_aead *tfm)
 	caam_exit_common(crypto_aead_ctx(tfm));
 }
 
-static void __exit caam_qi_algapi_exit(void)
+void caam_qi_algapi_exit(void)
 {
 	int i;
 
@@ -2505,45 +2515,17 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
 	alg->exit = caam_aead_exit;
 }
 
-static int __init caam_qi_algapi_init(void)
+int caam_qi_algapi_init(struct device *ctrldev)
 {
-	struct device_node *dev_node;
-	struct platform_device *pdev;
-	struct device *ctrldev;
-	struct caam_drv_private *priv;
+	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
 	int i = 0, err = 0;
 	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
 	unsigned int md_limit = SHA512_DIGEST_SIZE;
 	bool registered = false;
 
-	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node) {
-		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-		if (!dev_node)
-			return -ENODEV;
-	}
-
-	pdev = of_find_device_by_node(dev_node);
-	of_node_put(dev_node);
-	if (!pdev)
-		return -ENODEV;
-
-	ctrldev = &pdev->dev;
-	priv = dev_get_drvdata(ctrldev);
-
-	/*
-	 * If priv is NULL, it's probably because the caam driver wasn't
-	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
-	 */
-	if (!priv || !priv->qi_present) {
-		err = -ENODEV;
-		goto out_put_dev;
-	}
-
 	if (caam_dpaa2) {
 		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
-		err = -ENODEV;
-		goto out_put_dev;
+		return -ENODEV;
 	}
 
 	/*
@@ -2598,7 +2580,7 @@ static int __init caam_qi_algapi_init(void)
 
 		err = crypto_register_skcipher(&t_alg->skcipher);
 		if (err) {
-			dev_warn(priv->qidev, "%s alg registration failed\n",
+			dev_warn(ctrldev, "%s alg registration failed\n",
 				 t_alg->skcipher.base.cra_driver_name);
 			continue;
 		}
@@ -2654,16 +2636,7 @@ static int __init caam_qi_algapi_init(void)
 	}
 
 	if (registered)
-		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
+		dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
 
-out_put_dev:
-	put_device(ctrldev);
 	return err;
 }
-
-module_init(caam_qi_algapi_init);
-module_exit(caam_qi_algapi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
-MODULE_AUTHOR("Freescale Semiconductor");
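
With module_init()/module_exit() and the device-tree lookup gone, caam_qi_algapi_init() becomes a hook that some controller-side code must now call with an already-validated device. That caller is outside this excerpt; the sketch below is a hypothetical reconstruction of its shape, not the actual call site:

    /* Sketch (hypothetical caller): the controller probe drives algapi init. */
    static int caam_ctrl_probe_tail(struct device *ctrldev)
    {
        int err;

        err = caam_qi_algapi_init(ctrldev);   /* new signature from the diff */
        if (err)
            dev_warn(ctrldev, "algapi init failed: %d\n", err);
        return err;
    }

One practical consequence of this shape: the init code no longer needs to re-discover the controller via the device tree or take a device reference, so the out_put_dev error path disappears with it.
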
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /*
  * Copyright 2015-2016 Freescale Semiconductor Inc.
- * Copyright 2017-2018 NXP
+ * Copyright 2017-2019 NXP
  */
 
 #include "compat.h"
@@ -140,7 +140,8 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq)
 static void caam_unmap(struct device *dev, struct scatterlist *src,
 		       struct scatterlist *dst, int src_nents,
 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
-		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
+		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
+		       int qm_sg_bytes)
 {
 	if (dst != src) {
 		if (src_nents)
@@ -152,7 +153,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
 	}
 
 	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
 
 	if (qm_sg_bytes)
 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
@@ -371,6 +372,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	int src_len, dst_len = 0;
 	struct aead_edesc *edesc;
 	dma_addr_t qm_sg_dma, iv_dma = 0;
 	int ivsize = 0;
@@ -387,23 +389,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	}
 
 	if (unlikely(req->dst != req->src)) {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen);
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen);
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
 
-		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize :
-							(-authsize)));
+		dst_nents = sg_nents_for_len(req->dst, dst_len);
 		if (unlikely(dst_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : (-authsize)));
+				dst_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(dst_nents);
 		}
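
The new src_len and dst_len locals only name arithmetic that was previously repeated inline, but the signs deserve a worked example (values illustrative): for an AEAD encrypt with assoclen = 16, cryptlen = 32 and authsize = 16:

    src_len = req->assoclen + req->cryptlen;                /* 16 + 32 = 48 */
    dst_len = src_len + (encrypt ? authsize : (-authsize)); /* 48 + 16 = 64 */
    /* on decrypt the tag is consumed rather than produced, so dst_len
     * shrinks by authsize instead of growing by it */
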
@@ -434,13 +434,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			mapped_dst_nents = 0;
 		}
 	} else {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize : 0));
+		src_len = req->assoclen + req->cryptlen +
+			  (encrypt ? authsize : 0);
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : 0));
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
@@ -460,9 +460,25 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	/*
 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
 	 * Input is not contiguous.
+	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+	 * the end of the table by allocating more S/G entries. Logic:
+	 * if (src != dst && output S/G)
+	 *      pad output S/G, if needed
+	 * else if (src == dst && S/G)
+	 *      overlapping S/Gs; pad one of them
+	 * else if (input S/G) ...
+	 *      pad input S/G, if needed
 	 */
-	qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
-		      (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
+	if (mapped_dst_nents > 1)
+		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
+	else if ((req->src == req->dst) && (mapped_src_nents > 1))
+		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
+				  1 + !!ivsize +
+				  pad_sg_nents(mapped_src_nents));
+	else
+		qm_sg_nents = pad_sg_nents(qm_sg_nents);
 
 	sg_table = &edesc->sgt[0];
 	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
 	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
@@ -470,7 +486,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
 			qm_sg_nents, ivsize);
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -485,7 +501,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	if (dma_mapping_error(dev, iv_dma)) {
 		dev_err(dev, "unable to map IV\n");
 		caam_unmap(dev, req->src, req->dst, src_nents,
-			   dst_nents, 0, 0, 0, 0);
+			   dst_nents, 0, 0, DMA_NONE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -509,7 +525,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
 		dev_err(dev, "unable to map assoclen\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -520,19 +536,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
 		qm_sg_index++;
 	}
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
 	qm_sg_index += mapped_src_nents;
 
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 qm_sg_index, 0);
+		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
 
 	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, qm_sg_dma)) {
 		dev_err(dev, "unable to map S/G table\n");
 		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -559,6 +574,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
 					  (1 + !!ivsize) * sizeof(*sg_table));
 		}
+	} else if (!mapped_dst_nents) {
+		/*
+		 * crypto engine requires the output entry to be present when
+		 * "frame list" FD is used.
+		 * Since engine does not support FMT=2'b11 (unused entry type),
+		 * leaving out_fle zeroized is the best option.
+		 */
+		goto skip_out_fle;
 	} else if (mapped_dst_nents == 1) {
 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
@@ -570,6 +593,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 
 	dpaa2_fl_set_len(out_fle, out_len);
 
+skip_out_fle:
 	return edesc;
 }
 
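The new skip_out_fle path handles a destination that maps to zero S/G entries: the hardware frame-list format has no way to mark an entry unused, so the cleanest option is to leave the zeroized output entry untouched and skip the length setup entirely. A condensed view of the output-entry decision after this change, summarizing the surrounding diff in comments:

    /* Sketch of the output frame-list entry cases in aead_edesc_alloc(). */
    if (req->dst == req->src) {
        /* in-place: output points back into the shared S/G table */
    } else if (!mapped_dst_nents) {
        /* nothing to write: leave out_fle zeroized, skip length setup */
    } else if (mapped_dst_nents == 1) {
        /* contiguous output: single-buffer frame-list entry */
    } else {
        /* scattered output: S/G-formatted frame-list entry */
    }
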
@@ -1077,14 +1101,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	qm_sg_ents = 1 + mapped_src_nents;
 	dst_sg_idx = qm_sg_ents;
 
-	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+	/*
+	 * Input, output HW S/G tables: [IV, src][dst, IV]
+	 * IV entries point to the same buffer
+	 * If src == dst, S/G entries are reused (S/G tables overlap)
+	 *
+	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+	 * the end of the table by allocating more S/G entries.
+	 */
+	if (req->src != req->dst)
+		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
+	else
+		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
 
 	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
 	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
 		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
 			qm_sg_ents, ivsize);
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1093,7 +1129,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	if (unlikely(!edesc)) {
 		dev_err(dev, "could not allocate extended descriptor\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1102,11 +1138,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	iv = (u8 *)(sg_table + qm_sg_ents);
 	memcpy(iv, req->iv, ivsize);
 
-	iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, iv_dma)) {
 		dev_err(dev, "unable to map IV\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1117,18 +1153,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
 
-	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 dst_sg_idx, 0);
+	if (req->src != req->dst)
+		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
+
+	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
+			 ivsize, 0);
 
 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
 					  DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
 		dev_err(dev, "unable to map S/G table\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
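
Mapping the IV DMA_BIDIRECTIONAL is what legitimizes the memcpy() read-back in the completion callbacks: the engine reads the input IV from a driver-owned buffer and writes the updated IV back into the same slot, the trailing IV entry just added to the output S/G table. A sketch of that buffer's lifecycle, condensed from the surrounding hunks:

    u8 *iv = (u8 *)(sg_table + qm_sg_ents);   /* IV lives after the S/G table */
    memcpy(iv, req->iv, ivsize);              /* CPU writes the input IV      */
    iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
    /* ... device reads the IV, then writes the updated IV back ... */
    dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
    memcpy(req->iv, iv, ivsize);              /* CPU reads the updated IV     */

This also removes a correctness hazard of the old scheme: for in-place decryption, copying the "last ciphertext block" out of req->src or req->dst around the operation raced with the engine overwriting that data.
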
@@ -1136,23 +1174,19 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
 	dpaa2_fl_set_final(in_fle, true);
 	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
-	dpaa2_fl_set_len(out_fle, req->cryptlen);
+	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
 
 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
 
-	if (req->src == req->dst) {
-		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+
+	if (req->src == req->dst)
 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
 				  sizeof(*sg_table));
-	} else if (mapped_dst_nents > 1) {
-		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+	else
 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
 				  sizeof(*sg_table));
-	} else {
-		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
-	}
 
 	return edesc;
 }
@@ -1164,7 +1198,8 @@ static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
 	int ivsize = crypto_aead_ivsize(aead);
 
 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
+		   edesc->qm_sg_bytes);
 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 }
 
@@ -1175,7 +1210,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 
 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
+		   edesc->qm_sg_bytes);
 }
 
 static void aead_encrypt_done(void *cbk_ctx, u32 status)
@@ -1324,7 +1360,7 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
 	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
-	caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+	caam_dump_sg("dst @" __stringify(__LINE__)": ",
 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
 
@@ -1332,10 +1368,10 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
 
 	/*
 	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block. This is used e.g. by the CTS mode.
+	 * ciphertext block (CBC mode) or last counter (CTR mode).
+	 * This is used e.g. by the CTS mode.
 	 */
-	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
-				 ivsize, 0);
+	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
 
 	qi_cache_free(edesc);
 	skcipher_request_complete(req, ecode);
@@ -1362,11 +1398,19 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
 	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
-	caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+	caam_dump_sg("dst @" __stringify(__LINE__)": ",
 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
 
 	skcipher_unmap(ctx->dev, edesc, req);
 
+	/*
+	 * The crypto API expects us to set the IV (req->iv) to the last
+	 * ciphertext block (CBC mode) or last counter (CTR mode).
+	 * This is used e.g. by the CTS mode.
+	 */
+	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
+
 	qi_cache_free(edesc);
 	skcipher_request_complete(req, ecode);
 }
@@ -1405,7 +1449,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct caam_request *caam_req = skcipher_request_ctx(req);
-	int ivsize = crypto_skcipher_ivsize(skcipher);
 	int ret;
 
 	/* allocate extended descriptor */
@@ -1413,13 +1456,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
-	/*
-	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block.
-	 */
-	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
-				 ivsize, 0);
-
 	caam_req->flc = &ctx->flc[DECRYPT];
 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
 	caam_req->cbk = skcipher_decrypt_done;
@@ -3380,9 +3416,9 @@ static int ahash_update_ctx(struct ahash_request *req)
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -3409,7 +3445,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 
 		edesc->src_nents = src_nents;
 		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
-		qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
+		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
 			      sizeof(*sg_table);
 		sg_table = &edesc->sgt[0];
 
@@ -3423,7 +3459,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 			goto unmap_ctx;
 
 		if (mapped_nents) {
-			sg_to_qm_sg_last(req->src, mapped_nents,
+			sg_to_qm_sg_last(req->src, src_len,
 					 sg_table + qm_sg_src_index, 0);
 			if (*next_buflen)
 				scatterwalk_map_and_copy(next_buf, req->src,
@@ -3494,7 +3530,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
 	int buflen = *current_buflen(state);
-	int qm_sg_bytes, qm_sg_src_index;
+	int qm_sg_bytes;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
 	struct dpaa2_sg_entry *sg_table;
@@ -3505,8 +3541,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	if (!edesc)
 		return -ENOMEM;
 
-	qm_sg_src_index = 1 + (buflen ? 1 : 0);
-	qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
+	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
 	sg_table = &edesc->sgt[0];
 
 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
@@ -3518,7 +3553,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	if (ret)
 		goto unmap_ctx;
 
-	dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
+	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
 
 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
 					  DMA_TO_DEVICE);
@@ -3599,7 +3634,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
 
 	edesc->src_nents = src_nents;
 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
-	qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
+	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
+		      sizeof(*sg_table);
 	sg_table = &edesc->sgt[0];
 
 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
@@ -3611,7 +3647,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	if (ret)
 		goto unmap_ctx;
 
-	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
+	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
 
 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
 					  DMA_TO_DEVICE);
@@ -3696,8 +3732,8 @@ static int ahash_digest(struct ahash_request *req)
 		int qm_sg_bytes;
 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
 
-		qm_sg_bytes = mapped_nents * sizeof(*sg_table);
-		sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
+		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
 						  qm_sg_bytes, DMA_TO_DEVICE);
 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -3840,9 +3876,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - *next_buflen);
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -3868,14 +3904,15 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		}
 
 		edesc->src_nents = src_nents;
-		qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
+		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
+			      sizeof(*sg_table);
 		sg_table = &edesc->sgt[0];
 
 		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
 		if (ret)
 			goto unmap_ctx;
 
-		sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
 
 		if (*next_buflen)
 			scatterwalk_map_and_copy(next_buf, req->src,
@@ -3987,14 +4024,14 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	}
 
 	edesc->src_nents = src_nents;
-	qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
+	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
 	sg_table = &edesc->sgt[0];
 
 	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
 	if (ret)
 		goto unmap;
 
-	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
 
 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
 					  DMA_TO_DEVICE);
@@ -4064,9 +4101,9 @@ static int ahash_update_first(struct ahash_request *req)
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -4101,8 +4138,9 @@ static int ahash_update_first(struct ahash_request *req)
 		if (mapped_nents > 1) {
 			int qm_sg_bytes;
 
-			sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
-			qm_sg_bytes = mapped_nents * sizeof(*sg_table);
+			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
+			qm_sg_bytes = pad_sg_nents(mapped_nents) *
+				      sizeof(*sg_table);
 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
 							  qm_sg_bytes,
 							  DMA_TO_DEVICE);
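
All of these ahash update paths share one piece of bookkeeping: only whole blocks are fed to the engine, and the sub-block tail is buffered for the next update; the new src_len local names the bytes actually consumed from req->src. A sketch of the arithmetic, simplified from the driver's logic:

    /*
     * Sketch (not the driver's exact code): carry the sub-block tail.
     * in_len counts previously buffered bytes plus this request's bytes.
     */
    int in_len      = *buflen + req->nbytes;
    int next_buflen = in_len & (blocksize - 1);  /* bytes kept for later      */
    int to_hash     = in_len - next_buflen;      /* bytes the engine consumes */
    int src_len     = req->nbytes - next_buflen; /* bytes taken from req->src */
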
@@ -82,14 +82,6 @@
 #define HASH_MSG_LEN		8
 #define MAX_CTX_LEN		(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
 
-#ifdef DEBUG
-/* for print_hex_dumps with line references */
-#define debug(format, arg...) printk(format, arg)
-#else
-#define debug(format, arg...)
-#endif
-
-
 static struct list_head hash_list;
 
 /* ahash per-session context */
@@ -243,11 +235,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 			  ctx->ctx_len, true, ctrlpriv->era);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
 				   desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "ahash update shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+
+	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 
 	/* ahash_update_first shared descriptor */
 	desc = ctx->sh_desc_update_first;
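From here on the patch repeats one mechanical conversion: #ifdef DEBUG guards around print_hex_dump(KERN_ERR, ...) become single print_hex_dump_debug() calls, which compile away unless DEBUG or dynamic debug is enabled and therefore need no guard. In miniature:

    /* before: only built with -DDEBUG, and always at KERN_ERR severity */
    #ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx@: ", DUMP_PREFIX_ADDRESS, 16, 4,
                       buf, len, 1);
    #endif

    /* after: one call, gated by DEBUG or CONFIG_DYNAMIC_DEBUG at runtime */
        print_hex_dump_debug("ctx@: ", DUMP_PREFIX_ADDRESS, 16, 4,
                             buf, len, 1);
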
@@ -255,11 +246,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 			  ctx->ctx_len, false, ctrlpriv->era);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
 				   desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "ahash update first shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
+			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
 
 	/* ahash_final shared descriptor */
 	desc = ctx->sh_desc_fin;
@@ -267,11 +256,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 			  ctx->ctx_len, true, ctrlpriv->era);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
 				   desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
+
+	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
 
 	/* ahash_digest shared descriptor */
 	desc = ctx->sh_desc_digest;
@@ -279,12 +267,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 			  ctx->ctx_len, false, ctrlpriv->era);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
 				   desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "ahash digest shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
+
+	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
 
 	return 0;
 }
@@ -328,9 +314,9 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
 			    ctx->ctx_len, ctx->key_dma);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
 				   desc_bytes(desc), ctx->dir);
-	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ",
-			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
-			     1);
+	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
+			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
 
 	/* shared descriptor for ahash_digest */
 	desc = ctx->sh_desc_digest;
@@ -377,8 +363,8 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash)
 			    ctx->ctx_len, 0);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
 				   desc_bytes(desc), ctx->dir);
-	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ",
-			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
-			     desc_bytes(desc), 1);
+	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
+			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
 
 	/* shared descriptor for ahash_digest */
@@ -429,12 +415,11 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
 			 LDST_SRCDST_BYTE_CONTEXT);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
-	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
+	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
 
 	result.err = 0;
 	init_completion(&result.completion);
@@ -444,11 +429,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
 		/* in progress */
 		wait_for_completion(&result.completion);
 		ret = result.err;
-#ifdef DEBUG
-		print_hex_dump(KERN_ERR,
-			       "digested key@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1);
-#endif
+
+		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, key,
+				     digestsize, 1);
 	}
 	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
 
@@ -463,15 +447,14 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 			const u8 *key, unsigned int keylen)
 {
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct device *jrdev = ctx->jrdev;
 	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
 	int ret;
 	u8 *hashed_key = NULL;
 
-#ifdef DEBUG
-	printk(KERN_ERR "keylen %d\n", keylen);
-#endif
+	dev_dbg(jrdev, "keylen %d\n", keylen);
 
 	if (keylen > blocksize) {
 		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
@@ -600,11 +583,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
@@ -614,11 +595,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
 	memcpy(req->result, state->caam_ctx, digestsize);
 	kfree(edesc);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
-		       ctx->ctx_len, 1);
-#endif
+	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+			     ctx->ctx_len, 1);
 
 	req->base.complete(&req->base, err);
 }
@@ -631,11 +610,9 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
 	int digestsize = crypto_ahash_digestsize(ahash);
 
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
@@ -645,15 +622,13 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
 	switch_buf(state);
 	kfree(edesc);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
-		       ctx->ctx_len, 1);
+	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+			     ctx->ctx_len, 1);
 	if (req->result)
-		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-			       digestsize, 1);
-#endif
+		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+				     digestsize, 1);
 
 	req->base.complete(&req->base, err);
 }
@@ -666,11 +641,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
@@ -680,11 +653,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
 	memcpy(req->result, state->caam_ctx, digestsize);
 	kfree(edesc);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
-		       ctx->ctx_len, 1);
-#endif
+	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+			     ctx->ctx_len, 1);
 
 	req->base.complete(&req->base, err);
 }
@@ -697,11 +668,9 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
 	int digestsize = crypto_ahash_digestsize(ahash);
 
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
@@ -711,15 +680,13 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 	switch_buf(state);
 	kfree(edesc);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
-		       ctx->ctx_len, 1);
+	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+			     ctx->ctx_len, 1);
 	if (req->result)
-		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-			       digestsize, 1);
-#endif
+		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+				     digestsize, 1);
 
 	req->base.complete(&req->base, err);
 }
@@ -759,9 +726,10 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
 
 	if (nents > 1 || first_sg) {
 		struct sec4_sg_entry *sg = edesc->sec4_sg;
-		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
+		unsigned int sgsize = sizeof(*sg) *
+				      pad_sg_nents(first_sg + nents);
 
-		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
+		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
 
 		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
 		if (dma_mapping_error(ctx->jrdev, src_dma)) {
@@ -819,8 +787,10 @@ static int ahash_update_ctx(struct ahash_request *req)
 	}
 
 	if (to_hash) {
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		int pad_nents;
+		int src_len = req->nbytes - *next_buflen;
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(jrdev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -838,15 +808,14 @@ static int ahash_update_ctx(struct ahash_request *req)
 		}
 
 		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
-		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
-				sizeof(struct sec4_sg_entry);
+		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
+		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
 
 		/*
 		 * allocate space for base edesc and hw desc commands,
 		 * link tables
 		 */
-		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
-					  ctx->sh_desc_update,
+		edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
 					  ctx->sh_desc_update_dma, flags);
 		if (!edesc) {
 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
@@ -866,7 +835,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 			goto unmap_ctx;
 
 		if (mapped_nents)
-			sg_to_sec4_sg_last(req->src, mapped_nents,
+			sg_to_sec4_sg_last(req->src, src_len,
 					   edesc->sec4_sg + sec4_sg_src_index,
 					   0);
 		else
@@ -893,11 +862,9 @@ static int ahash_update_ctx(struct ahash_request *req)
 
 		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
 
-#ifdef DEBUG
-		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
desc_bytes(desc), 1);
|
||||||
desc_bytes(desc), 1);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
|
||||||
if (ret)
|
if (ret)
|
||||||
@ -910,13 +877,12 @@ static int ahash_update_ctx(struct ahash_request *req)
|
|||||||
*buflen = *next_buflen;
|
*buflen = *next_buflen;
|
||||||
*next_buflen = last_buflen;
|
*next_buflen = last_buflen;
|
||||||
}
|
}
|
||||||
#ifdef DEBUG
|
|
||||||
print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
|
print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
|
DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
|
||||||
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
|
print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
|
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
|
||||||
*next_buflen, 1);
|
*next_buflen, 1);
|
||||||
#endif
|
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
unmap_ctx:
|
unmap_ctx:
|
||||||
@ -935,18 +901,17 @@ static int ahash_final_ctx(struct ahash_request *req)
|
|||||||
GFP_KERNEL : GFP_ATOMIC;
|
GFP_KERNEL : GFP_ATOMIC;
|
||||||
int buflen = *current_buflen(state);
|
int buflen = *current_buflen(state);
|
||||||
u32 *desc;
|
u32 *desc;
|
||||||
int sec4_sg_bytes, sec4_sg_src_index;
|
int sec4_sg_bytes;
|
||||||
int digestsize = crypto_ahash_digestsize(ahash);
|
int digestsize = crypto_ahash_digestsize(ahash);
|
||||||
struct ahash_edesc *edesc;
|
struct ahash_edesc *edesc;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
|
sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
|
||||||
sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
|
sizeof(struct sec4_sg_entry);
|
||||||
|
|
||||||
/* allocate space for base edesc and hw desc commands, link tables */
|
/* allocate space for base edesc and hw desc commands, link tables */
|
||||||
edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
|
edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
|
||||||
ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
|
ctx->sh_desc_fin_dma, flags);
|
||||||
flags);
|
|
||||||
if (!edesc)
|
if (!edesc)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
@ -963,7 +928,7 @@ static int ahash_final_ctx(struct ahash_request *req)
|
|||||||
if (ret)
|
if (ret)
|
||||||
goto unmap_ctx;
|
goto unmap_ctx;
|
||||||
|
|
||||||
sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
|
sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
|
||||||
|
|
||||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||||
@ -977,10 +942,9 @@ static int ahash_final_ctx(struct ahash_request *req)
|
|||||||
LDST_SGF);
|
LDST_SGF);
|
||||||
append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
|
append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
|
||||||
|
|
||||||
#ifdef DEBUG
|
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
|
1);
|
||||||
#endif
|
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
|
||||||
if (ret)
|
if (ret)
|
||||||
@ -1058,10 +1022,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
|
|||||||
|
|
||||||
append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
|
append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
|
||||||
|
|
||||||
#ifdef DEBUG
|
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
|
1);
|
||||||
#endif
|
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
|
||||||
if (ret)
|
if (ret)
|
||||||
@ -1135,10 +1098,9 @@ static int ahash_digest(struct ahash_request *req)
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef DEBUG
|
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
|
1);
|
||||||
#endif
|
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
@ -1190,10 +1152,9 @@ static int ahash_final_no_ctx(struct ahash_request *req)
|
|||||||
if (ret)
|
if (ret)
|
||||||
goto unmap;
|
goto unmap;
|
||||||
|
|
||||||
#ifdef DEBUG
|
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
|
1);
|
||||||
#endif
|
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
@ -1246,8 +1207,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (to_hash) {
|
if (to_hash) {
|
||||||
src_nents = sg_nents_for_len(req->src,
|
int pad_nents;
|
||||||
req->nbytes - *next_buflen);
|
int src_len = req->nbytes - *next_buflen;
|
||||||
|
|
||||||
|
src_nents = sg_nents_for_len(req->src, src_len);
|
||||||
if (src_nents < 0) {
|
if (src_nents < 0) {
|
||||||
dev_err(jrdev, "Invalid number of src SG.\n");
|
dev_err(jrdev, "Invalid number of src SG.\n");
|
||||||
return src_nents;
|
return src_nents;
|
||||||
@ -1264,14 +1227,14 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
|||||||
mapped_nents = 0;
|
mapped_nents = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
sec4_sg_bytes = (1 + mapped_nents) *
|
pad_nents = pad_sg_nents(1 + mapped_nents);
|
||||||
sizeof(struct sec4_sg_entry);
|
sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* allocate space for base edesc and hw desc commands,
|
* allocate space for base edesc and hw desc commands,
|
||||||
* link tables
|
* link tables
|
||||||
*/
|
*/
|
||||||
edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
|
edesc = ahash_edesc_alloc(ctx, pad_nents,
|
||||||
ctx->sh_desc_update_first,
|
ctx->sh_desc_update_first,
|
||||||
ctx->sh_desc_update_first_dma,
|
ctx->sh_desc_update_first_dma,
|
||||||
flags);
|
flags);
|
||||||
@ -1287,8 +1250,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
|||||||
if (ret)
|
if (ret)
|
||||||
goto unmap_ctx;
|
goto unmap_ctx;
|
||||||
|
|
||||||
sg_to_sec4_sg_last(req->src, mapped_nents,
|
sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
|
||||||
edesc->sec4_sg + 1, 0);
|
|
||||||
|
|
||||||
if (*next_buflen) {
|
if (*next_buflen) {
|
||||||
scatterwalk_map_and_copy(next_buf, req->src,
|
scatterwalk_map_and_copy(next_buf, req->src,
|
||||||
@ -1313,11 +1275,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
|||||||
if (ret)
|
if (ret)
|
||||||
goto unmap_ctx;
|
goto unmap_ctx;
|
||||||
|
|
||||||
#ifdef DEBUG
|
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
desc_bytes(desc), 1);
|
||||||
desc_bytes(desc), 1);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
|
||||||
if (ret)
|
if (ret)
|
||||||
@ -1333,13 +1293,12 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
|||||||
*buflen = *next_buflen;
|
*buflen = *next_buflen;
|
||||||
*next_buflen = 0;
|
*next_buflen = 0;
|
||||||
}
|
}
|
||||||
#ifdef DEBUG
|
|
||||||
print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
|
print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
|
DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
|
||||||
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
|
print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
|
DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
|
||||||
*next_buflen, 1);
|
1);
|
||||||
#endif
|
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
unmap_ctx:
|
unmap_ctx:
|
||||||
@ -1414,10 +1373,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
|
|||||||
if (ret)
|
if (ret)
|
||||||
goto unmap;
|
goto unmap;
|
||||||
|
|
||||||
#ifdef DEBUG
|
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
|
1);
|
||||||
#endif
|
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
@ -1517,11 +1475,9 @@ static int ahash_update_first(struct ahash_request *req)
|
|||||||
if (ret)
|
if (ret)
|
||||||
goto unmap_ctx;
|
goto unmap_ctx;
|
||||||
|
|
||||||
#ifdef DEBUG
|
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
desc_bytes(desc), 1);
|
||||||
desc_bytes(desc), 1);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
|
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
|
||||||
if (ret)
|
if (ret)
|
||||||
@ -1539,11 +1495,10 @@ static int ahash_update_first(struct ahash_request *req)
|
|||||||
req->nbytes, 0);
|
req->nbytes, 0);
|
||||||
switch_buf(state);
|
switch_buf(state);
|
||||||
}
|
}
|
||||||
#ifdef DEBUG
|
|
||||||
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
|
print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
|
DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
|
||||||
*next_buflen, 1);
|
1);
|
||||||
#endif
|
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
unmap_ctx:
|
unmap_ctx:
|
||||||
@ -1930,7 +1885,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
|
|||||||
caam_jr_free(ctx->jrdev);
|
caam_jr_free(ctx->jrdev);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __exit caam_algapi_hash_exit(void)
|
void caam_algapi_hash_exit(void)
|
||||||
{
|
{
|
||||||
struct caam_hash_alg *t_alg, *n;
|
struct caam_hash_alg *t_alg, *n;
|
||||||
|
|
||||||
@ -1988,40 +1943,13 @@ caam_hash_alloc(struct caam_hash_template *template,
|
|||||||
return t_alg;
|
return t_alg;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int __init caam_algapi_hash_init(void)
|
int caam_algapi_hash_init(struct device *ctrldev)
|
||||||
{
|
{
|
||||||
struct device_node *dev_node;
|
|
||||||
struct platform_device *pdev;
|
|
||||||
int i = 0, err = 0;
|
int i = 0, err = 0;
|
||||||
struct caam_drv_private *priv;
|
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
|
||||||
unsigned int md_limit = SHA512_DIGEST_SIZE;
|
unsigned int md_limit = SHA512_DIGEST_SIZE;
|
||||||
u32 md_inst, md_vid;
|
u32 md_inst, md_vid;
|
||||||
|
|
||||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
|
|
||||||
if (!dev_node) {
|
|
||||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
|
|
||||||
if (!dev_node)
|
|
||||||
return -ENODEV;
|
|
||||||
}
|
|
||||||
|
|
||||||
pdev = of_find_device_by_node(dev_node);
|
|
||||||
if (!pdev) {
|
|
||||||
of_node_put(dev_node);
|
|
||||||
return -ENODEV;
|
|
||||||
}
|
|
||||||
|
|
||||||
priv = dev_get_drvdata(&pdev->dev);
|
|
||||||
of_node_put(dev_node);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If priv is NULL, it's probably because the caam driver wasn't
|
|
||||||
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
|
|
||||||
*/
|
|
||||||
if (!priv) {
|
|
||||||
err = -ENODEV;
|
|
||||||
goto out_put_dev;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Register crypto algorithms the device supports. First, identify
|
* Register crypto algorithms the device supports. First, identify
|
||||||
* presence and attributes of MD block.
|
* presence and attributes of MD block.
|
||||||
@ -2042,10 +1970,8 @@ static int __init caam_algapi_hash_init(void)
|
|||||||
* Skip registration of any hashing algorithms if MD block
|
* Skip registration of any hashing algorithms if MD block
|
||||||
* is not present.
|
* is not present.
|
||||||
*/
|
*/
|
||||||
if (!md_inst) {
|
if (!md_inst)
|
||||||
err = -ENODEV;
|
return -ENODEV;
|
||||||
goto out_put_dev;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Limit digest size based on LP256 */
|
/* Limit digest size based on LP256 */
|
||||||
if (md_vid == CHA_VER_VID_MD_LP256)
|
if (md_vid == CHA_VER_VID_MD_LP256)
|
||||||
@ -2102,14 +2028,5 @@ static int __init caam_algapi_hash_init(void)
|
|||||||
list_add_tail(&t_alg->entry, &hash_list);
|
list_add_tail(&t_alg->entry, &hash_list);
|
||||||
}
|
}
|
||||||
|
|
||||||
out_put_dev:
|
|
||||||
put_device(&pdev->dev);
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
module_init(caam_algapi_hash_init);
|
|
||||||
module_exit(caam_algapi_hash_exit);
|
|
||||||
|
|
||||||
MODULE_LICENSE("GPL");
|
|
||||||
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
|
|
||||||
MODULE_AUTHOR("Freescale Semiconductor - NMG");
|
|
||||||
|
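The bulk of the caamhash.c hunks above are one mechanical conversion: debug dumps guarded by #ifdef DEBUG and printed at KERN_ERR become unconditional dev_dbg()/print_hex_dump_debug() calls, i.e. dynamic-debug call sites that can be toggled per line at run time. A minimal sketch of the before/after pattern, with a hypothetical callback standing in for the driver's real completion handlers:

#include <linux/device.h>
#include <linux/printk.h>
#include <linux/types.h>

/* hypothetical callback; only the debug-call style mirrors the patch */
static void example_done(struct device *jrdev, u32 err, void *result,
                         int digestsize)
{
        /* was: #ifdef DEBUG ... dev_err(...) ... #endif */
        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

        /* was: print_hex_dump(KERN_ERR, ...) under #ifdef DEBUG */
        print_hex_dump_debug("result@: ", DUMP_PREFIX_ADDRESS, 16, 4,
                             result, digestsize, 1);
}

With CONFIG_DYNAMIC_DEBUG these sites can be enabled through the dynamic_debug control file; without it they compile away below the DEBUG loglevel, so the #ifdef scaffolding is no longer needed.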
@@ -3,7 +3,7 @@
  * caam - Freescale FSL CAAM support for Public Key Cryptography
  *
  * Copyright 2016 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
  *
  * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
  * all the desired key parameters, input and output pointers.
@@ -24,12 +24,18 @@
 				 sizeof(struct rsa_priv_f2_pdb))
 #define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
 				 sizeof(struct rsa_priv_f3_pdb))
+#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */
+
+/* buffer filled with zeros, used for padding */
+static u8 *zero_buffer;
 
 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
 			 struct akcipher_request *req)
 {
+	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
 	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
-	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);
 
 	if (edesc->sec4_sg_bytes)
 		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
@@ -168,6 +174,13 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
 	akcipher_request_complete(req, err);
 }
 
+/**
+ * Count leading zeros, need it to strip, from a given scatterlist
+ *
+ * @sgl   : scatterlist to count zeros from
+ * @nbytes: number of zeros, in bytes, to strip
+ * @flags : operation flags
+ */
 static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
 					unsigned int nbytes,
 					unsigned int flags)
@@ -187,7 +200,8 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
 	lzeros = 0;
 	len = 0;
 	while (nbytes > 0) {
-		while (len && !*buff) {
+		/* do not strip more than given bytes */
+		while (len && !*buff && lzeros < nbytes) {
 			lzeros++;
 			len--;
 			buff++;
@@ -218,6 +232,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct device *dev = ctx->dev;
 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+	struct caam_rsa_key *key = &ctx->key;
 	struct rsa_edesc *edesc;
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
@@ -225,22 +240,45 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
 	int sgc;
 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
 	int src_nents, dst_nents;
+	unsigned int diff_size = 0;
 	int lzeros;
 
-	lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
-	if (lzeros < 0)
-		return ERR_PTR(lzeros);
-
-	req->src_len -= lzeros;
-	req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
+	if (req->src_len > key->n_sz) {
+		/*
+		 * strip leading zeros and
+		 * return the number of zeros to skip
+		 */
+		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
+						      key->n_sz, sg_flags);
+		if (lzeros < 0)
+			return ERR_PTR(lzeros);
+
+		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
+						      lzeros);
+		req_ctx->fixup_src_len = req->src_len - lzeros;
+	} else {
+		/*
+		 * input src is less then n key modulus,
+		 * so there will be zero padding
+		 */
+		diff_size = key->n_sz - req->src_len;
+		req_ctx->fixup_src = req->src;
+		req_ctx->fixup_src_len = req->src_len;
+	}
 
-	src_nents = sg_nents_for_len(req->src, req->src_len);
+	src_nents = sg_nents_for_len(req_ctx->fixup_src,
+				     req_ctx->fixup_src_len);
 	dst_nents = sg_nents_for_len(req->dst, req->dst_len);
 
-	if (src_nents > 1)
-		sec4_sg_len = src_nents;
+	if (!diff_size && src_nents == 1)
+		sec4_sg_len = 0; /* no need for an input hw s/g table */
+	else
+		sec4_sg_len = src_nents + !!diff_size;
+	sec4_sg_index = sec4_sg_len;
 	if (dst_nents > 1)
-		sec4_sg_len += dst_nents;
+		sec4_sg_len += pad_sg_nents(dst_nents);
+	else
+		sec4_sg_len = pad_sg_nents(sec4_sg_len);
 
 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
 
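The reworked rsa_edesc_alloc() above normalizes the RSA input toward key->n_sz bytes: an input longer than the modulus may only exceed it by leading zeros, which are counted and skipped, while a shorter input is logically left-padded with zeros. A userspace sketch of that policy on plain buffers (scatterlists and the DMA-mapped padding buffer are deliberately left out):

#include <stddef.h>
#include <string.h>

/* strip at most max_strip leading zero bytes, return how many to skip */
static size_t count_leading_zeros(const unsigned char *buf, size_t len,
                                  size_t max_strip)
{
        size_t lzeros = 0;

        while (lzeros < len && lzeros < max_strip && buf[lzeros] == 0)
                lzeros++;
        return lzeros;
}

/* left-pad src to exactly n_sz bytes in dst (dst must hold n_sz bytes) */
static void zero_pad_input(unsigned char *dst, size_t n_sz,
                           const unsigned char *src, size_t src_len)
{
        size_t diff = n_sz - src_len;   /* caller ensures src_len <= n_sz */

        memset(dst, 0, diff);
        memcpy(dst + diff, src, src_len);
}

In the driver no bytes are actually copied for padding: as the next hunks show, a pre-mapped zero_buffer entry of diff_size bytes is simply prepended to the hardware S/G table.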
@@ -250,7 +288,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
 	if (!edesc)
 		return ERR_PTR(-ENOMEM);
 
-	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+	sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
 	if (unlikely(!sgc)) {
 		dev_err(dev, "unable to map source\n");
 		goto src_fail;
@@ -263,14 +301,16 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
 	}
 
 	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
+	if (diff_size)
+		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
+				   0);
+
+	if (sec4_sg_index)
+		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
+				   edesc->sec4_sg + !!diff_size, 0);
 
-	sec4_sg_index = 0;
-	if (src_nents > 1) {
-		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
-		sec4_sg_index += src_nents;
-	}
 	if (dst_nents > 1)
-		sg_to_sec4_sg_last(req->dst, dst_nents,
+		sg_to_sec4_sg_last(req->dst, req->dst_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 
 	/* Save nents for later use in Job Descriptor */
@@ -289,12 +329,16 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
 
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 
+	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+			     edesc->sec4_sg_bytes, 1);
+
 	return edesc;
 
 sec4_sg_fail:
 	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
 dst_fail:
-	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
 src_fail:
 	kfree(edesc);
 	return ERR_PTR(-ENOMEM);
@@ -304,6 +348,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
 			   struct rsa_edesc *edesc)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct caam_rsa_key *key = &ctx->key;
 	struct device *dev = ctx->dev;
@@ -328,7 +373,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
 		pdb->f_dma = edesc->sec4_sg_dma;
 		sec4_sg_index += edesc->src_nents;
 	} else {
-		pdb->f_dma = sg_dma_address(req->src);
+		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
 	}
 
 	if (edesc->dst_nents > 1) {
@@ -340,7 +385,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
 	}
 
 	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
-	pdb->f_len = req->src_len;
+	pdb->f_len = req_ctx->fixup_src_len;
 
 	return 0;
 }
@@ -373,7 +418,9 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
 		pdb->g_dma = edesc->sec4_sg_dma;
 		sec4_sg_index += edesc->src_nents;
 	} else {
-		pdb->g_dma = sg_dma_address(req->src);
+		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
 	}
 
 	if (edesc->dst_nents > 1) {
@@ -436,7 +483,9 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
 		pdb->g_dma = edesc->sec4_sg_dma;
 		sec4_sg_index += edesc->src_nents;
 	} else {
-		pdb->g_dma = sg_dma_address(req->src);
+		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
 	}
 
 	if (edesc->dst_nents > 1) {
@@ -523,7 +572,9 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
 		pdb->g_dma = edesc->sec4_sg_dma;
 		sec4_sg_index += edesc->src_nents;
 	} else {
-		pdb->g_dma = sg_dma_address(req->src);
+		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
 	}
 
 	if (edesc->dst_nents > 1) {
@@ -978,6 +1029,15 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
 		return PTR_ERR(ctx->dev);
 	}
 
+	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
+					  CAAM_RSA_MAX_INPUT_SIZE - 1,
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
+		dev_err(ctx->dev, "unable to map padding\n");
+		caam_jr_free(ctx->dev);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -987,6 +1047,8 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct caam_rsa_key *key = &ctx->key;
 
+	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
+			 1, DMA_TO_DEVICE);
 	caam_rsa_free_key(key);
 	caam_jr_free(ctx->dev);
 }
@@ -1010,41 +1072,12 @@ static struct akcipher_alg caam_rsa = {
 };
 
 /* Public Key Cryptography module initialization handler */
-static int __init caam_pkc_init(void)
+int caam_pkc_init(struct device *ctrldev)
 {
-	struct device_node *dev_node;
-	struct platform_device *pdev;
-	struct device *ctrldev;
-	struct caam_drv_private *priv;
+	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
 	u32 pk_inst;
 	int err;
 
-	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node) {
-		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-		if (!dev_node)
-			return -ENODEV;
-	}
-
-	pdev = of_find_device_by_node(dev_node);
-	if (!pdev) {
-		of_node_put(dev_node);
-		return -ENODEV;
-	}
-
-	ctrldev = &pdev->dev;
-	priv = dev_get_drvdata(ctrldev);
-	of_node_put(dev_node);
-
-	/*
-	 * If priv is NULL, it's probably because the caam driver wasn't
-	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
-	 */
-	if (!priv) {
-		err = -ENODEV;
-		goto out_put_dev;
-	}
-
 	/* Determine public key hardware accelerator presence. */
 	if (priv->era < 10)
 		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
@@ -1053,31 +1086,29 @@ static int __init caam_pkc_init(void)
 		pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
 
 	/* Do not register algorithms if PKHA is not present. */
-	if (!pk_inst) {
-		err = -ENODEV;
-		goto out_put_dev;
-	}
+	if (!pk_inst)
+		return 0;
+
+	/* allocate zero buffer, used for padding input */
+	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
+			      GFP_KERNEL);
+	if (!zero_buffer)
+		return -ENOMEM;
 
 	err = crypto_register_akcipher(&caam_rsa);
-	if (err)
+	if (err) {
+		kfree(zero_buffer);
 		dev_warn(ctrldev, "%s alg registration failed\n",
 			 caam_rsa.base.cra_driver_name);
-	else
+	} else {
 		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
+	}
 
-out_put_dev:
-	put_device(ctrldev);
 	return err;
 }
 
-static void __exit caam_pkc_exit(void)
+void caam_pkc_exit(void)
 {
+	kfree(zero_buffer);
 	crypto_unregister_akcipher(&caam_rsa);
 }
-
-module_init(caam_pkc_init);
-module_exit(caam_pkc_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
-MODULE_AUTHOR("Freescale Semiconductor");
@@ -89,18 +89,25 @@ struct caam_rsa_key {
  * caam_rsa_ctx - per session context.
  * @key         : RSA key in DMA zone
  * @dev         : device structure
+ * @padding_dma : dma address of padding, for adding it to the input
  */
 struct caam_rsa_ctx {
 	struct caam_rsa_key key;
 	struct device *dev;
+	dma_addr_t padding_dma;
+
 };
 
 /**
  * caam_rsa_req_ctx - per request context.
- * @src: input scatterlist (stripped of leading zeros)
+ * @src           : input scatterlist (stripped of leading zeros)
+ * @fixup_src     : input scatterlist (that might be stripped of leading zeros)
+ * @fixup_src_len : length of the fixup_src input scatterlist
  */
 struct caam_rsa_req_ctx {
 	struct scatterlist src[2];
+	struct scatterlist *fixup_src;
+	unsigned int fixup_src_len;
 };
 
 /**
|
@ -3,7 +3,7 @@
|
|||||||
* caam - Freescale FSL CAAM support for hw_random
|
* caam - Freescale FSL CAAM support for hw_random
|
||||||
*
|
*
|
||||||
* Copyright 2011 Freescale Semiconductor, Inc.
|
* Copyright 2011 Freescale Semiconductor, Inc.
|
||||||
* Copyright 2018 NXP
|
* Copyright 2018-2019 NXP
|
||||||
*
|
*
|
||||||
* Based on caamalg.c crypto API driver.
|
* Based on caamalg.c crypto API driver.
|
||||||
*
|
*
|
||||||
@ -113,10 +113,8 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
|
|||||||
/* Buffer refilled, invalidate cache */
|
/* Buffer refilled, invalidate cache */
|
||||||
dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
|
dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
|
||||||
|
|
||||||
#ifdef DEBUG
|
print_hex_dump_debug("rng refreshed buf@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
||||||
print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
|
bd->buf, RN_BUF_SIZE, 1);
|
||||||
DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
|
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
|
static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
|
||||||
@ -209,10 +207,10 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
|
|||||||
dev_err(jrdev, "unable to map shared descriptor\n");
|
dev_err(jrdev, "unable to map shared descriptor\n");
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
#ifdef DEBUG
|
|
||||||
print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
print_hex_dump_debug("rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
||||||
desc, desc_bytes(desc), 1);
|
desc, desc_bytes(desc), 1);
|
||||||
#endif
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -233,10 +231,10 @@ static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
|
|||||||
}
|
}
|
||||||
|
|
||||||
append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
|
append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
|
||||||
#ifdef DEBUG
|
|
||||||
print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
||||||
desc, desc_bytes(desc), 1);
|
desc, desc_bytes(desc), 1);
|
||||||
#endif
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -296,47 +294,20 @@ static struct hwrng caam_rng = {
|
|||||||
.read = caam_read,
|
.read = caam_read,
|
||||||
};
|
};
|
||||||
|
|
||||||
static void __exit caam_rng_exit(void)
|
void caam_rng_exit(void)
|
||||||
{
|
{
|
||||||
caam_jr_free(rng_ctx->jrdev);
|
caam_jr_free(rng_ctx->jrdev);
|
||||||
hwrng_unregister(&caam_rng);
|
hwrng_unregister(&caam_rng);
|
||||||
kfree(rng_ctx);
|
kfree(rng_ctx);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int __init caam_rng_init(void)
|
int caam_rng_init(struct device *ctrldev)
|
||||||
{
|
{
|
||||||
struct device *dev;
|
struct device *dev;
|
||||||
struct device_node *dev_node;
|
|
||||||
struct platform_device *pdev;
|
|
||||||
struct caam_drv_private *priv;
|
|
||||||
u32 rng_inst;
|
u32 rng_inst;
|
||||||
|
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
|
|
||||||
if (!dev_node) {
|
|
||||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
|
|
||||||
if (!dev_node)
|
|
||||||
return -ENODEV;
|
|
||||||
}
|
|
||||||
|
|
||||||
pdev = of_find_device_by_node(dev_node);
|
|
||||||
if (!pdev) {
|
|
||||||
of_node_put(dev_node);
|
|
||||||
return -ENODEV;
|
|
||||||
}
|
|
||||||
|
|
||||||
priv = dev_get_drvdata(&pdev->dev);
|
|
||||||
of_node_put(dev_node);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If priv is NULL, it's probably because the caam driver wasn't
|
|
||||||
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
|
|
||||||
*/
|
|
||||||
if (!priv) {
|
|
||||||
err = -ENODEV;
|
|
||||||
goto out_put_dev;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check for an instantiated RNG before registration */
|
/* Check for an instantiated RNG before registration */
|
||||||
if (priv->era < 10)
|
if (priv->era < 10)
|
||||||
rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
|
rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
|
||||||
@ -344,16 +315,13 @@ static int __init caam_rng_init(void)
|
|||||||
else
|
else
|
||||||
rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
|
rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
|
||||||
|
|
||||||
if (!rng_inst) {
|
if (!rng_inst)
|
||||||
err = -ENODEV;
|
return 0;
|
||||||
goto out_put_dev;
|
|
||||||
}
|
|
||||||
|
|
||||||
dev = caam_jr_alloc();
|
dev = caam_jr_alloc();
|
||||||
if (IS_ERR(dev)) {
|
if (IS_ERR(dev)) {
|
||||||
pr_err("Job Ring Device allocation for transform failed\n");
|
pr_err("Job Ring Device allocation for transform failed\n");
|
||||||
err = PTR_ERR(dev);
|
return PTR_ERR(dev);
|
||||||
goto out_put_dev;
|
|
||||||
}
|
}
|
||||||
rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
|
rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
|
||||||
if (!rng_ctx) {
|
if (!rng_ctx) {
|
||||||
@ -364,7 +332,6 @@ static int __init caam_rng_init(void)
|
|||||||
if (err)
|
if (err)
|
||||||
goto free_rng_ctx;
|
goto free_rng_ctx;
|
||||||
|
|
||||||
put_device(&pdev->dev);
|
|
||||||
dev_info(dev, "registering rng-caam\n");
|
dev_info(dev, "registering rng-caam\n");
|
||||||
return hwrng_register(&caam_rng);
|
return hwrng_register(&caam_rng);
|
||||||
|
|
||||||
@ -372,14 +339,5 @@ static int __init caam_rng_init(void)
|
|||||||
kfree(rng_ctx);
|
kfree(rng_ctx);
|
||||||
free_caam_alloc:
|
free_caam_alloc:
|
||||||
caam_jr_free(dev);
|
caam_jr_free(dev);
|
||||||
out_put_dev:
|
|
||||||
put_device(&pdev->dev);
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
module_init(caam_rng_init);
|
|
||||||
module_exit(caam_rng_exit);
|
|
||||||
|
|
||||||
MODULE_LICENSE("GPL");
|
|
||||||
MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
|
|
||||||
MODULE_AUTHOR("Freescale Semiconductor - NMG");
|
|
||||||
|
@@ -3,7 +3,7 @@
  * Controller-level driver, kernel property detection, initialization
  *
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
  */
 
 #include <linux/device.h>
@@ -323,8 +323,8 @@ static int caam_remove(struct platform_device *pdev)
 	of_platform_depopulate(ctrldev);
 
 #ifdef CONFIG_CAAM_QI
-	if (ctrlpriv->qidev)
-		caam_qi_shutdown(ctrlpriv->qidev);
+	if (ctrlpriv->qi_init)
+		caam_qi_shutdown(ctrldev);
 #endif
 
 	/*
@@ -540,7 +540,8 @@ static int caam_probe(struct platform_device *pdev)
 	ctrlpriv->caam_ipg = clk;
 
 	if (!of_machine_is_compatible("fsl,imx7d") &&
-	    !of_machine_is_compatible("fsl,imx7s")) {
+	    !of_machine_is_compatible("fsl,imx7s") &&
+	    !of_machine_is_compatible("fsl,imx7ulp")) {
 		clk = caam_drv_identify_clk(&pdev->dev, "mem");
 		if (IS_ERR(clk)) {
 			ret = PTR_ERR(clk);
@@ -562,7 +563,8 @@ static int caam_probe(struct platform_device *pdev)
 
 	if (!of_machine_is_compatible("fsl,imx6ul") &&
 	    !of_machine_is_compatible("fsl,imx7d") &&
-	    !of_machine_is_compatible("fsl,imx7s")) {
+	    !of_machine_is_compatible("fsl,imx7s") &&
+	    !of_machine_is_compatible("fsl,imx7ulp")) {
 		clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
 		if (IS_ERR(clk)) {
 			ret = PTR_ERR(clk);
@@ -702,12 +704,7 @@ static int caam_probe(struct platform_device *pdev)
 	}
 
 	ctrlpriv->era = caam_get_era(ctrl);
-
-	ret = of_platform_populate(nprop, caam_match, NULL, dev);
-	if (ret) {
-		dev_err(dev, "JR platform devices creation error\n");
-		goto iounmap_ctrl;
-	}
+	ctrlpriv->domain = iommu_get_domain_for_dev(dev);
 
 #ifdef CONFIG_DEBUG_FS
 	/*
@@ -721,19 +718,6 @@ static int caam_probe(struct platform_device *pdev)
 	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
 #endif
 
-	ring = 0;
-	for_each_available_child_of_node(nprop, np)
-		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
-		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
-			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
-					     ((__force uint8_t *)ctrl +
-					     (ring + JR_BLOCK_NUMBER) *
-					      BLOCK_OFFSET
-					     );
-			ctrlpriv->total_jobrs++;
-			ring++;
-		}
-
 	/* Check to see if (DPAA 1.x) QI present. If so, enable */
 	ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
 	if (ctrlpriv->qi_present && !caam_dpaa2) {
@@ -752,6 +736,25 @@ static int caam_probe(struct platform_device *pdev)
 #endif
 	}
 
+	ret = of_platform_populate(nprop, caam_match, NULL, dev);
+	if (ret) {
+		dev_err(dev, "JR platform devices creation error\n");
+		goto shutdown_qi;
+	}
+
+	ring = 0;
+	for_each_available_child_of_node(nprop, np)
+		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+					     ((__force uint8_t *)ctrl +
+					     (ring + JR_BLOCK_NUMBER) *
+					      BLOCK_OFFSET
+					     );
+			ctrlpriv->total_jobrs++;
+			ring++;
+		}
+
 	/* If no QI and no rings specified, quit and go home */
 	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
 		dev_err(dev, "no queues configured, terminating\n");
@@ -898,6 +901,11 @@ static int caam_probe(struct platform_device *pdev)
 	caam_remove(pdev);
 	return ret;
 
+shutdown_qi:
+#ifdef CONFIG_CAAM_QI
+	if (ctrlpriv->qi_init)
+		caam_qi_shutdown(dev);
+#endif
 iounmap_ctrl:
 	iounmap(ctrl);
 disable_caam_emi_slow:
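The probe reordering above also reshapes the error path: the QI block is now brought up before the job-ring platform devices are populated, so a failure in of_platform_populate() must unwind through the new shutdown_qi label before iounmap_ctrl. A generic sketch of that unwind ordering, with hypothetical helpers standing in for the driver's steps:

/* hypothetical helpers; only the goto structure mirrors the patch */
int map_registers(void);
void unmap_registers(void);
int init_qi(void);
void shutdown_qi_block(void);
int populate_job_rings(void);

static int example_probe(void)
{
        int ret;

        ret = map_registers();
        if (ret)
                return ret;

        ret = init_qi();
        if (ret)
                goto unmap;

        ret = populate_job_rings();
        if (ret)
                goto shutdown_qi;       /* release in reverse order */

        return 0;

shutdown_qi:
        shutdown_qi_block();
unmap:
        unmap_registers();
        return ret;
}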
@@ -3,6 +3,7 @@
  * caam descriptor construction helper functions
  *
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2019 NXP
  */
 
 #ifndef DESC_CONSTR_H
@@ -37,6 +38,16 @@
 
 extern bool caam_little_end;
 
+/*
+ * HW fetches 4 S/G table entries at a time, irrespective of how many entries
+ * are in the table. It's SW's responsibility to make sure these accesses
+ * do not have side effects.
+ */
+static inline int pad_sg_nents(int sg_nents)
+{
+	return ALIGN(sg_nents, 4);
+}
+
 static inline int desc_len(u32 * const desc)
 {
 	return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
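The new pad_sg_nents() helper above is what drives the table-size changes in caamhash.c and caampkc.c: because the engine always fetches S/G entries four at a time, every table's entry count is rounded up to a multiple of 4 before sec4_sg_bytes is computed, so the over-fetched entries always land inside driver-owned memory. A small standalone sketch of the rounding, assuming the kernel's ALIGN() semantics (round up to a power-of-two multiple):

#include <assert.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static inline int pad_sg_nents(int sg_nents)
{
        /* HW reads S/G entries in bursts of 4, so round the count up */
        return ALIGN(sg_nents, 4);
}

int main(void)
{
        assert(pad_sg_nents(1) == 4);
        assert(pad_sg_nents(4) == 4);
        assert(pad_sg_nents(5) == 8);
        return 0;
}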
@@ -13,7 +13,7 @@
 #ifdef DEBUG
 #include <linux/highmem.h>
 
-void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+void caam_dump_sg(const char *prefix_str, int prefix_type,
 		  int rowsize, int groupsize, struct scatterlist *sg,
 		  size_t tlen, bool ascii)
 {
@@ -35,15 +35,15 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
 
 		buf = it_page + it->offset;
 		len = min_t(size_t, tlen, it->length);
-		print_hex_dump(level, prefix_str, prefix_type, rowsize,
-			       groupsize, buf, len, ascii);
+		print_hex_dump_debug(prefix_str, prefix_type, rowsize,
+				     groupsize, buf, len, ascii);
 		tlen -= len;
 
 		kunmap_atomic(it_page);
 	}
 }
 #else
-void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+void caam_dump_sg(const char *prefix_str, int prefix_type,
 		  int rowsize, int groupsize, struct scatterlist *sg,
 		  size_t tlen, bool ascii)
 {}
@@ -17,7 +17,7 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
 #define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
 #define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
 
-void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+void caam_dump_sg(const char *prefix_str, int prefix_type,
 		  int rowsize, int groupsize, struct scatterlist *sg,
 		  size_t tlen, bool ascii);
 
@@ -4,7 +4,7 @@
  * Private/internal definitions between modules
  *
  * Copyright 2008-2011 Freescale Semiconductor, Inc.
- *
+ * Copyright 2019 NXP
  */
 
 #ifndef INTERN_H
@@ -63,10 +63,6 @@ struct caam_drv_private_jr {
  * Driver-private storage for a single CAAM block instance
  */
 struct caam_drv_private {
-#ifdef CONFIG_CAAM_QI
-	struct device *qidev;
-#endif
-
 	/* Physical-presence section */
 	struct caam_ctrl __iomem *ctrl; /* controller region */
 	struct caam_deco __iomem *deco; /* DECO/CCB views */
@@ -74,12 +70,17 @@ struct caam_drv_private {
 	struct caam_queue_if __iomem *qi; /* QI control region */
 	struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
 
+	struct iommu_domain *domain;
+
 	/*
 	 * Detected geometry block. Filled in from device tree if powerpc,
 	 * or from register-based version detection code
 	 */
 	u8 total_jobrs;		/* Total Job Rings in device */
 	u8 qi_present;		/* Nonzero if QI present in device */
+#ifdef CONFIG_CAAM_QI
+	u8 qi_init;		/* Nonzero if QI has been initialized */
+#endif
 	u8 mc_en;		/* Nonzero if MC f/w is active */
 	int secvio_irq;		/* Security violation interrupt number */
 	int virt_en;		/* Virtualization enabled in CAAM */
@@ -107,8 +108,95 @@ struct caam_drv_private {
 #endif
 };
 
-void caam_jr_algapi_init(struct device *dev);
-void caam_jr_algapi_remove(struct device *dev);
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+
+int caam_algapi_init(struct device *dev);
+void caam_algapi_exit(void);
+
+#else
+
+static inline int caam_algapi_init(struct device *dev)
+{
+	return 0;
+}
+
+static inline void caam_algapi_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
+
+int caam_algapi_hash_init(struct device *dev);
+void caam_algapi_hash_exit(void);
+
+#else
+
+static inline int caam_algapi_hash_init(struct device *dev)
+{
+	return 0;
+}
+
+static inline void caam_algapi_hash_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API
+
+int caam_pkc_init(struct device *dev);
+void caam_pkc_exit(void);
+
+#else
+
+static inline int caam_pkc_init(struct device *dev)
+{
+	return 0;
+}
+
+static inline void caam_pkc_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
+
+int caam_rng_init(struct device *dev);
+void caam_rng_exit(void);
+
+#else
+
+static inline int caam_rng_init(struct device *dev)
+{
+	return 0;
+}
+
+static inline void caam_rng_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
+
+#ifdef CONFIG_CAAM_QI
+
+int caam_qi_algapi_init(struct device *dev);
+void caam_qi_algapi_exit(void);
+
+#else
+
+static inline int caam_qi_algapi_init(struct device *dev)
+{
+	return 0;
+}
+
+static inline void caam_qi_algapi_exit(void)
+{
+}
+
+#endif /* CONFIG_CAAM_QI */
+
 #ifdef CONFIG_DEBUG_FS
 static int caam_debugfs_u64_get(void *data, u64 *val)
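intern.h now gives every sub-driver (skcipher/aead, ahash, PKC, RNG, QI) a pair of entry points that collapse to empty static inlines when the corresponding Kconfig option is off, so the job ring code can call them unconditionally. The pattern in isolation, with placeholder names (CONFIG_EXAMPLE_FEATURE and example_* are not real kernel symbols):

struct device;  /* forward declaration, as in a kernel header */

#ifdef CONFIG_EXAMPLE_FEATURE

int example_init(struct device *dev);
void example_exit(void);

#else

static inline int example_init(struct device *dev)
{
        return 0;       /* feature compiled out: succeed silently */
}

static inline void example_exit(void)
{
}

#endif /* CONFIG_EXAMPLE_FEATURE */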
@@ -4,6 +4,7 @@
  * JobR backend functionality
  *
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2019 NXP
  */
 
 #include <linux/of_irq.h>
@@ -23,6 +24,43 @@ struct jr_driver_data {
 } ____cacheline_aligned;
 
 static struct jr_driver_data driver_data;
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+static void register_algs(struct device *dev)
+{
+	mutex_lock(&algs_lock);
+
+	if (++active_devs != 1)
+		goto algs_unlock;
+
+	caam_algapi_init(dev);
+	caam_algapi_hash_init(dev);
+	caam_pkc_init(dev);
+	caam_rng_init(dev);
+	caam_qi_algapi_init(dev);
+
+algs_unlock:
+	mutex_unlock(&algs_lock);
+}
+
+static void unregister_algs(void)
+{
+	mutex_lock(&algs_lock);
+
+	if (--active_devs != 0)
+		goto algs_unlock;
+
+	caam_qi_algapi_exit();
+
+	caam_rng_exit();
+	caam_pkc_exit();
+	caam_algapi_hash_exit();
+	caam_algapi_exit();
+
+algs_unlock:
+	mutex_unlock(&algs_lock);
+}
 
 static int caam_reset_hw_jr(struct device *dev)
 {
@ -109,6 +147,9 @@ static int caam_jr_remove(struct platform_device *pdev)
|
|||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Unregister JR-based RNG & crypto algorithms */
|
||||||
|
unregister_algs();
|
||||||
|
|
||||||
/* Remove the node from Physical JobR list maintained by driver */
|
/* Remove the node from Physical JobR list maintained by driver */
|
||||||
spin_lock(&driver_data.jr_alloc_lock);
|
spin_lock(&driver_data.jr_alloc_lock);
|
||||||
list_del(&jrpriv->list_node);
|
list_del(&jrpriv->list_node);
|
||||||
@ -541,6 +582,8 @@ static int caam_jr_probe(struct platform_device *pdev)
|
|||||||
|
|
||||||
atomic_set(&jrpriv->tfm_count, 0);
|
atomic_set(&jrpriv->tfm_count, 0);
|
||||||
|
|
||||||
|
register_algs(jrdev->parent);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
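The job-ring hunks above centralise algorithm registration: the first ring that probes successfully registers every JR-dependent interface with its parent device, and the last ring removed unregisters them, with a mutex serialising the counter. Stripped of the CAAM specifics, the idiom looks like this (helper names are hypothetical)::

    static DEFINE_MUTEX(users_lock);
    static unsigned int users;

    static void get_shared(struct device *dev)
    {
            mutex_lock(&users_lock);
            if (++users == 1)               /* first user brings services up */
                    bring_up_services(dev); /* hypothetical helper */
            mutex_unlock(&users_lock);
    }

    static void put_shared(void)
    {
            mutex_lock(&users_lock);
            if (--users == 0)               /* last user tears them down */
                    tear_down_services();   /* hypothetical helper */
            mutex_unlock(&users_lock);
    }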
@@ -16,9 +16,7 @@ void split_key_done(struct device *dev, u32 *desc, u32 err,
 {
 	struct split_key_result *res = context;

-#ifdef DEBUG
-	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+	dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

 	if (err)
 		caam_jr_strstatus(dev, err);
@@ -55,12 +53,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
 	adata->keylen_pad = split_key_pad_len(adata->algtype &
 					      OP_ALG_ALGSEL_MASK);

-#ifdef DEBUG
-	dev_err(jrdev, "split keylen %d split keylen padded %d\n",
-		adata->keylen, adata->keylen_pad);
-	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
-#endif
+	dev_dbg(jrdev, "split keylen %d split keylen padded %d\n",
+		adata->keylen, adata->keylen_pad);
+	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);

 	if (adata->keylen_pad > max_keylen)
 		return -EINVAL;
@@ -102,10 +98,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
 	append_fifo_store(desc, dma_addr, adata->keylen,
 			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);

-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);

 	result.err = 0;
 	init_completion(&result.completion);
@@ -115,11 +110,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
 		/* in progress */
 		wait_for_completion(&result.completion);
 		ret = result.err;
-#ifdef DEBUG
-		print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
-			       adata->keylen_pad, 1);
-#endif
+
+		print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+				     adata->keylen_pad, 1);
 	}

 	dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL);
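The split-key hunks are part of a sweep replacing #ifdef DEBUG blocks with dev_dbg() and print_hex_dump_debug(): the dumps are then always compiled in at debug level and, with CONFIG_DYNAMIC_DEBUG, can be enabled per call site at run time instead of requiring a rebuild with -DDEBUG. Illustrative usage, assuming some buffer `buf` of length `len`::

    /* old style: invisible unless the file is rebuilt with -DDEBUG */
    #ifdef DEBUG
            print_hex_dump(KERN_ERR, "buf@: ", DUMP_PREFIX_ADDRESS, 16, 4,
                           buf, len, 1);
    #endif

    /* new style: always built, emitted at debug level, switchable via
     * /sys/kernel/debug/dynamic_debug/control
     */
            print_hex_dump_debug("buf@: ", DUMP_PREFIX_ADDRESS, 16, 4,
                                 buf, len, 1);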
@@ -4,7 +4,7 @@
  * Queue Interface backend functionality
  *
  * Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2017, 2019 NXP
  */

 #include <linux/cpumask.h>
@@ -18,6 +18,7 @@
 #include "desc_constr.h"

 #define PREHDR_RSLS_SHIFT	31
+#define PREHDR_ABS		BIT(25)

 /*
  * Use a reasonable backlog of frames (per CPU) as congestion threshold,
@@ -58,11 +59,9 @@ static DEFINE_PER_CPU(int, last_cpu);
 /*
  * caam_qi_priv - CAAM QI backend private params
  * @cgr: QMan congestion group
- * @qi_pdev: platform device for QI backend
  */
 struct caam_qi_priv {
 	struct qman_cgr cgr;
-	struct platform_device *qi_pdev;
 };

 static struct caam_qi_priv qipriv ____cacheline_aligned;
@@ -95,6 +94,16 @@ static u64 times_congested;
  */
 static struct kmem_cache *qi_cache;

+static void *caam_iova_to_virt(struct iommu_domain *domain,
+			       dma_addr_t iova_addr)
+{
+	phys_addr_t phys_addr;
+
+	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
+
+	return phys_to_virt(phys_addr);
+}
+
 int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
 {
 	struct qm_fd fd;
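caam_iova_to_virt() is what makes the response path IOMMU-safe: a frame descriptor hands back an I/O virtual address, which must be translated through the device's IOMMU domain (when one exists) before phys_to_virt() yields a usable CPU pointer. A hedged sketch of how the domain feeding that helper would be obtained at probe time (the `priv->domain` field and surrounding context are assumptions here)::

    #include <linux/iommu.h>

    /* probe: remember the domain; NULL when no IOMMU translates the device */
    priv->domain = iommu_get_domain_for_dev(dev);

    /* completion path: recover the request from the hardware address */
    drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));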
@@ -135,6 +144,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
 	const struct qm_fd *fd;
 	struct caam_drv_req *drv_req;
 	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+	struct caam_drv_private *priv = dev_get_drvdata(qidev);

 	fd = &msg->ern.fd;

@@ -143,7 +153,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
 		return;
 	}

-	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
 	if (!drv_req) {
 		dev_err(qidev,
 			"Can't find original request for CAAM response\n");
@@ -346,6 +356,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
 	 */
 	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
 					   num_words);
+	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
 	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
 	dma_sync_single_for_device(qidev, drv_ctx->context_a,
 				   sizeof(drv_ctx->sh_desc) +
@@ -401,6 +412,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
 	 */
 	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
 					   num_words);
+	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
 	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
 	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
 	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
@@ -488,7 +500,7 @@ EXPORT_SYMBOL(caam_drv_ctx_rel);
 void caam_qi_shutdown(struct device *qidev)
 {
 	int i;
-	struct caam_qi_priv *priv = dev_get_drvdata(qidev);
+	struct caam_qi_priv *priv = &qipriv;
 	const cpumask_t *cpus = qman_affine_cpus();

 	for_each_cpu(i, cpus) {
@@ -506,8 +518,6 @@ void caam_qi_shutdown(struct device *qidev)
 	qman_release_cgrid(priv->cgr.cgrid);

 	kmem_cache_destroy(qi_cache);
-
-	platform_device_unregister(priv->qi_pdev);
 }

 static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
@@ -550,6 +560,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
 	struct caam_drv_req *drv_req;
 	const struct qm_fd *fd;
 	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+	struct caam_drv_private *priv = dev_get_drvdata(qidev);
 	u32 status;

 	if (caam_qi_napi_schedule(p, caam_napi))
@@ -572,7 +583,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
 		return qman_cb_dqrr_consume;
 	}

-	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
 	if (unlikely(!drv_req)) {
 		dev_err(qidev,
 			"Can't find original request for caam response\n");
@@ -692,33 +703,17 @@ static void free_rsp_fqs(void)
 int caam_qi_init(struct platform_device *caam_pdev)
 {
 	int err, i;
-	struct platform_device *qi_pdev;
 	struct device *ctrldev = &caam_pdev->dev, *qidev;
 	struct caam_drv_private *ctrlpriv;
 	const cpumask_t *cpus = qman_affine_cpus();
-	static struct platform_device_info qi_pdev_info = {
-		.name = "caam_qi",
-		.id = PLATFORM_DEVID_NONE
-	};
-
-	qi_pdev_info.parent = ctrldev;
-	qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
-	qi_pdev = platform_device_register_full(&qi_pdev_info);
-	if (IS_ERR(qi_pdev))
-		return PTR_ERR(qi_pdev);
-	set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));

 	ctrlpriv = dev_get_drvdata(ctrldev);
-	qidev = &qi_pdev->dev;
-
-	qipriv.qi_pdev = qi_pdev;
-	dev_set_drvdata(qidev, &qipriv);
+	qidev = ctrldev;

 	/* Initialize the congestion detection */
 	err = init_cgr(qidev);
 	if (err) {
 		dev_err(qidev, "CGR initialization failed: %d\n", err);
-		platform_device_unregister(qi_pdev);
 		return err;
 	}
@@ -727,7 +722,6 @@ int caam_qi_init(struct platform_device *caam_pdev)
 	if (err) {
 		dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
 		free_rsp_fqs();
-		platform_device_unregister(qi_pdev);
 		return err;
 	}
@@ -750,15 +744,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
 		napi_enable(irqtask);
 	}

-	/* Hook up QI device to parent controlling caam device */
-	ctrlpriv->qidev = qidev;
-
 	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
 				     SLAB_CACHE_DMA, NULL);
 	if (!qi_cache) {
 		dev_err(qidev, "Can't allocate CAAM cache\n");
 		free_rsp_fqs();
-		platform_device_unregister(qi_pdev);
 		return -ENOMEM;
 	}
@@ -766,6 +756,8 @@ int caam_qi_init(struct platform_device *caam_pdev)
 	debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
 			    &times_congested, &caam_fops_u64_ro);
 #endif
+
+	ctrlpriv->qi_init = 1;
 	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
 	return 0;
 }
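Taken together, the Queue Interface hunks delete the throwaway "caam_qi" platform device that existed only to act as a DMA-mapping device: the backend now borrows the controller's struct device, so QI mappings inherit the real device's DMA and IOMMU configuration, and every error path above loses its platform_device_unregister() call. Schematically (names hypothetical, not the upstream function)::

    int backend_init(struct platform_device *parent_pdev)
    {
            struct device *dev = &parent_pdev->dev; /* shared with parent */
            int err;

            err = bring_up_queues(dev);     /* hypothetical helper */
            if (err)
                    return err;             /* nothing extra to unwind */

            return 0;
    }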
@@ -54,15 +54,19 @@ static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
  * but does not have final bit; instead, returns last entry
  */
 static inline struct qm_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
 	    struct qm_sg_entry *qm_sg_ptr, u16 offset)
 {
-	while (sg_count && sg) {
-		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
-				 sg_dma_len(sg), offset);
+	int ent_len;
+
+	while (len) {
+		ent_len = min_t(int, sg_dma_len(sg), len);
+
+		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+				 offset);
 		qm_sg_ptr++;
 		sg = sg_next(sg);
-		sg_count--;
+		len -= ent_len;
 	}
 	return qm_sg_ptr - 1;
 }
@@ -71,10 +75,10 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count,
  * convert scatterlist to h/w link table format
  * scatterlist must have been previously dma mapped
  */
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
 				    struct qm_sg_entry *qm_sg_ptr, u16 offset)
 {
-	qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+	qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
 	qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
 }
@@ -25,15 +25,19 @@ static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
  * but does not have final bit; instead, returns last entry
  */
 static inline struct dpaa2_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
 	    struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
 {
-	while (sg_count && sg) {
-		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
-				 sg_dma_len(sg), offset);
+	int ent_len;
+
+	while (len) {
+		ent_len = min_t(int, sg_dma_len(sg), len);
+
+		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+				 offset);
 		qm_sg_ptr++;
 		sg = sg_next(sg);
-		sg_count--;
+		len -= ent_len;
 	}
 	return qm_sg_ptr - 1;
 }
@@ -42,11 +46,11 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count,
  * convert scatterlist to h/w link table format
  * scatterlist must have been previously dma mapped
  */
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
 				    struct dpaa2_sg_entry *qm_sg_ptr,
 				    u16 offset)
 {
-	qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+	qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
 	dpaa2_sg_set_final(qm_sg_ptr, true);
 }
@@ -35,11 +35,9 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
 		sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
 							 SEC4_SG_OFFSET_MASK);
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
-		       sizeof(struct sec4_sg_entry), 1);
-#endif
+
+	print_hex_dump_debug("sec4_sg_ptr@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+			     sec4_sg_ptr, sizeof(struct sec4_sg_entry), 1);
 }

 /*
@@ -47,15 +45,19 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
  * but does not have final bit; instead, returns last entry
  */
 static inline struct sec4_sg_entry *
-sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
+sg_to_sec4_sg(struct scatterlist *sg, int len,
 	      struct sec4_sg_entry *sec4_sg_ptr, u16 offset)
 {
-	while (sg_count) {
-		dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
-				   sg_dma_len(sg), offset);
+	int ent_len;
+
+	while (len) {
+		ent_len = min_t(int, sg_dma_len(sg), len);
+
+		dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), ent_len,
+				   offset);
 		sec4_sg_ptr++;
 		sg = sg_next(sg);
-		sg_count--;
+		len -= ent_len;
 	}
 	return sec4_sg_ptr - 1;
 }
@@ -72,11 +74,11 @@ static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
  * convert scatterlist to h/w link table format
  * scatterlist must have been previously dma mapped
  */
-static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int len,
 				      struct sec4_sg_entry *sec4_sg_ptr,
 				      u16 offset)
 {
-	sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
+	sec4_sg_ptr = sg_to_sec4_sg(sg, len, sec4_sg_ptr, offset);
 	sg_to_sec4_set_last(sec4_sg_ptr);
 }
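All three scatter/gather translation headers above switch from walking a fixed entry count to walking until a byte budget is spent, clamping each hardware entry to the bytes still owed. This stops the link table from describing trailing bytes of a final DMA segment that do not belong to the request. The bounded walk, reduced to a self-contained userspace example::

    #include <stdio.h>

    struct seg { unsigned long addr; int len; };

    /* emit one entry per segment, clamped to the remaining byte budget */
    static int build_table(const struct seg *segs, int nsegs, int len)
    {
            int i, n = 0;

            for (i = 0; i < nsegs && len > 0; i++) {
                    int ent_len = segs[i].len < len ? segs[i].len : len;

                    printf("entry %d: addr=%#lx len=%d\n", n++,
                           segs[i].addr, ent_len);
                    len -= ent_len;
            }
            return n;       /* entries written */
    }

    int main(void)
    {
            /* the last mapped segment is longer than the 100 bytes used */
            struct seg segs[] = { { 0x1000, 64 }, { 0x2000, 4096 } };

            build_table(segs, 2, 100);      /* second entry clamped to 36 */
            return 0;
    }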
@@ -7,7 +7,6 @@
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
 #include <crypto/authenc.h>
-#include <crypto/crypto_wq.h>
 #include <crypto/des.h>
 #include <crypto/xts.h>
 #include <linux/crypto.h>
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __NITROX_DEBUGFS_H
 #define __NITROX_DEBUGFS_H
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __NITROX_MBX_H
 #define __NITROX_MBX_H
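The two NITROX hunks are comment-style corrections only: the kernel's license-rules documentation asks for the `//` SPDX form in .c sources but the `/* */` form in headers, since headers can be pulled into contexts such as assembly or linker scripts where `//` does not parse. For reference::

    /* correct first line of a header (.h): */
    /* SPDX-License-Identifier: GPL-2.0 */

    // correct first line of a C source (.c):
    // SPDX-License-Identifier: GPL-2.0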
@@ -2,7 +2,7 @@
 /*
  * AMD Cryptographic Coprocessor (CCP) AES crypto API support
  *
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
  */
@@ -76,8 +76,7 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
 		return -EINVAL;

 	if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
-	     (ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
-	     (ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
+	     (ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
 	    (req->nbytes & (AES_BLOCK_SIZE - 1)))
 		return -EINVAL;
@@ -288,7 +287,7 @@ static struct ccp_aes_def aes_algs[] = {
 		.version = CCP_VERSION(3, 0),
 		.name = "cfb(aes)",
 		.driver_name = "cfb-aes-ccp",
-		.blocksize = AES_BLOCK_SIZE,
+		.blocksize = 1,
 		.ivsize = AES_BLOCK_SIZE,
 		.alg_defaults = &ccp_aes_defaults,
 	},
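The CFB fix corrects the mode's advertised geometry: CFB is a stream-like mode, so requests need not be multiples of AES_BLOCK_SIZE, the alignment check is reduced to ECB and CBC, and the advertised block size drops to 1 while the IV stays a full block. As a sketch of how a stream-like mode is typically presented to the crypto API (abridged, not the full ccp definition, which uses a different alg structure)::

    static struct skcipher_alg cfb_alg_sketch = {
            .base.cra_name          = "cfb(aes)",
            .base.cra_blocksize     = 1,               /* no block alignment */
            .ivsize                 = AES_BLOCK_SIZE,  /* IV is still 16 bytes */
            .chunksize              = AES_BLOCK_SIZE,  /* stream modes set this */
    };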
@@ -32,56 +32,62 @@ struct ccp_tasklet_data {
 };

 /* Human-readable error strings */
+#define CCP_MAX_ERROR_CODE	64
 static char *ccp_error_codes[] = {
 	"",
-	"ERR 01: ILLEGAL_ENGINE",
-	"ERR 02: ILLEGAL_KEY_ID",
-	"ERR 03: ILLEGAL_FUNCTION_TYPE",
-	"ERR 04: ILLEGAL_FUNCTION_MODE",
-	"ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
-	"ERR 06: ILLEGAL_FUNCTION_SIZE",
-	"ERR 07: Zlib_MISSING_INIT_EOM",
-	"ERR 08: ILLEGAL_FUNCTION_RSVD",
-	"ERR 09: ILLEGAL_BUFFER_LENGTH",
-	"ERR 10: VLSB_FAULT",
-	"ERR 11: ILLEGAL_MEM_ADDR",
-	"ERR 12: ILLEGAL_MEM_SEL",
-	"ERR 13: ILLEGAL_CONTEXT_ID",
-	"ERR 14: ILLEGAL_KEY_ADDR",
-	"ERR 15: 0xF Reserved",
-	"ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
-	"ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
-	"ERR 18: CMD_TIMEOUT",
-	"ERR 19: IDMA0_AXI_SLVERR",
-	"ERR 20: IDMA0_AXI_DECERR",
-	"ERR 21: 0x15 Reserved",
-	"ERR 22: IDMA1_AXI_SLAVE_FAULT",
-	"ERR 23: IDMA1_AIXI_DECERR",
-	"ERR 24: 0x18 Reserved",
-	"ERR 25: ZLIBVHB_AXI_SLVERR",
-	"ERR 26: ZLIBVHB_AXI_DECERR",
-	"ERR 27: 0x1B Reserved",
-	"ERR 27: ZLIB_UNEXPECTED_EOM",
-	"ERR 27: ZLIB_EXTRA_DATA",
-	"ERR 30: ZLIB_BTYPE",
-	"ERR 31: ZLIB_UNDEFINED_SYMBOL",
-	"ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
-	"ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
-	"ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
-	"ERR 35: ZLIB_UNCOMPRESSED_LEN",
-	"ERR 36: ZLIB_LIMIT_REACHED",
-	"ERR 37: ZLIB_CHECKSUM_MISMATCH0",
-	"ERR 38: ODMA0_AXI_SLVERR",
-	"ERR 39: ODMA0_AXI_DECERR",
-	"ERR 40: 0x28 Reserved",
-	"ERR 41: ODMA1_AXI_SLVERR",
-	"ERR 42: ODMA1_AXI_DECERR",
-	"ERR 43: LSB_PARITY_ERR",
+	"ILLEGAL_ENGINE",
+	"ILLEGAL_KEY_ID",
+	"ILLEGAL_FUNCTION_TYPE",
+	"ILLEGAL_FUNCTION_MODE",
+	"ILLEGAL_FUNCTION_ENCRYPT",
+	"ILLEGAL_FUNCTION_SIZE",
+	"Zlib_MISSING_INIT_EOM",
+	"ILLEGAL_FUNCTION_RSVD",
+	"ILLEGAL_BUFFER_LENGTH",
+	"VLSB_FAULT",
+	"ILLEGAL_MEM_ADDR",
+	"ILLEGAL_MEM_SEL",
+	"ILLEGAL_CONTEXT_ID",
+	"ILLEGAL_KEY_ADDR",
+	"0xF Reserved",
+	"Zlib_ILLEGAL_MULTI_QUEUE",
+	"Zlib_ILLEGAL_JOBID_CHANGE",
+	"CMD_TIMEOUT",
+	"IDMA0_AXI_SLVERR",
+	"IDMA0_AXI_DECERR",
+	"0x15 Reserved",
+	"IDMA1_AXI_SLAVE_FAULT",
+	"IDMA1_AIXI_DECERR",
+	"0x18 Reserved",
+	"ZLIBVHB_AXI_SLVERR",
+	"ZLIBVHB_AXI_DECERR",
+	"0x1B Reserved",
+	"ZLIB_UNEXPECTED_EOM",
+	"ZLIB_EXTRA_DATA",
+	"ZLIB_BTYPE",
+	"ZLIB_UNDEFINED_SYMBOL",
+	"ZLIB_UNDEFINED_DISTANCE_S",
+	"ZLIB_CODE_LENGTH_SYMBOL",
+	"ZLIB _VHB_ILLEGAL_FETCH",
+	"ZLIB_UNCOMPRESSED_LEN",
+	"ZLIB_LIMIT_REACHED",
+	"ZLIB_CHECKSUM_MISMATCH0",
+	"ODMA0_AXI_SLVERR",
+	"ODMA0_AXI_DECERR",
+	"0x28 Reserved",
+	"ODMA1_AXI_SLVERR",
+	"ODMA1_AXI_DECERR",
 };

-void ccp_log_error(struct ccp_device *d, int e)
+void ccp_log_error(struct ccp_device *d, unsigned int e)
 {
-	dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
+	if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
+		return;
+
+	if (e < ARRAY_SIZE(ccp_error_codes))
+		dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
+	else
+		dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
 }

 /* List of CCPs, CCP count, read-write access lock, and access functions
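The reworked ccp_log_error() is a bounds-validated table lookup: the code first rejects indexes beyond the architectural maximum (CCP_MAX_ERROR_CODE), then distinguishes codes the table names from those it does not. The same pattern as a runnable userspace example (names hypothetical)::

    #include <stdio.h>

    #define MAX_ERROR_CODE 64

    static const char *error_names[] = {
            "", "ILLEGAL_ENGINE", "ILLEGAL_KEY_ID",
    };

    static void log_error(unsigned int e)
    {
            if (e >= MAX_ERROR_CODE)        /* impossible value: reject */
                    return;

            if (e < sizeof(error_names) / sizeof(error_names[0]))
                    printf("error %u: %s\n", e, error_names[e]);
            else
                    printf("error %u: Unknown Error\n", e);
    }

    int main(void)
    {
            log_error(1);   /* error 1: ILLEGAL_ENGINE */
            log_error(50);  /* error 50: Unknown Error */
            log_error(99);  /* out of range: ignored */
            return 0;
    }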
Some files were not shown because too many files have changed in this diff.