linux_dsm_epyc7002/drivers/crypto/qce/dma.c
Eneas U de Queiroz d6364b8128 crypto: qce - use cryptlen when adding extra sgl
The qce crypto driver appends an extra entry to the dst sgl, to maintain
private state information.

When the gcm driver sends requests to the ctr skcipher, it passes the
authentication tag after the actual crypto payload, but the tag must
not be touched.

Commit 1336c2221bee ("crypto: qce - save a sg table slot for result
buf") limited the destination sgl to avoid overwriting the
authentication tag, but it assumed the tag would be in a separate sgl
entry.

This is not always the case, so it is better to limit the length of the
destination buffer to req->cryptlen before appending the result buf.
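
As an illustration only (the request-handling context and the
rctx->dst_tbl / rctx->result_sg names are assumptions about the caller,
not part of this file), the intended pattern on the skcipher side looks
roughly like:

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
			     QCE_RESULT_BUF_SZ);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

Capping the first call at req->cryptlen means the destination mapping
covers only the payload, so the hardware never writes past it into a
tag that shares the final sgl entry.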

Signed-off-by: Eneas U de Queiroz <cotequeiroz@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2020-02-13 17:05:26 +08:00

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>

#include "dma.h"

int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
	int ret;

	dma->txchan = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->txchan))
		return PTR_ERR(dma->txchan);

	dma->rxchan = dma_request_chan(dev, "rx");
	if (IS_ERR(dma->rxchan)) {
		ret = PTR_ERR(dma->rxchan);
		goto error_rx;
	}

	dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
				  GFP_KERNEL);
	if (!dma->result_buf) {
		ret = -ENOMEM;
		goto error_nomem;
	}

	dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;

	return 0;
error_nomem:
	dma_release_channel(dma->rxchan);
error_rx:
	dma_release_channel(dma->txchan);
	return ret;
}

void qce_dma_release(struct qce_dma_data *dma)
{
	dma_release_channel(dma->txchan);
	dma_release_channel(dma->rxchan);
	kfree(dma->result_buf);
}
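
/*
 * Copy entries from @new_sgl into the first unused slots of @sgt,
 * adding at most @max_len bytes in total.  Returns the last entry
 * filled in, or ERR_PTR(-EINVAL) if @sgt has no free slot left.
 */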
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
		unsigned int max_len)
{
	struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
	unsigned int new_len;

	while (sg) {
		if (!sg_page(sg))
			break;
		sg = sg_next(sg);
	}

	if (!sg)
		return ERR_PTR(-EINVAL);

	while (new_sgl && sg && max_len) {
		new_len = new_sgl->length > max_len ? max_len : new_sgl->length;
		sg_set_page(sg, sg_page(new_sgl), new_len, new_sgl->offset);
		sg_last = sg;
		sg = sg_next(sg);
		new_sgl = sg_next(new_sgl);
		max_len -= new_len;
	}

	return sg_last;
}

static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
			   int nents, unsigned long flags,
			   enum dma_transfer_direction dir,
			   dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (!sg || !nents)
		return -EINVAL;

	desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	desc->callback = cb;
	desc->callback_param = cb_param;
	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}
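
/*
 * Queue both halves of a crypto operation: the "rx" channel feeds data
 * from memory into the crypto engine (DMA_MEM_TO_DEV), the "tx" channel
 * brings results back to memory (DMA_DEV_TO_MEM).  Only the tx transfer
 * carries the completion callback.
 */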
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
		     int rx_nents, struct scatterlist *tx_sg, int tx_nents,
		     dma_async_tx_callback cb, void *cb_param)
{
	struct dma_chan *rxchan = dma->rxchan;
	struct dma_chan *txchan = dma->txchan;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	int ret;

	ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
			      NULL, NULL);
	if (ret)
		return ret;

	return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
			       cb, cb_param);
}

void qce_dma_issue_pending(struct qce_dma_data *dma)
{
	dma_async_issue_pending(dma->rxchan);
	dma_async_issue_pending(dma->txchan);
}

int qce_dma_terminate_all(struct qce_dma_data *dma)
{
	int ret;

	ret = dmaengine_terminate_all(dma->rxchan);

	return ret ?: dmaengine_terminate_all(dma->txchan);
}