crypto: ccp - Change ISR handler method for a v3 CCP
The CCP has the ability to perform several operations simultaneously, but only one interrupt. When implemented as a PCI device and using MSI-X/MSI interrupts, use a tasklet model to service interrupts. By disabling and enabling interrupts from the CCP, coupled with the queuing that tasklets provide, we can ensure that all events (occurring on the device) are recognized and serviced.

This change fixes a problem wherein 2 or more busy queues can cause notification bits to change state while a (CCP) interrupt is being serviced, but after the queue state has been evaluated. This results in the event being 'lost' and the queue hanging, waiting to be serviced. Since the status bits are never fully de-asserted, the CCP never generates another interrupt (all bits zero -> one or more bits one), and no further CCP operations will be executed.

Cc: <stable@vger.kernel.org> # 4.9.x+
Signed-off-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 7c6c0dc7bb
commit 7b537b24e7
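The core of the fix is a standard mask-then-defer pattern: the hard IRQ handler masks all queue interrupt sources at the device and hands the status scan off to a tasklet, which re-enables the sources only after every asserted bit has been serviced and acknowledged. The idea is that a status bit which flips while the tasklet is scanning stays pending, so unmasking produces a fresh interrupt rather than a lost event. Below is a minimal sketch of that pattern; it is illustrative only, and every name in it (my_dev, MY_IRQ_MASK, MY_IRQ_STATUS, my_irq_bh) is a hypothetical stand-in, not a CCP driver symbol:

#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical device context -- not a CCP structure. */
struct my_dev {
	void __iomem *regs;		/* mapped MMIO registers */
	u32 irq_mask;			/* interrupt sources we want enabled */
	struct tasklet_struct bh;	/* bottom half */
};

#define MY_IRQ_MASK	0x00		/* illustrative register offsets */
#define MY_IRQ_STATUS	0x04

static void my_irq_bh(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;
	u32 status = ioread32(dev->regs + MY_IRQ_STATUS);

	/* Service each asserted source here, then acknowledge it. */
	iowrite32(status, dev->regs + MY_IRQ_STATUS);

	/* Unmask last: a source that asserted while we were scanning is
	 * still pending, so re-enabling raises a fresh interrupt instead
	 * of the event being silently dropped.
	 */
	iowrite32(dev->irq_mask, dev->regs + MY_IRQ_MASK);
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	/* Mask at the device and defer the (slower) scan to the tasklet. */
	iowrite32(0, dev->regs + MY_IRQ_MASK);
	tasklet_schedule(&dev->bh);

	return IRQ_HANDLED;
}

In this sketch, tasklet_init(&dev->bh, my_irq_bh, (unsigned long)dev) would wire up the bottom half at probe time, mirroring what the patch does for MSI-X/MSI; for other interrupt setups the patch keeps a synchronous path by calling the bottom half directly from the handler.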
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -315,17 +315,73 @@ static int ccp_perform_ecc(struct ccp_op *op)
 	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
 }
 
+static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
+{
+	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
+{
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_irq_bh(unsigned long data)
+{
+	struct ccp_device *ccp = (struct ccp_device *)data;
+	struct ccp_cmd_queue *cmd_q;
+	u32 q_int, status;
+	unsigned int i;
+
+	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+
+		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+		if (q_int) {
+			cmd_q->int_status = status;
+			cmd_q->q_status = ioread32(cmd_q->reg_status);
+			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+			/* On error, only save the first error value */
+			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+			cmd_q->int_rcvd = 1;
+
+			/* Acknowledge the interrupt and wake the kthread */
+			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+			wake_up_interruptible(&cmd_q->int_queue);
+		}
+	}
+	ccp_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+	struct device *dev = data;
+	struct ccp_device *ccp = dev_get_drvdata(dev);
+
+	ccp_disable_queue_interrupts(ccp);
+	if (ccp->use_tasklet)
+		tasklet_schedule(&ccp->irq_tasklet);
+	else
+		ccp_irq_bh((unsigned long)ccp);
+
+	return IRQ_HANDLED;
+}
+
 static int ccp_init(struct ccp_device *ccp)
 {
 	struct device *dev = ccp->dev;
 	struct ccp_cmd_queue *cmd_q;
 	struct dma_pool *dma_pool;
 	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
-	unsigned int qmr, qim, i;
+	unsigned int qmr, i;
 	int ret;
 
 	/* Find available queues */
-	qim = 0;
+	ccp->qim = 0;
 	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
 	for (i = 0; i < MAX_HW_QUEUES; i++) {
 		if (!(qmr & (1 << i)))
@@ -370,7 +426,7 @@ static int ccp_init(struct ccp_device *ccp)
 		init_waitqueue_head(&cmd_q->int_queue);
 
 		/* Build queue interrupt mask (two interrupts per queue) */
-		qim |= cmd_q->int_ok | cmd_q->int_err;
+		ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
 
 #ifdef CONFIG_ARM64
 		/* For arm64 set the recommended queue cache settings */
@@ -388,14 +444,14 @@ static int ccp_init(struct ccp_device *ccp)
 	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
 
 	/* Disable and clear interrupts until ready */
-	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+	ccp_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 	}
-	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
 
 	/* Request an irq */
 	ret = ccp->get_irq(ccp);
@@ -404,6 +460,11 @@ static int ccp_init(struct ccp_device *ccp)
 		goto e_pool;
 	}
 
+	/* Initialize the ISR tasklet? */
+	if (ccp->use_tasklet)
+		tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
+			     (unsigned long)ccp);
+
 	dev_dbg(dev, "Starting threads...\n");
 	/* Create a kthread for each queue */
 	for (i = 0; i < ccp->cmd_q_count; i++) {
@@ -426,7 +487,7 @@ static int ccp_init(struct ccp_device *ccp)
 
 	dev_dbg(dev, "Enabling interrupts...\n");
 	/* Enable interrupts */
-	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+	ccp_enable_queue_interrupts(ccp);
 
 	dev_dbg(dev, "Registering device...\n");
 	ccp_add_device(ccp);
@@ -463,7 +524,7 @@ static void ccp_destroy(struct ccp_device *ccp)
 {
 	struct ccp_cmd_queue *cmd_q;
 	struct ccp_cmd *cmd;
-	unsigned int qim, i;
+	unsigned int i;
 
 	/* Unregister the DMA engine */
 	ccp_dmaengine_unregister(ccp);
@@ -474,22 +535,15 @@ static void ccp_destroy(struct ccp_device *ccp)
 	/* Remove this device from the list of available units */
 	ccp_del_device(ccp);
 
-	/* Build queue interrupt mask (two interrupt masks per queue) */
-	qim = 0;
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-		qim |= cmd_q->int_ok | cmd_q->int_err;
-	}
-
 	/* Disable and clear interrupts */
-	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+	ccp_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 	}
-	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
 
 	/* Stop the queue kthreads */
 	for (i = 0; i < ccp->cmd_q_count; i++)
@@ -516,40 +570,6 @@ static void ccp_destroy(struct ccp_device *ccp)
 	}
 }
 
-static irqreturn_t ccp_irq_handler(int irq, void *data)
-{
-	struct device *dev = data;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
-	struct ccp_cmd_queue *cmd_q;
-	u32 q_int, status;
-	unsigned int i;
-
-	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
-
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-
-		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
-		if (q_int) {
-			cmd_q->int_status = status;
-			cmd_q->q_status = ioread32(cmd_q->reg_status);
-			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
-			/* On error, only save the first error value */
-			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
-				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
-			cmd_q->int_rcvd = 1;
-
-			/* Acknowledge the interrupt and wake the kthread */
-			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
-			wake_up_interruptible(&cmd_q->int_queue);
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
 static const struct ccp_actions ccp3_actions = {
 	.aes = ccp_perform_aes,
 	.xts_aes = ccp_perform_xts_aes,
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -339,7 +339,10 @@ struct ccp_device {
 	void *dev_specific;
 	int (*get_irq)(struct ccp_device *ccp);
 	void (*free_irq)(struct ccp_device *ccp);
+	unsigned int qim;
 	unsigned int irq;
+	bool use_tasklet;
+	struct tasklet_struct irq_tasklet;
 
 	/* I/O area used for device communication. The register mapping
 	 * starts at an offset into the mapped bar.
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -69,6 +69,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
 			goto e_irq;
 		}
 	}
+	ccp->use_tasklet = true;
 
 	return 0;
 
@@ -100,6 +101,7 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
 		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
 		goto e_msi;
 	}
+	ccp->use_tasklet = true;
 
 	return 0;
 
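For context on the wake_up_interruptible() call in ccp_irq_bh(): each per-queue kthread sleeps on cmd_q->int_queue until the bottom half marks the interrupt as received. A hedged sketch of that consumer side, assuming int_rcvd is the wake condition as the diff suggests (a simplification, not the driver's verbatim wait loop):

#include <linux/wait.h>

/* Sketch only: relies on the struct ccp_cmd_queue fields shown in the
 * diff above (int_queue, int_rcvd); the function name is hypothetical.
 */
static void example_wait_for_queue_event(struct ccp_cmd_queue *cmd_q)
{
	/* Sleep until ccp_irq_bh() sets int_rcvd and wakes the queue. */
	wait_event_interruptible(cmd_q->int_queue, cmd_q->int_rcvd);
	cmd_q->int_rcvd = 0;
}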