/*
 * Copyright (C) 2004 IBM Corporation
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Dave Safford <safford@watson.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Copyright (C) 2013 Obsidian Research Corp
 * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
 *
 * Device file system interface to the TPM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "tpm.h"
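
/*
 * A minimal user-space sketch of how this interface is typically driven
 * (illustrative only; the device node name and the cmd/resp buffers are
 * assumptions, not defined in this file):
 *
 *	int fd = open("/dev/tpm0", O_RDWR);
 *	write(fd, cmd, cmd_len);	- send one fully marshalled TPM command
 *	read(fd, resp, sizeof(resp));	- claim the response before writing again
 *	close(fd);
 *
 * The handlers below enforce this one-outstanding-command model: tpm_write()
 * refuses a new command with -EBUSY until the previous response has been
 * read (or discarded by the user_read_timer timeout).
 */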

struct file_priv {
	struct tpm_chip *chip;

	/* Data passed to and from the tpm via the read/write calls */
	atomic_t data_pending;
	struct mutex buffer_mutex;

	struct timer_list user_read_timer;	/* user needs to claim result */
	struct work_struct work;

	u8 data_buffer[TPM_BUFSIZE];
};

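/*
 * user_reader_timeout() runs in timer (atomic) context, where the sleeping
 * buffer_mutex cannot be taken, so it only schedules timeout_work(), which
 * discards the unclaimed response from process context.
 */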
static void user_reader_timeout(unsigned long ptr)
{
	struct file_priv *priv = (struct file_priv *)ptr;

	pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
		task_tgid_nr(current));

	schedule_work(&priv->work);
}

static void timeout_work(struct work_struct *work)
{
	struct file_priv *priv = container_of(work, struct file_priv, work);

	mutex_lock(&priv->buffer_mutex);
	atomic_set(&priv->data_pending, 0);
	memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
	mutex_unlock(&priv->buffer_mutex);
}

static int tpm_open(struct inode *inode, struct file *file)
{
	struct tpm_chip *chip =
		container_of(inode->i_cdev, struct tpm_chip, cdev);
	struct file_priv *priv;

	/* It's assured that the chip will be opened just once by the
	 * check of the is_open variable, which is protected by
	 * driver_lock.
	 */
	if (test_and_set_bit(0, &chip->is_open)) {
		dev_dbg(&chip->dev, "Another process owns this TPM\n");
		return -EBUSY;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv == NULL) {
		clear_bit(0, &chip->is_open);
		return -ENOMEM;
	}

	priv->chip = chip;
	atomic_set(&priv->data_pending, 0);
	mutex_init(&priv->buffer_mutex);
	setup_timer(&priv->user_read_timer, user_reader_timeout,
		    (unsigned long)priv);
	INIT_WORK(&priv->work, timeout_work);

	file->private_data = priv;
	return 0;
}

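/*
 * Note on short reads: if userspace asks for fewer bytes than are pending,
 * only that much is copied out; the full response is then wiped and
 * data_pending cleared, so the remainder cannot be fetched by a second read.
 */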
static ssize_t tpm_read(struct file *file, char __user *buf,
			size_t size, loff_t *off)
{
	struct file_priv *priv = file->private_data;
	ssize_t ret_size;
	int rc;

	del_singleshot_timer_sync(&priv->user_read_timer);
	flush_work(&priv->work);
	ret_size = atomic_read(&priv->data_pending);
	if (ret_size > 0) {	/* relay data */
		ssize_t orig_ret_size = ret_size;
		if (size < ret_size)
			ret_size = size;

		mutex_lock(&priv->buffer_mutex);
		rc = copy_to_user(buf, priv->data_buffer, ret_size);
		memset(priv->data_buffer, 0, orig_ret_size);
		if (rc)
			ret_size = -EFAULT;

		mutex_unlock(&priv->buffer_mutex);
	}

	atomic_set(&priv->data_pending, 0);

	return ret_size;
}

static ssize_t tpm_write(struct file *file, const char __user *buf,
			 size_t size, loff_t *off)
{
	struct file_priv *priv = file->private_data;
	size_t in_size = size;
	ssize_t out_size;

	/* Cannot perform a write until the read has cleared, either via
	 * tpm_read or a user_read_timer timeout. This also prevents split
	 * buffered writes from blocking here.
	 */
	if (atomic_read(&priv->data_pending) != 0)
		return -EBUSY;

	if (in_size > TPM_BUFSIZE)
		return -E2BIG;

	mutex_lock(&priv->buffer_mutex);

	if (copy_from_user
	    (priv->data_buffer, (void __user *) buf, in_size)) {
		mutex_unlock(&priv->buffer_mutex);
		return -EFAULT;
	}

	/* atomic tpm command send and result receive. We only hold the ops
	 * lock during this period so that the tpm can be unregistered even if
	 * the char dev is held open.
	 */
	if (tpm_try_get_ops(priv->chip)) {
		mutex_unlock(&priv->buffer_mutex);
		return -EPIPE;
	}
	out_size = tpm_transmit(priv->chip, priv->data_buffer,
				sizeof(priv->data_buffer), 0);

	tpm_put_ops(priv->chip);
	if (out_size < 0) {
		mutex_unlock(&priv->buffer_mutex);
		return out_size;
	}

	atomic_set(&priv->data_pending, out_size);
	mutex_unlock(&priv->buffer_mutex);

	/* Set a timeout by which the reader must come claim the result */
	mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));

	return in_size;
}

/*
 * Called on file close
 */
static int tpm_release(struct inode *inode, struct file *file)
{
	struct file_priv *priv = file->private_data;

	del_singleshot_timer_sync(&priv->user_read_timer);
	flush_work(&priv->work);
	file->private_data = NULL;
	atomic_set(&priv->data_pending, 0);
	clear_bit(0, &priv->chip->is_open);
	kfree(priv);
	return 0;
}

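/*
 * tpm_fops is hooked up to the chip's character device (chip->cdev, as used
 * in tpm_open() above) when the chip is registered elsewhere in the driver;
 * this file only supplies the handlers.
 */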
const struct file_operations tpm_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};