[S390] appldata: avoid deadlock with appldata_mem

The appldata_ops callbacks are called with a spin_lock held. But the
appldata_mem callback then calls all_vm_events(), which calls
get_online_cpus(), which might sleep. This possible deadlock is fixed
by using a mutex instead of a spin_lock.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit b1ad171efa
parent 3bd5f3ef29
Author:    Gerald Schaefer <gerald.schaefer@de.ibm.com>
Committer: Martin Schwidefsky <schwidefsky@de.ibm.com>
Date:      2009-04-23 13:58:07 +02:00

 2 files changed, 17 insertions(+), 17 deletions(-)
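
The change boils down to one locking rule: code that may sleep (here, all_vm_events() taking get_online_cpus()) must not run under a spinlock, but may run under a mutex. The following stand-alone sketch shows the resulting pattern of walking an ops list and invoking possibly-sleeping callbacks under a mutex; it is not the patched code itself, and all example_* names are purely illustrative.

#include <linux/list.h>
#include <linux/mutex.h>

/* Illustrative stand-ins for appldata_ops_mutex / appldata_ops_list. */
static DEFINE_MUTEX(example_ops_mutex);	/* sleeping lock, replaces a spinlock */
static LIST_HEAD(example_ops_list);

struct example_ops {
	struct list_head list;
	void (*callback)(void *data);	/* may sleep, e.g. via get_online_cpus() */
	void *data;
	int active;
};

static void example_work_fn(void)
{
	struct example_ops *ops;

	mutex_lock(&example_ops_mutex);		/* holder is allowed to sleep */
	list_for_each_entry(ops, &example_ops_list, list) {
		if (ops->active)
			ops->callback(ops->data);	/* unsafe under spin_lock() */
	}
	mutex_unlock(&example_ops_mutex);
}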

diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -98,7 +98,7 @@ static DECLARE_WORK(appldata_work, appldata_work_fn);
 /*
  * Ops list
  */
-static DEFINE_SPINLOCK(appldata_ops_lock);
+static DEFINE_MUTEX(appldata_ops_mutex);
 static LIST_HEAD(appldata_ops_list);
@@ -129,14 +129,14 @@ static void appldata_work_fn(struct work_struct *work)
 	i = 0;
 	get_online_cpus();
-	spin_lock(&appldata_ops_lock);
+	mutex_lock(&appldata_ops_mutex);
 	list_for_each(lh, &appldata_ops_list) {
 		ops = list_entry(lh, struct appldata_ops, list);
 		if (ops->active == 1) {
 			ops->callback(ops->data);
 		}
 	}
-	spin_unlock(&appldata_ops_lock);
+	mutex_unlock(&appldata_ops_mutex);
 	put_online_cpus();
 }
@@ -338,7 +338,7 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
 	struct list_head *lh;
 	found = 0;
-	spin_lock(&appldata_ops_lock);
+	mutex_lock(&appldata_ops_mutex);
 	list_for_each(lh, &appldata_ops_list) {
 		tmp_ops = list_entry(lh, struct appldata_ops, list);
 		if (&tmp_ops->ctl_table[2] == ctl) {
@@ -346,15 +346,15 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
 		}
 	}
 	if (!found) {
-		spin_unlock(&appldata_ops_lock);
+		mutex_unlock(&appldata_ops_mutex);
 		return -ENODEV;
 	}
 	ops = ctl->data;
 	if (!try_module_get(ops->owner)) { // protect this function
-		spin_unlock(&appldata_ops_lock);
+		mutex_unlock(&appldata_ops_mutex);
 		return -ENODEV;
 	}
-	spin_unlock(&appldata_ops_lock);
+	mutex_unlock(&appldata_ops_mutex);
 	if (!*lenp || *ppos) {
 		*lenp = 0;
@@ -378,11 +378,11 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
 		return -EFAULT;
 	}
-	spin_lock(&appldata_ops_lock);
+	mutex_lock(&appldata_ops_mutex);
 	if ((buf[0] == '1') && (ops->active == 0)) {
 		// protect work queue callback
 		if (!try_module_get(ops->owner)) {
-			spin_unlock(&appldata_ops_lock);
+			mutex_unlock(&appldata_ops_mutex);
 			module_put(ops->owner);
 			return -ENODEV;
 		}
@@ -407,7 +407,7 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
 			"failed with rc=%d\n", ops->name, rc);
 		module_put(ops->owner);
 	}
-	spin_unlock(&appldata_ops_lock);
+	mutex_unlock(&appldata_ops_mutex);
 out:
 	*lenp = len;
 	*ppos += len;
@@ -433,9 +433,9 @@ int appldata_register_ops(struct appldata_ops *ops)
 	if (!ops->ctl_table)
 		return -ENOMEM;
-	spin_lock(&appldata_ops_lock);
+	mutex_lock(&appldata_ops_mutex);
 	list_add(&ops->list, &appldata_ops_list);
-	spin_unlock(&appldata_ops_lock);
+	mutex_unlock(&appldata_ops_mutex);
 	ops->ctl_table[0].procname = appldata_proc_name;
 	ops->ctl_table[0].maxlen = 0;
@@ -452,9 +452,9 @@ int appldata_register_ops(struct appldata_ops *ops)
 		goto out;
 	return 0;
 out:
-	spin_lock(&appldata_ops_lock);
+	mutex_lock(&appldata_ops_mutex);
 	list_del(&ops->list);
-	spin_unlock(&appldata_ops_lock);
+	mutex_unlock(&appldata_ops_mutex);
 	kfree(ops->ctl_table);
 	return -ENOMEM;
 }
@@ -466,9 +466,9 @@ int appldata_register_ops(struct appldata_ops *ops)
  */
 void appldata_unregister_ops(struct appldata_ops *ops)
 {
-	spin_lock(&appldata_ops_lock);
+	mutex_lock(&appldata_ops_mutex);
 	list_del(&ops->list);
-	spin_unlock(&appldata_ops_lock);
+	mutex_unlock(&appldata_ops_mutex);
 	unregister_sysctl_table(ops->sysctl_header);
 	kfree(ops->ctl_table);
 }

diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -78,7 +78,7 @@ static void appldata_get_mem_data(void *data)
 {
 	/*
 	 * don't put large structures on the stack, we are
-	 * serialized through the appldata_ops_lock and can use static
+	 * serialized through the appldata_ops_mutex and can use static
 	 */
 	static struct sysinfo val;
 	unsigned long ev[NR_VM_EVENT_ITEMS];
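
The comment updated in the hunk above also documents a second consequence of the mutex: because every caller of the callbacks is serialized by appldata_ops_mutex, a callback can keep its large working buffer in a single static variable instead of on the stack. Below is a hedged, self-contained sketch of that idiom; the example_* names are illustrative and not from the patch.

#include <linux/kernel.h>
#include <linux/mm.h>		/* si_meminfo() */
#include <linux/mutex.h>
#include <linux/vmstat.h>	/* all_vm_events(), NR_VM_EVENT_ITEMS */

static DEFINE_MUTEX(example_ops_mutex);	/* stands in for appldata_ops_mutex */

/* Callback sketch: callers are serialized, so a static buffer is safe. */
static void example_get_mem_data(void)
{
	static struct sysinfo val;	/* too large for the stack; callers serialized */
	unsigned long ev[NR_VM_EVENT_ITEMS];

	si_meminfo(&val);		/* fill in memory statistics */
	all_vm_events(ev);		/* may sleep; fine under a mutex */
}

static void example_caller(void)
{
	mutex_lock(&example_ops_mutex);	/* serializes all users of 'val' */
	example_get_mem_data();
	mutex_unlock(&example_ops_mutex);
}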