lockd: Fix Oopses due to list manipulation errors.
The patch "stop abusing file_lock_list" introduces a couple of bugs, since the locks may be copied and therefore need to be removed from the lists when they are destroyed.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 26bcbf965f
commit 4c060b5310
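For context on the bug class being fixed: copying a structure that embeds a list_head (as the old memcpy() in nlmclnt_locks_copy_lock() did) leaves both copies pointing into the same list, so unlinking either one corrupts the other's linkage, and destroying a lock that is still on h_granted or h_reclaim leaves a dangling entry behind. The sketch below is a minimal, self-contained userspace illustration of that pattern and of the fix (give the copy its own linkage, and make removal idempotent with list_del_init()). It mocks the kernel's list primitives, and the fake_lock struct is purely hypothetical; this is not the kernel implementation.

/*
 * Illustration only: a tiny userspace mock of the kernel's list_head,
 * showing why memcpy()ing an object that is linked into a list is unsafe
 * and how re-initialising the copy avoids the problem.
 */
#include <stdio.h>
#include <string.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h; h->prev = h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);		/* safe to remove again later */
}

/* Hypothetical stand-in for the fl_u.nfs_fl part of struct file_lock. */
struct fake_lock { struct list_head list; int state; };

int main(void)
{
	struct list_head granted;
	struct fake_lock orig, copy;

	INIT_LIST_HEAD(&granted);
	INIT_LIST_HEAD(&orig.list);
	list_add_tail(&orig.list, &granted);

	/*
	 * Bug pattern: a plain memcpy() duplicates the embedded list_head,
	 * so copy.list still points at granted/orig.  Unlinking or freeing
	 * the copy would then rewrite pointers owned by another object.
	 */
	memcpy(&copy, &orig, sizeof(copy));

	/*
	 * Fix pattern: give the copy its own linkage and insert it
	 * explicitly (as nlmclnt_locks_copy_lock() now does), and use
	 * list_del_init() on teardown so a later, second removal is a no-op.
	 */
	INIT_LIST_HEAD(&copy.list);
	list_add_tail(&copy.list, &granted);

	list_del_init(&copy.list);
	list_del_init(&orig.list);
	printf("granted list empty: %d\n", granted.next == &granted);
	return 0;
}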
@@ -213,11 +213,12 @@ reclaimer(void *ptr)
 	/* First, reclaim all locks that have been marked. */
 restart:
 	list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
-		list_del(&fl->fl_u.nfs_fl.list);
+		list_del_init(&fl->fl_u.nfs_fl.list);
 
-		nlmclnt_reclaim(host, fl);
-		if (signalled())
-			break;
+		if (signalled())
+			continue;
+		if (nlmclnt_reclaim(host, fl) == 0)
+			list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
 		goto restart;
 	}
 
@@ -446,12 +446,14 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
 
 static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
 {
-	memcpy(&new->fl_u.nfs_fl, &fl->fl_u.nfs_fl, sizeof(new->fl_u.nfs_fl));
-	nlm_get_lockowner(new->fl_u.nfs_fl.owner);
+	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
+	new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
+	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
 }
 
 static void nlmclnt_locks_release_private(struct file_lock *fl)
 {
+	list_del(&fl->fl_u.nfs_fl.list);
 	nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
 	fl->fl_ops = NULL;
 }
@@ -466,6 +468,7 @@ static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *ho
 	BUG_ON(fl->fl_ops != NULL);
 	fl->fl_u.nfs_fl.state = 0;
 	fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
+	INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
 	fl->fl_ops = &nlmclnt_lock_ops;
 }
 
@@ -552,7 +555,7 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
 	if (resp->status == NLM_LCK_GRANTED) {
 		fl->fl_u.nfs_fl.state = host->h_state;
 		fl->fl_flags |= FL_SLEEP;
-		list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
+		/* Ensure the resulting lock will get added to granted list */
 		do_vfs_lock(fl);
 	}
 	status = nlm_stat_to_errno(resp->status);
@@ -618,12 +621,6 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 	struct nlm_res *resp = &req->a_res;
 	int status;
 
-	/*
-	 * Remove from the granted list now so the lock doesn't get
-	 * reclaimed while we're stuck in the unlock call.
-	 */
-	list_del(&fl->fl_u.nfs_fl.list);
-
 	/*
 	 * Note: the server is supposed to either grant us the unlock
 	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
@@ -245,8 +245,12 @@ void nlm_release_host(struct nlm_host *host)
 {
 	if (host != NULL) {
 		dprintk("lockd: release host %s\n", host->h_name);
-		atomic_dec(&host->h_count);
 		BUG_ON(atomic_read(&host->h_count) < 0);
+		if (atomic_dec_and_test(&host->h_count)) {
+			BUG_ON(!list_empty(&host->h_lockowners));
+			BUG_ON(!list_empty(&host->h_granted));
+			BUG_ON(!list_empty(&host->h_reclaim));
+		}
 	}
 }
 
@@ -334,7 +338,6 @@ nlm_gc_hosts(void)
 				rpc_destroy_client(host->h_rpcclnt);
 			}
 		}
-		BUG_ON(!list_empty(&host->h_lockowners));
 		kfree(host);
 		nrhosts--;
 	}