/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"
#include "user.h"
#include "memory.h"
#include "config.h"
#include "lowcomms.h"
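
/*
 * Bring up each DLM subsystem in dependency order; any failure unwinds
 * the subsystems that were already initialized, in reverse order.
 */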
static int __init init_dlm(void)
{
	int error;

	error = dlm_memory_init();
	if (error)
		goto out;

	error = dlm_lockspace_init();
	if (error)
		goto out_mem;

	error = dlm_config_init();
	if (error)
		goto out_lockspace;

	error = dlm_register_debugfs();
	if (error)
		goto out_config;

	error = dlm_user_init();
	if (error)
		goto out_debug;

	error = dlm_netlink_init();
	if (error)
		goto out_user;

	error = dlm_plock_init();
	if (error)
		goto out_netlink;

	printk("DLM installed\n");

	return 0;
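
	/* error unwinding: undo only the inits that succeeded, in reverse */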
 out_netlink:
	dlm_netlink_exit();
 out_user:
	dlm_user_exit();
 out_debug:
	dlm_unregister_debugfs();
 out_config:
	dlm_config_exit();
 out_lockspace:
	dlm_lockspace_exit();
 out_mem:
	dlm_memory_exit();
 out:
	return error;
}
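
/* Tear down all DLM subsystems on module unload. */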
static void __exit exit_dlm(void)
{
	dlm_plock_exit();
	dlm_netlink_exit();
	dlm_user_exit();
	dlm_config_exit();
	dlm_memory_exit();
	dlm_lockspace_exit();
	dlm_lowcomms_exit();
	dlm_unregister_debugfs();
}

module_init(init_dlm);
module_exit(exit_dlm);

MODULE_DESCRIPTION("Distributed Lock Manager");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
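
/* Core locking API, exported for in-kernel users such as GFS2. */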
EXPORT_SYMBOL_GPL(dlm_new_lockspace);
EXPORT_SYMBOL_GPL(dlm_release_lockspace);
EXPORT_SYMBOL_GPL(dlm_lock);
EXPORT_SYMBOL_GPL(dlm_unlock);