/*
 *   fs/cifs/misc.c
 *
 *   Copyright (C) International Business Machines Corp., 2002, 2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#ifdef CONFIG_CIFS_SMB2
#include "smb2pdu.h"
#endif

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */
unsigned int
_get_xid(void)
{
        unsigned int xid;

        spin_lock(&GlobalMid_Lock);
        GlobalTotalActiveXid++;

        /* keep high water mark for number of simultaneous ops in filesystem */
        if (GlobalTotalActiveXid > GlobalMaxActiveXid)
                GlobalMaxActiveXid = GlobalTotalActiveXid;
        if (GlobalTotalActiveXid > 65000)
                cifs_dbg(FYI, "warning: more than 65000 requests active\n");
        xid = GlobalCurrentXid++;
        spin_unlock(&GlobalMid_Lock);
        return xid;
}

void
_free_xid(unsigned int xid)
{
        spin_lock(&GlobalMid_Lock);
        /* if (GlobalTotalActiveXid == 0)
                BUG(); */
        GlobalTotalActiveXid--;
        spin_unlock(&GlobalMid_Lock);
}

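/* Allocate a zeroed SMB session structure with an initial reference held
   and its lists and session mutex initialized. */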
struct cifs_ses *
sesInfoAlloc(void)
{
        struct cifs_ses *ret_buf;

        ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
        if (ret_buf) {
                atomic_inc(&sesInfoAllocCount);
                ret_buf->status = CifsNew;
                ++ret_buf->ses_count;
                INIT_LIST_HEAD(&ret_buf->smb_ses_list);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                mutex_init(&ret_buf->session_mutex);
        }
        return ret_buf;
}

void
sesInfoFree(struct cifs_ses *buf_to_free)
{
        if (buf_to_free == NULL) {
                cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
                return;
        }

        atomic_dec(&sesInfoAllocCount);
        kfree(buf_to_free->serverOS);
        kfree(buf_to_free->serverDomain);
        kfree(buf_to_free->serverNOS);
        if (buf_to_free->password) {
                memset(buf_to_free->password, 0, strlen(buf_to_free->password));
                kfree(buf_to_free->password);
        }
        kfree(buf_to_free->user_name);
        kfree(buf_to_free->domainName);
        kfree(buf_to_free->auth_key.response);
        kfree(buf_to_free);
}

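/* Allocate a zeroed tree connection structure with an initial reference
   held and its open-file and tcon lists initialized. */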
struct cifs_tcon *
tconInfoAlloc(void)
{
        struct cifs_tcon *ret_buf;
        ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
        if (ret_buf) {
                atomic_inc(&tconInfoAllocCount);
                ret_buf->tidStatus = CifsNew;
                ++ret_buf->tc_count;
                INIT_LIST_HEAD(&ret_buf->openFileList);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
#ifdef CONFIG_CIFS_STATS
                spin_lock_init(&ret_buf->stat_lock);
#endif
        }
        return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
        if (buf_to_free == NULL) {
                cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
                return;
        }
        atomic_dec(&tconInfoAllocCount);
        kfree(buf_to_free->nativeFileSystem);
        if (buf_to_free->password) {
                memset(buf_to_free->password, 0, strlen(buf_to_free->password));
                kfree(buf_to_free->password);
        }
        kfree(buf_to_free);
}

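/* Allocate a full-sized request buffer from the cifs_req_poolp mempool and
   clear the first few header bytes (the rest is cleared in header_assemble). */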
struct smb_hdr *
cifs_buf_get(void)
{
        struct smb_hdr *ret_buf = NULL;
        size_t buf_size = sizeof(struct smb_hdr);

#ifdef CONFIG_CIFS_SMB2
        /*
         * SMB2 header is bigger than CIFS one - no problems to clean some
         * more bytes for CIFS.
         */
        buf_size = sizeof(struct smb2_hdr);
#endif
        /*
         * We could use negotiated size instead of max_msgsize -
         * but it may be more efficient to always alloc same size
         * albeit slightly larger than necessary and maxbuffersize
         * defaults to this and can not be bigger.
         */
        ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

        /* clear the first few header bytes */
        /* for most paths, more is cleared in header_assemble */
        if (ret_buf) {
                memset(ret_buf, 0, buf_size + 3);
                atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
                atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
        }

        return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
        if (buf_to_free == NULL) {
                /* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
                return;
        }
        mempool_free(buf_to_free, cifs_req_poolp);

        atomic_dec(&bufAllocCount);
        return;
}

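/* Allocate a small request buffer from the cifs_sm_req_poolp mempool; the
   header is cleared later in header_assemble. */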
struct smb_hdr *
cifs_small_buf_get(void)
{
        struct smb_hdr *ret_buf = NULL;

/* We could use negotiated size instead of max_msgsize -
   but it may be more efficient to always alloc same size
   albeit slightly larger than necessary and maxbuffersize
   defaults to this and can not be bigger */
        ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
        if (ret_buf) {
        /* No need to clear memory here, cleared in header assemble */
        /*      memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
                atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
                atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
        }
        return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{
        if (buf_to_free == NULL) {
                cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
                return;
        }
        mempool_free(buf_to_free, cifs_sm_req_poolp);

        atomic_dec(&smBufAllocCount);
        return;
}

/* NB: MID can not be set if treeCon not passed in, in that
   case it is responsibility of caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
                const struct cifs_tcon *treeCon, int word_count
                /* length of fixed section (word count) in two byte units */)
{
        char *temp = (char *) buffer;

        memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

        buffer->smb_buf_length = cpu_to_be32(
            (2 * word_count) + sizeof(struct smb_hdr) -
            4 /* RFC 1001 length field does not count */ +
            2 /* for bcc field itself */) ;

        buffer->Protocol[0] = 0xFF;
        buffer->Protocol[1] = 'S';
        buffer->Protocol[2] = 'M';
        buffer->Protocol[3] = 'B';
        buffer->Command = smb_command;
        buffer->Flags = 0x00;   /* case sensitive */
        buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
        buffer->Pid = cpu_to_le16((__u16)current->tgid);
        buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
        if (treeCon) {
                buffer->Tid = treeCon->tid;
                if (treeCon->ses) {
                        if (treeCon->ses->capabilities & CAP_UNICODE)
                                buffer->Flags2 |= SMBFLG2_UNICODE;
                        if (treeCon->ses->capabilities & CAP_STATUS32)
                                buffer->Flags2 |= SMBFLG2_ERR_STATUS;

                        /* Uid is not converted */
                        buffer->Uid = treeCon->ses->Suid;
                        buffer->Mid = get_next_mid(treeCon->ses->server);
                }
                if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
                        buffer->Flags2 |= SMBFLG2_DFS;
                if (treeCon->nocase)
                        buffer->Flags |= SMBFLG_CASELESS;
                if ((treeCon->ses) && (treeCon->ses->server))
                        if (treeCon->ses->server->sign)
                                buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
        }

/* endian conversion of flags is now done just before sending */
        buffer->WordCount = (char) word_count;
        return;
}

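/* Return 0 if the buffer carries the SMB signature and is either a response
   or a LOCKING_ANDX request (oplock break), 1 otherwise. */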
static int
check_smb_hdr(struct smb_hdr *smb)
{
        /* does it have the right SMB "signature" ? */
        if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
                cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
                         *(unsigned int *)smb->Protocol);
                return 1;
        }

        /* if it's a response then accept */
        if (smb->Flags & SMBFLG_RESPONSE)
                return 0;

        /* only one valid case where server sends us request */
        if (smb->Command == SMB_COM_LOCKING_ANDX)
                return 0;

        cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
                 get_mid(smb));
        return 1;
}

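/*
 * Sanity check a received frame: verify the header signature and that the
 * bytes read, the RFC1001 length, and the size calculated from the SMB
 * fields are consistent. Returns 0 if the frame looks valid, -EIO otherwise.
 */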
int
checkSMB(char *buf, unsigned int total_read)
{
        struct smb_hdr *smb = (struct smb_hdr *)buf;
        __u32 rfclen = be32_to_cpu(smb->smb_buf_length);
        __u32 clc_len;  /* calculated length */
        cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
                 total_read, rfclen);

        /* is this frame too small to even get to a BCC? */
        if (total_read < 2 + sizeof(struct smb_hdr)) {
                if ((total_read >= sizeof(struct smb_hdr) - 1)
                            && (smb->Status.CifsError != 0)) {
                        /* it's an error return */
                        smb->WordCount = 0;
                        /* some error cases do not return wct and bcc */
                        return 0;
                } else if ((total_read == sizeof(struct smb_hdr) + 1) &&
                                (smb->WordCount == 0)) {
                        char *tmp = (char *)smb;
                        /* Need to work around a bug in two servers here */
                        /* First, check if the part of bcc they sent was zero */
                        if (tmp[sizeof(struct smb_hdr)] == 0) {
                                /* some servers return only half of bcc
                                 * on simple responses (wct, bcc both zero)
                                 * in particular have seen this on
                                 * ulogoffX and FindClose. This leaves
                                 * one byte of bcc potentially uninitialized
                                 */
                                /* zero rest of bcc */
                                tmp[sizeof(struct smb_hdr)+1] = 0;
                                return 0;
                        }
                        cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
                } else {
                        cifs_dbg(VFS, "Length less than smb header size\n");
                }
                return -EIO;
        }

        /* otherwise, there is enough to get to the BCC */
        if (check_smb_hdr(smb))
                return -EIO;
        clc_len = smbCalcSize(smb);

        if (4 + rfclen != total_read) {
                cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
                         rfclen);
                return -EIO;
        }

        if (4 + rfclen != clc_len) {
                __u16 mid = get_mid(smb);
                /* check if bcc wrapped around for large read responses */
                if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
                        /* check if lengths match mod 64K */
                        if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
                                return 0; /* bcc wrapped */
                }
                cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
                         clc_len, 4 + rfclen, mid);

                if (4 + rfclen < clc_len) {
                        cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
                                 rfclen, mid);
                        return -EIO;
                } else if (rfclen > clc_len + 512) {
                        /*
                         * Some servers (Windows XP in particular) send more
                         * data than the lengths in the SMB packet would
                         * indicate on certain calls (byte range locks and
                         * trans2 find first calls in particular). While the
                         * client can handle such a frame by ignoring the
                         * trailing data, we choose to limit the amount of
                         * extra data to 512 bytes.
                         */
                        cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
                                 rfclen, mid);
                        return -EIO;
                }
        }
        return 0;
}

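/* Determine whether this frame is an oplock break (or dnotify) notification;
   when it matches an open file, flag the inode and queue the oplock break
   worker. */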
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
        struct smb_hdr *buf = (struct smb_hdr *)buffer;
        struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
        struct list_head *tmp, *tmp1, *tmp2;
        struct cifs_ses *ses;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *pCifsInode;
        struct cifsFileInfo *netfile;

        cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
        if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
           (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
                struct smb_com_transaction_change_notify_rsp *pSMBr =
                        (struct smb_com_transaction_change_notify_rsp *)buf;
                struct file_notify_information *pnotify;
                __u32 data_offset = 0;
                if (get_bcc(buf) > sizeof(struct file_notify_information)) {
                        data_offset = le32_to_cpu(pSMBr->DataOffset);

                        pnotify = (struct file_notify_information *)
                                ((char *)&pSMBr->hdr.Protocol + data_offset);
                        cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
                                 pnotify->FileName, pnotify->Action);
                        /*   cifs_dump_mem("Rcvd notify Data: ",buf,
                                sizeof(struct smb_hdr)+60); */
                        return true;
                }
                if (pSMBr->hdr.Status.CifsError) {
                        cifs_dbg(FYI, "notify err 0x%d\n",
                                 pSMBr->hdr.Status.CifsError);
                        return true;
                }
                return false;
        }
        if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
                return false;
        if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
                /* no sense logging error on invalid handle on oplock
                   break - harmless race between close request and oplock
                   break response is expected from time to time writing out
                   large dirty files cached on the client */
                if ((NT_STATUS_INVALID_HANDLE) ==
                   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
                        cifs_dbg(FYI, "invalid handle on oplock break\n");
                        return true;
                } else if (ERRbadfid ==
                   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
                        return true;
                } else {
                        return false; /* on valid oplock brk we get "request" */
                }
        }
        if (pSMB->hdr.WordCount != 8)
                return false;

        cifs_dbg(FYI, "oplock type 0x%d level 0x%d\n",
                 pSMB->LockType, pSMB->OplockLevel);
        if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
                return false;

        /* look up tcon based on tid & uid */
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each(tmp, &srv->smb_ses_list) {
                ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
                list_for_each(tmp1, &ses->tcon_list) {
                        tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
                        if (tcon->tid != buf->Tid)
                                continue;

                        cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
                        spin_lock(&cifs_file_list_lock);
                        list_for_each(tmp2, &tcon->openFileList) {
                                netfile = list_entry(tmp2, struct cifsFileInfo,
                                                     tlist);
                                if (pSMB->Fid != netfile->fid.netfid)
                                        continue;

                                cifs_dbg(FYI, "file id match, oplock break\n");
                                pCifsInode = CIFS_I(netfile->dentry->d_inode);

                                set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
                                        &pCifsInode->flags);

                                /*
                                 * Set flag if the server downgrades the oplock
                                 * to L2 else clear.
                                 */
                                if (pSMB->OplockLevel)
                                        set_bit(
                                           CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
                                           &pCifsInode->flags);
                                else
                                        clear_bit(
                                           CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
                                           &pCifsInode->flags);

                                queue_work(cifsiod_wq,
                                           &netfile->oplock_break);
                                netfile->oplock_break_cancelled = false;

                                spin_unlock(&cifs_file_list_lock);
                                spin_unlock(&cifs_tcp_ses_lock);
                                return true;
                        }
                        spin_unlock(&cifs_file_list_lock);
                        spin_unlock(&cifs_tcp_ses_lock);
                        cifs_dbg(FYI, "No matching file for oplock break\n");
                        return true;
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
        cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
        return true;
}

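/* Hex/ASCII dump of an SMB buffer to the kernel log; no-op unless traceSMB
   is enabled. */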
void
dump_smb(void *buf, int smb_buf_length)
{
        int i, j;
        char debug_line[17];
        unsigned char *buffer = buf;

        if (traceSMB == 0)
                return;

        for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
                if (i % 8 == 0) {
                        /* have reached the beginning of line */
                        printk(KERN_DEBUG "| ");
                        j = 0;
                }
                printk("%0#4x ", buffer[i]);
                debug_line[2 * j] = ' ';
                if (isprint(buffer[i]))
                        debug_line[1 + (2 * j)] = buffer[i];
                else
                        debug_line[1 + (2 * j)] = '_';

                if (i % 8 == 7) {
                        /* reached end of line, time to print ascii */
                        debug_line[16] = 0;
                        printk(" | %s\n", debug_line);
                }
        }
        for (; j < 8; j++) {
                printk("   ");
                debug_line[2 * j] = ' ';
                debug_line[1 + (2 * j)] = ' ';
        }
        printk(" | %s\n", debug_line);
        return;
}

void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
                cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
                cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s. This server doesn't seem to support them properly. Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n",
                         cifs_sb_master_tcon(cifs_sb)->treeName);
        }
}

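/* Translate the oplock level granted by the server into the inode's caching
   flags: exclusive -> read and write caching, level II -> read caching,
   otherwise none. */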
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
        oplock &= 0xF;

        if (oplock == OPLOCK_EXCLUSIVE) {
                cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
                cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
                         &cinode->vfs_inode);
        } else if (oplock == OPLOCK_READ) {
                cinode->oplock = CIFS_CACHE_READ_FLG;
                cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
                         &cinode->vfs_inode);
        } else
                cinode->oplock = 0;
}

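/* Action function for wait_on_bit(): sleep until woken, reporting
   -ERESTARTSYS if a signal is pending. */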
static int
cifs_oplock_break_wait(void *unused)
{
        schedule();
        return signal_pending(current) ? -ERESTARTSYS : 0;
}

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
        int rc;

start:
        rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
                         cifs_oplock_break_wait, TASK_KILLABLE);
        if (rc)
                return rc;

        spin_lock(&cinode->writers_lock);
        if (!cinode->writers)
                set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
        cinode->writers++;
        /* Check to see if we have started servicing an oplock break */
        if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
                cinode->writers--;
                if (cinode->writers == 0) {
                        clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
                        wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
                }
                spin_unlock(&cinode->writers_lock);
                goto start;
        }
        spin_unlock(&cinode->writers_lock);
        return 0;
}

void cifs_put_writer(struct cifsInodeInfo *cinode)
{
        spin_lock(&cinode->writers_lock);
        cinode->writers--;
        if (cinode->writers == 0) {
                clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
                wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
        }
        spin_unlock(&cinode->writers_lock);
}

void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
        clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
        wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
                if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
                        return true;
        }
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
                if (in_group_p(cifs_sb->mnt_backupgid))
                        return true;
        }

        return false;
}

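/* Track opens that are still in flight (pending) on a tree connection,
   protected by cifs_file_list_lock. */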
void
cifs_del_pending_open(struct cifs_pending_open *open)
{
        spin_lock(&cifs_file_list_lock);
        list_del(&open->olist);
        spin_unlock(&cifs_file_list_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
                             struct cifs_pending_open *open)
{
#ifdef CONFIG_CIFS_SMB2
        memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
#endif
        open->oplock = CIFS_OPLOCK_NO_CHANGE;
        open->tlink = tlink;
        fid->pending_open = open;
        list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
                      struct cifs_pending_open *open)
{
        spin_lock(&cifs_file_list_lock);
        cifs_add_pending_open_locked(fid, tlink, open);
        spin_unlock(&cifs_file_list_lock);
}