CIFS: Move get_next_mid to ops struct
Reviewed-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Pavel Shilovsky <pshilovsky@samba.org>
Signed-off-by: Steve French <sfrench@us.ibm.com>
This commit is contained in:
parent 7f0adb53bc
commit 8825736060
@@ -174,6 +174,7 @@ struct smb_version_operations {
        void (*add_credits)(struct TCP_Server_Info *, const unsigned int);
        void (*set_credits)(struct TCP_Server_Info *, const int);
        int * (*get_credits_field)(struct TCP_Server_Info *);
+       __u64 (*get_next_mid)(struct TCP_Server_Info *);
        /* data offset from read response message */
        unsigned int (*read_data_offset)(char *);
        /* data length from read response message */
@@ -399,6 +400,12 @@ set_credits(struct TCP_Server_Info *server, const int val)
        server->ops->set_credits(server, val);
 }
 
+static inline __u64
+get_next_mid(struct TCP_Server_Info *server)
+{
+       return server->ops->get_next_mid(server);
+}
+
 /*
  * Macros to allow the TCP_Server_Info->net field and related code to drop out
  * when CONFIG_NET_NS isn't set.
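The two hunks above are the core of the change: mid allocation becomes a per-dialect operation in struct smb_version_operations, and callers go through the small inline get_next_mid() wrapper rather than calling the SMB1-specific GetNextMid() directly. Below is a minimal userspace sketch of that dispatch pattern; the reduced struct definitions and the smb2_get_next_mid allocator are illustrative assumptions for this sketch, not code from the commit.

/* Minimal userspace model of the ops-table dispatch (not kernel code).
 * The struct names mirror the kernel's, but the fields are cut down to
 * what the example needs; smb2_get_next_mid is a hypothetical dialect. */
#include <stdint.h>
#include <stdio.h>

struct TCP_Server_Info;

struct smb_version_operations {
        /* each SMB dialect supplies its own multiplex-id allocator */
        uint64_t (*get_next_mid)(struct TCP_Server_Info *);
};

struct TCP_Server_Info {
        const struct smb_version_operations *ops;
        uint64_t CurrentMid;
};

/* mirrors the inline wrapper added to cifsglob.h: callers never care
 * which dialect's allocator sits behind the ops table */
static inline uint64_t get_next_mid(struct TCP_Server_Info *server)
{
        return server->ops->get_next_mid(server);
}

/* hypothetical dialect that can simply hand out a 64-bit counter */
static uint64_t smb2_get_next_mid(struct TCP_Server_Info *server)
{
        return ++server->CurrentMid;
}

static const struct smb_version_operations smb2_like_ops = {
        .get_next_mid = smb2_get_next_mid,
};

int main(void)
{
        struct TCP_Server_Info server = { .ops = &smb2_like_ops, .CurrentMid = 0 };

        /* call sites look exactly like the converted ones in the diff */
        printf("mid = %llu\n", (unsigned long long)get_next_mid(&server));
        printf("mid = %llu\n", (unsigned long long)get_next_mid(&server));
        return 0;
}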
@@ -114,7 +114,6 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
                                void **request_buf);
 extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
                           const struct nls_table *nls_cp);
-extern __u64 GetNextMid(struct TCP_Server_Info *server);
 extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
@@ -268,7 +268,7 @@ small_smb_init_no_tc(const int smb_command, const int wct,
                return rc;
 
        buffer = (struct smb_hdr *)*request_buf;
-       buffer->Mid = GetNextMid(ses->server);
+       buffer->Mid = get_next_mid(ses->server);
        if (ses->capabilities & CAP_UNICODE)
                buffer->Flags2 |= SMBFLG2_UNICODE;
        if (ses->capabilities & CAP_STATUS32)
@@ -402,7 +402,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
 
        cFYI(1, "secFlags 0x%x", secFlags);
 
-       pSMB->hdr.Mid = GetNextMid(server);
+       pSMB->hdr.Mid = get_next_mid(server);
        pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
 
        if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
@@ -782,7 +782,7 @@ CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
                return rc;
        }
 
-       pSMB->hdr.Mid = GetNextMid(ses->server);
+       pSMB->hdr.Mid = get_next_mid(ses->server);
 
        if (ses->server->sec_mode &
                        (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
@@ -4762,7 +4762,7 @@ CIFSGetDFSRefer(const int xid, struct cifs_ses *ses,
 
        /* server pointer checked in called function,
           but should never be null here anyway */
-       pSMB->hdr.Mid = GetNextMid(ses->server);
+       pSMB->hdr.Mid = get_next_mid(ses->server);
        pSMB->hdr.Tid = ses->ipc_tid;
        pSMB->hdr.Uid = ses->Suid;
        if (ses->capabilities & CAP_STATUS32)
@@ -3940,7 +3940,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
        header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
                        NULL /*no tid */ , 4 /*wct */ );
 
-       smb_buffer->Mid = GetNextMid(ses->server);
+       smb_buffer->Mid = get_next_mid(ses->server);
        smb_buffer->Uid = ses->Suid;
        pSMB = (TCONX_REQ *) smb_buffer;
        pSMBr = (TCONX_RSP *) smb_buffer_response;
@@ -212,93 +212,6 @@ cifs_small_buf_release(void *buf_to_free)
        return;
 }
 
-/*
- * Find a free multiplex id (SMB mid). Otherwise there could be
- * mid collisions which might cause problems, demultiplexing the
- * wrong response to this request. Multiplex ids could collide if
- * one of a series requests takes much longer than the others, or
- * if a very large number of long lived requests (byte range
- * locks or FindNotify requests) are pending. No more than
- * 64K-1 requests can be outstanding at one time. If no
- * mids are available, return zero. A future optimization
- * could make the combination of mids and uid the key we use
- * to demultiplex on (rather than mid alone).
- * In addition to the above check, the cifs demultiplex
- * code already used the command code as a secondary
- * check of the frame and if signing is negotiated the
- * response would be discarded if the mid were the same
- * but the signature was wrong. Since the mid is not put in the
- * pending queue until later (when it is about to be dispatched)
- * we do have to limit the number of outstanding requests
- * to somewhat less than 64K-1 although it is hard to imagine
- * so many threads being in the vfs at one time.
- */
-__u64 GetNextMid(struct TCP_Server_Info *server)
-{
-       __u64 mid = 0;
-       __u16 last_mid, cur_mid;
-       bool collision;
-
-       spin_lock(&GlobalMid_Lock);
-
-       /* mid is 16 bit only for CIFS/SMB */
-       cur_mid = (__u16)((server->CurrentMid) & 0xffff);
-       /* we do not want to loop forever */
-       last_mid = cur_mid;
-       cur_mid++;
-
-       /*
-        * This nested loop looks more expensive than it is.
-        * In practice the list of pending requests is short,
-        * fewer than 50, and the mids are likely to be unique
-        * on the first pass through the loop unless some request
-        * takes longer than the 64 thousand requests before it
-        * (and it would also have to have been a request that
-        * did not time out).
-        */
-       while (cur_mid != last_mid) {
-               struct mid_q_entry *mid_entry;
-               unsigned int num_mids;
-
-               collision = false;
-               if (cur_mid == 0)
-                       cur_mid++;
-
-               num_mids = 0;
-               list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
-                       ++num_mids;
-                       if (mid_entry->mid == cur_mid &&
-                           mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
-                               /* This mid is in use, try a different one */
-                               collision = true;
-                               break;
-                       }
-               }
-
-               /*
-                * if we have more than 32k mids in the list, then something
-                * is very wrong. Possibly a local user is trying to DoS the
-                * box by issuing long-running calls and SIGKILL'ing them. If
-                * we get to 2^16 mids then we're in big trouble as this
-                * function could loop forever.
-                *
-                * Go ahead and assign out the mid in this situation, but force
-                * an eventual reconnect to clean out the pending_mid_q.
-                */
-               if (num_mids > 32768)
-                       server->tcpStatus = CifsNeedReconnect;
-
-               if (!collision) {
-                       mid = (__u64)cur_mid;
-                       server->CurrentMid = mid;
-                       break;
-               }
-               cur_mid++;
-       }
-       spin_unlock(&GlobalMid_Lock);
-       return mid;
-}
-
 /* NB: MID can not be set if treeCon not passed in, in that
        case it is responsbility of caller to set the mid */
 void
@@ -334,7 +247,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
 
                /* Uid is not converted */
                buffer->Uid = treeCon->ses->Suid;
-               buffer->Mid = GetNextMid(treeCon->ses->server);
+               buffer->Mid = get_next_mid(treeCon->ses->server);
        }
        if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
                buffer->Flags2 |= SMBFLG2_DFS;
@@ -125,6 +125,94 @@ cifs_get_credits_field(struct TCP_Server_Info *server)
        return &server->credits;
 }
 
+/*
+ * Find a free multiplex id (SMB mid). Otherwise there could be
+ * mid collisions which might cause problems, demultiplexing the
+ * wrong response to this request. Multiplex ids could collide if
+ * one of a series requests takes much longer than the others, or
+ * if a very large number of long lived requests (byte range
+ * locks or FindNotify requests) are pending. No more than
+ * 64K-1 requests can be outstanding at one time. If no
+ * mids are available, return zero. A future optimization
+ * could make the combination of mids and uid the key we use
+ * to demultiplex on (rather than mid alone).
+ * In addition to the above check, the cifs demultiplex
+ * code already used the command code as a secondary
+ * check of the frame and if signing is negotiated the
+ * response would be discarded if the mid were the same
+ * but the signature was wrong. Since the mid is not put in the
+ * pending queue until later (when it is about to be dispatched)
+ * we do have to limit the number of outstanding requests
+ * to somewhat less than 64K-1 although it is hard to imagine
+ * so many threads being in the vfs at one time.
+ */
+static __u64
+cifs_get_next_mid(struct TCP_Server_Info *server)
+{
+       __u64 mid = 0;
+       __u16 last_mid, cur_mid;
+       bool collision;
+
+       spin_lock(&GlobalMid_Lock);
+
+       /* mid is 16 bit only for CIFS/SMB */
+       cur_mid = (__u16)((server->CurrentMid) & 0xffff);
+       /* we do not want to loop forever */
+       last_mid = cur_mid;
+       cur_mid++;
+
+       /*
+        * This nested loop looks more expensive than it is.
+        * In practice the list of pending requests is short,
+        * fewer than 50, and the mids are likely to be unique
+        * on the first pass through the loop unless some request
+        * takes longer than the 64 thousand requests before it
+        * (and it would also have to have been a request that
+        * did not time out).
+        */
+       while (cur_mid != last_mid) {
+               struct mid_q_entry *mid_entry;
+               unsigned int num_mids;
+
+               collision = false;
+               if (cur_mid == 0)
+                       cur_mid++;
+
+               num_mids = 0;
+               list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+                       ++num_mids;
+                       if (mid_entry->mid == cur_mid &&
+                           mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
+                               /* This mid is in use, try a different one */
+                               collision = true;
+                               break;
+                       }
+               }
+
+               /*
+                * if we have more than 32k mids in the list, then something
+                * is very wrong. Possibly a local user is trying to DoS the
+                * box by issuing long-running calls and SIGKILL'ing them. If
+                * we get to 2^16 mids then we're in big trouble as this
+                * function could loop forever.
+                *
+                * Go ahead and assign out the mid in this situation, but force
+                * an eventual reconnect to clean out the pending_mid_q.
+                */
+               if (num_mids > 32768)
+                       server->tcpStatus = CifsNeedReconnect;
+
+               if (!collision) {
+                       mid = (__u64)cur_mid;
+                       server->CurrentMid = mid;
+                       break;
+               }
+               cur_mid++;
+       }
+       spin_unlock(&GlobalMid_Lock);
+       return mid;
+}
+
 struct smb_version_operations smb1_operations = {
        .send_cancel = send_nt_cancel,
        .compare_fids = cifs_compare_fids,
@@ -133,6 +221,7 @@ struct smb_version_operations smb1_operations = {
        .add_credits = cifs_add_credits,
        .set_credits = cifs_set_credits,
        .get_credits_field = cifs_get_credits_field,
+       .get_next_mid = cifs_get_next_mid,
        .read_data_offset = cifs_read_data_offset,
        .read_data_length = cifs_read_data_length,
        .map_error = map_smb_to_linux_error,
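The allocator moved above searches the 16-bit mid space, skipping mid 0 and any value still outstanding on pending_mid_q, and returns 0 if every mid is busy. For readers who want to poke at that search outside the kernel, here is a simplified userspace model; the pending[] array standing in for pending_mid_q, the helper names, and the dropped locking and mid-state checks are all simplifications for this sketch, not the kernel implementation.

/* Simplified userspace model of cifs_get_next_mid()'s 16-bit search
 * (no locking, no kernel lists): pending[] stands in for pending_mid_q. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool mid_in_use(const uint16_t *pending, size_t n, uint16_t mid)
{
        for (size_t i = 0; i < n; i++)
                if (pending[i] == mid)
                        return true;
        return false;
}

/* returns 0 when every non-zero 16-bit mid is busy, like the kernel code */
static uint64_t model_get_next_mid(uint64_t *current_mid,
                                   const uint16_t *pending, size_t n)
{
        uint16_t last_mid = (uint16_t)(*current_mid & 0xffff);
        uint16_t cur_mid = last_mid + 1;
        uint64_t mid = 0;

        while (cur_mid != last_mid) {
                if (cur_mid == 0)       /* mid 0 is never handed out */
                        cur_mid++;
                if (!mid_in_use(pending, n, cur_mid)) {
                        mid = cur_mid;
                        *current_mid = mid;
                        break;
                }
                cur_mid++;
        }
        return mid;
}

int main(void)
{
        uint16_t pending[] = { 2, 3 };  /* pretend mids 2 and 3 are outstanding */
        uint64_t current_mid = 1;

        /* skips the busy mids and stays within the 16-bit space */
        printf("next mid = %llu\n",
               (unsigned long long)model_get_next_mid(&current_mid, pending, 2));
        return 0;
}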
@@ -779,7 +779,7 @@ send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
 
        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
-       pSMB->hdr.Mid = GetNextMid(ses->server);
+       pSMB->hdr.Mid = get_next_mid(ses->server);
 
        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);