/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

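/* Re-walk the first (partially acknowledged) send/write WQE during a retry:
 * step the DMA state past the npsn packets that were already acked so the
 * retry resumes at the first unacknowledged packet.
 */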
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe,
					  unsigned int mask, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
		if (mask & WR_WRITE_MASK)
			wqe->iova += qp->mtu;
	}
}

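/* Rewind the requester to the completer's PSN and walk the send queue,
 * returning every unfinished WQE to the posted state so it is sent again.
 * Only the first in-flight WQE keeps its partial DMA progress; see
 * retry_first_write_send() above.
 */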
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;

	qp->req.wqe_index = consumer_index(qp->sq.queue);
	qp->req.psn = qp->comp.psn;
	qp->req.opcode = -1;

	for (wqe_index = consumer_index(qp->sq.queue);
		wqe_index != producer_index(qp->sq.queue);
		wqe_index = next_index(qp->sq.queue, wqe_index)) {
		wqe = addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, mask, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}

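/* RNR NAK timer handler: once the receiver-not-ready delay has expired,
 * kick the requester task so the blocked WQE is retried.
 */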
void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
	rxe_run_task(&qp->req.task, 1);
}

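/* Return the next WQE the requester should work on, or NULL if there is
 * nothing to do. Also handles the SQ drain transition
 * (QP_STATE_DRAIN -> QP_STATE_DRAINED) and fenced WQEs that must wait for
 * earlier work requests to complete.
 */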
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
	unsigned long flags;

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_irqsave(&qp->state_lock, flags);
		do {
			if (qp->req.state != QP_STATE_DRAIN) {
				/* comp just finished */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			if (wqe && ((qp->req.wqe_index !=
				consumer_index(qp->sq.queue)) ||
				(wqe->state != wqe_state_posted))) {
				/* comp not done yet */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} while (0);
	}

	if (qp->req.wqe_index == producer_index(qp->sq.queue))
		return NULL;

	wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);

	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
		     (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
		qp->req.wait_fence = 1;
		return NULL;
	}

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

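/* Select the next RC wire opcode for a work request, based on the opcode of
 * the previous packet and on whether the remaining payload fits in a single
 * MTU ("fits"): FIRST/MIDDLE/LAST for multi-packet messages, ONLY for
 * single-packet ones.
 */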
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;
	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

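/* UC variant of next_opcode_rc(): the same FIRST/MIDDLE/LAST/ONLY selection,
 * restricted to the opcodes an unreliable-connected QP supports.
 */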
static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

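/* Dispatch opcode selection by QP type. UD/GSI/SMI QPs only ever emit
 * single-packet SEND opcodes; anything else is an error.
 */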
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_SMI:
	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

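/* Reserve one slot of the initiator depth (outstanding RDMA read/atomic
 * operations) for this WQE. Returns -EAGAIN if the limit is currently
 * exhausted; once reserved, the slot sticks to the WQE (has_rd_atomic) so
 * a retry does not consume a second one.
 */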
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

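/* Path MTU for connected QPs, port MTU cap otherwise. */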
static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

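/* Allocate an skb for one request packet and build its headers: the BTH plus
 * whatever optional headers (RETH, IMMDT, IETH, ATMETH, DETH) the opcode mask
 * calls for. The payload itself is copied in later by fill_packet().
 */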
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_send_wqe *wqe,
				       int opcode, int payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_port *port = &rxe->port;
	struct sk_buff *skb;
	struct rxe_send_wr *ibwr = &wqe->wr;
	struct rxe_av *av;
	int pad = (-payload) & 0x3;
	int paylen;
	int solicited;
	u16 pkey;
	u32 qp_num;
	int ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	/* pkt->hdr, rxe, port_num and mask are initialized in ifc
	 * layer
	 */
	pkt->opcode = opcode;
	pkt->qp = qp;
	pkt->psn = qp->req.psn;
	pkt->mask = rxe_opcode[opcode].mask;
	pkt->paylen = paylen;
	pkt->offset = 0;
	pkt->wqe = wqe;

	/* init skb */
	av = rxe_get_av(pkt);
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	pkey = (qp_type(qp) == IB_QPT_GSI) ?
		 port->pkey_tbl[ibwr->wr.ud.pkey_index] :
		 port->pkey_tbl[qp->attr.pkey_index];

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
		    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

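/* Finish building the packet: prepare the network headers, copy the payload
 * (inline data or SGE data via copy_data()) while accumulating the CRC, and
 * store the inverted CRC as the ICRC trailer after the padded payload.
 */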
static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       struct rxe_pkt_info *pkt, struct sk_buff *skb,
		       int paylen)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 crc = 0;
	u32 *p;
	int err;

	err = rxe_prepare(pkt, skb, &crc);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			crc = rxe_crc32(rxe, crc, tmp, paylen);
			memcpy(payload_addr(pkt), tmp, paylen);

			wqe->dma.resid -= paylen;
			wqe->dma.sge_offset += paylen;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					from_mem_obj,
					&crc);
			if (err)
				return err;
		}
	}
	p = payload_addr(pkt) + paylen + bth_pad(pkt);

	*p = ~crc;

	return 0;
}

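/* RC WQEs become pending (waiting for an ack) once their last packet has been
 * built; WQEs with more packets still to send stay in processing.
 */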
static void update_wqe_state(struct rxe_qp *qp,
		struct rxe_send_wqe *wqe,
		struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}

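/* Record the PSN range covered by this WQE when its first packet is built and
 * advance the requester PSN: reads reserve one PSN per expected response
 * packet, everything else consumes a single PSN.
 */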
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   int payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

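/* save_state()/rollback_state() snapshot and restore the WQE state, its PSN
 * range and the requester PSN around rxe_xmit_packet(), so a failed transmit
 * can be undone and retried without confusing the completer.
 */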
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn = wqe->last_psn;
	*rollback_psn = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn = rollback_wqe->last_psn;
	qp->req.psn = rollback_psn;
}

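/* Commit the per-packet bookkeeping after a successful transmit: remember the
 * opcode, advance to the next WQE on the last packet, and (re)arm the
 * retransmit timer.
 */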
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt, int payload)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

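/* Main requester task: pull WQEs off the send queue and turn them into
 * packets, one packet per pass through the next_wqe loop, handing each to the
 * transmit path. Returns -EAGAIN once there is nothing more to do so the task
 * stops being rescheduled.
 */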
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	int payload;
	int mtu;
	int opcode;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;

	rxe_add_ref(qp);

next_wqe:
	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = consumer_index(qp->sq.queue);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		goto exit;
	}

	if (unlikely(qp->req.need_retry)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (wqe->mask & WR_REG_MASK) {
		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
			struct rxe_mem *rmr;

			rmr = rxe_pool_get_index(&rxe->mr_pool,
						 wqe->wr.ex.invalidate_rkey >> 8);
			if (!rmr) {
				pr_err("No mr for key %#x\n",
				       wqe->wr.ex.invalidate_rkey);
				wqe->state = wqe_state_error;
				wqe->status = IB_WC_MW_BIND_ERR;
				goto exit;
			}
			rmr->state = RXE_MEM_STATE_FREE;
			rxe_drop_ref(rmr);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
			struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);

			rmr->state = RXE_MEM_STATE_VALID;
			rmr->access = wqe->wr.wr.reg.access;
			rmr->lkey = wqe->wr.wr.reg.key;
			rmr->rkey = wqe->wr.wr.reg.key;
			rmr->iova = wqe->wr.wr.reg.mr->iova;
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else {
			goto exit;
		}
		if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
		    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
			rxe_run_task(&qp->comp.task, 1);
		qp->req.wqe_index = next_index(qp->sq.queue,
					       qp->req.wqe_index);
		goto next_wqe;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		     qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto exit;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			__rxe_do_task(&qp->comp.task);
			rxe_drop_ref(qp);
			return 0;
		}
		payload = mtu;
	}

	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
		goto err;
	}

	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
		pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
		kfree_skb(skb);
		goto err;
	}

	/*
	 * To prevent a race on wqe access between requester and completer,
	 * wqe members state and psn need to be set before calling
	 * rxe_xmit_packet().
	 * Otherwise, completer might initiate an unjustified retry flow.
	 */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);
	ret = rxe_xmit_packet(qp, &pkt, skb);
	if (ret) {
		qp->need_req_skb = 1;

		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		if (ret == -EAGAIN) {
			rxe_run_task(&qp->req.task, 1);
			goto exit;
		}

		goto err;
	}

	update_state(qp, wqe, &pkt, payload);

	goto next_wqe;

err:
	wqe->status = IB_WC_LOC_PROT_ERR;
	wqe->state = wqe_state_error;
	__rxe_do_task(&qp->comp.task);

exit:
	rxe_drop_ref(qp);
	return -EAGAIN;
}