IB/core: Rename ib_destroy_ah to rdma_destroy_ah

Rename ib_destroy_ah to rdma_destroy_ah so it is in sync with the
rename of the ib address handle attribute (rdma_ah_attr).

Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Don Hiatt <don.hiatt@intel.com>
Reviewed-by: Sean Hefty <sean.hefty@intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Dasaratharaman Chandramouli <dasaratharaman.chandramouli@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit 3652315934 (parent bfbfd661c9)
Author: Dasaratharaman Chandramouli <dasaratharaman.chandramouli@intel.com>
Date: 2017-04-29 14:41:22 -04:00
Committer: Doug Ledford <dledford@redhat.com>
16 changed files with 41 additions and 41 deletions
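
The change at every call site below is mechanical: only the function name changes, and the signature int rdma_destroy_ah(struct ib_ah *ah) stays the same. As a minimal sketch of the recurring pattern (the handler name is hypothetical; the types are the ones used in the hunks below):

#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>

/* Hypothetical MAD send-completion handler, mirroring the call sites in this
 * patch: the address handle attached to the finished send is released with
 * the renamed helper, then the send buffer is freed. */
static void example_mad_send_done(struct ib_mad_agent *agent,
				  struct ib_mad_send_wc *mad_send_wc)
{
	rdma_destroy_ah(mad_send_wc->send_buf->ah);	/* was: ib_destroy_ah() */
	ib_free_send_mad(mad_send_wc->send_buf);
}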


@@ -137,13 +137,13 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
err2:
ib_free_send_mad(send_buf);
err1:
- ib_destroy_ah(ah);
+ rdma_destroy_ah(ah);
}
static void agent_send_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_send_wc)
{
- ib_destroy_ah(mad_send_wc->send_buf->ah);
+ rdma_destroy_ah(mad_send_wc->send_buf->ah);
ib_free_send_mad(mad_send_wc->send_buf);
}


@@ -355,7 +355,7 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(m)) {
- ib_destroy_ah(ah);
+ rdma_destroy_ah(ah);
ret = PTR_ERR(m);
goto out;
}
@@ -390,7 +390,7 @@ static int cm_alloc_response_msg(struct cm_port *port,
GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(m)) {
- ib_destroy_ah(ah);
+ rdma_destroy_ah(ah);
return PTR_ERR(m);
}
m->ah = ah;
@@ -400,7 +400,7 @@ static int cm_alloc_response_msg(struct cm_port *port,
static void cm_free_msg(struct ib_mad_send_buf *msg)
{
- ib_destroy_ah(msg->ah);
+ rdma_destroy_ah(msg->ah);
if (msg->context[0])
cm_deref_id(msg->context[0]);
ib_free_send_mad(msg);


@@ -81,7 +81,7 @@ static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
deref_rmpp_recv(rmpp_recv);
wait_for_completion(&rmpp_recv->comp);
- ib_destroy_ah(rmpp_recv->ah);
+ rdma_destroy_ah(rmpp_recv->ah);
kfree(rmpp_recv);
}
@@ -171,7 +171,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
hdr_len, 0, GFP_KERNEL,
IB_MGMT_BASE_VERSION);
if (IS_ERR(msg))
- ib_destroy_ah(ah);
+ rdma_destroy_ah(ah);
else {
msg->ah = ah;
msg->context[0] = ah;
@@ -201,7 +201,7 @@ static void ack_ds_ack(struct ib_mad_agent_private *agent,
ret = ib_post_send_mad(msg, NULL);
if (ret) {
- ib_destroy_ah(msg->ah);
+ rdma_destroy_ah(msg->ah);
ib_free_send_mad(msg);
}
}
@@ -209,7 +209,7 @@ static void ack_ds_ack(struct ib_mad_agent_private *agent,
void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
- ib_destroy_ah(mad_send_wc->send_buf->ah);
+ rdma_destroy_ah(mad_send_wc->send_buf->ah);
ib_free_send_mad(mad_send_wc->send_buf);
}
@@ -237,7 +237,7 @@ static void nack_recv(struct ib_mad_agent_private *agent,
ret = ib_post_send_mad(msg, NULL);
if (ret) {
- ib_destroy_ah(msg->ah);
+ rdma_destroy_ah(msg->ah);
ib_free_send_mad(msg);
}
}


@@ -1027,7 +1027,7 @@ static void free_sm_ah(struct kref *kref)
{
struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
- ib_destroy_ah(sm_ah->ah);
+ rdma_destroy_ah(sm_ah->ah);
kfree(sm_ah);
}


@@ -197,7 +197,7 @@ static void send_handler(struct ib_mad_agent *agent,
struct ib_umad_packet *packet = send_wc->send_buf->context[0];
dequeue_send(file, packet);
- ib_destroy_ah(packet->msg->ah);
+ rdma_destroy_ah(packet->msg->ah);
ib_free_send_mad(packet->msg);
if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
@@ -596,7 +596,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
err_msg:
ib_free_send_mad(packet->msg);
err_ah:
- ib_destroy_ah(ah);
+ rdma_destroy_ah(ah);
err_up:
mutex_unlock(&file->mutex);
err:


@@ -2588,7 +2588,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
return in_len;
err_copy:
- ib_destroy_ah(ah);
+ rdma_destroy_ah(ah);
err_put:
uobj_put_obj_read(pd);


@@ -41,7 +41,7 @@
static int uverbs_free_ah(struct ib_uobject *uobject,
enum rdma_remove_reason why)
{
- return ib_destroy_ah((struct ib_ah *)uobject->object);
+ return rdma_destroy_ah((struct ib_ah *)uobject->object);
}
static int uverbs_free_flow(struct ib_uobject *uobject,


@@ -587,7 +587,7 @@ int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
}
EXPORT_SYMBOL(rdma_query_ah);
- int ib_destroy_ah(struct ib_ah *ah)
+ int rdma_destroy_ah(struct ib_ah *ah)
{
struct ib_pd *pd;
int ret;
@@ -599,7 +599,7 @@ int ib_destroy_ah(struct ib_ah *ah)
return ret;
}
- EXPORT_SYMBOL(ib_destroy_ah);
+ EXPORT_SYMBOL(rdma_destroy_ah);
/* Shared receive queues */
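
The middle of the renamed function is elided in the hunk above. As a rough, hedged approximation of what it does (behaviour is untouched by this patch, only the exported name changes): the device's destroy_ah hook is called and, on success, the reference on the parent PD taken at AH creation is dropped.

/* Approximate reconstruction of the body elided in the hunk above;
 * the authoritative version is in drivers/infiniband/core/verbs.c. */
int rdma_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);	/* provider-specific teardown */
	if (!ret)
		atomic_dec(&pd->usecnt);	/* drop the PD reference held by the AH */

	return ret;
}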


@@ -207,7 +207,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
spin_lock_irqsave(&dev->sm_lock, flags);
if (dev->sm_ah[port_num - 1])
- ib_destroy_ah(dev->sm_ah[port_num - 1]);
+ rdma_destroy_ah(dev->sm_ah[port_num - 1]);
dev->sm_ah[port_num - 1] = new_ah;
spin_unlock_irqrestore(&dev->sm_lock, flags);
}
@@ -580,7 +580,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
if (tun_qp->tx_ring[tun_tx_ix].ah)
- ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
+ rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
tun_qp->tx_ring[tun_tx_ix].ah = ah;
ib_dma_sync_single_for_cpu(&dev->ib_dev,
tun_qp->tx_ring[tun_tx_ix].buf.map,
@@ -653,7 +653,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
spin_unlock(&tun_qp->tx_lock);
tun_qp->tx_ring[tun_tx_ix].ah = NULL;
end:
- ib_destroy_ah(ah);
+ rdma_destroy_ah(ah);
return ret;
}
@@ -1018,7 +1018,7 @@ static void send_handler(struct ib_mad_agent *agent,
struct ib_mad_send_wc *mad_send_wc)
{
if (mad_send_wc->send_buf->context[0])
- ib_destroy_ah(mad_send_wc->send_buf->context[0]);
+ rdma_destroy_ah(mad_send_wc->send_buf->context[0]);
ib_free_send_mad(mad_send_wc->send_buf);
}
@@ -1073,7 +1073,7 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
}
if (dev->sm_ah[p])
- ib_destroy_ah(dev->sm_ah[p]);
+ rdma_destroy_ah(dev->sm_ah[p]);
}
}
@@ -1410,7 +1410,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
if (sqp->tx_ring[wire_tx_ix].ah)
- ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
+ rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
sqp->tx_ring[wire_tx_ix].ah = ah;
ib_dma_sync_single_for_cpu(&dev->ib_dev,
sqp->tx_ring[wire_tx_ix].buf.map,
@@ -1455,7 +1455,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
spin_unlock(&sqp->tx_lock);
sqp->tx_ring[wire_tx_ix].ah = NULL;
out:
- ib_destroy_ah(ah);
+ rdma_destroy_ah(ah);
return ret;
}
@@ -1714,7 +1714,7 @@ static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
tx_buf_size, DMA_TO_DEVICE);
kfree(tun_qp->tx_ring[i].buf.addr);
if (tun_qp->tx_ring[i].ah)
- ib_destroy_ah(tun_qp->tx_ring[i].ah);
+ rdma_destroy_ah(tun_qp->tx_ring[i].ah);
}
kfree(tun_qp->tx_ring);
kfree(tun_qp->ring);
@@ -1746,7 +1746,7 @@ static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
pr_debug("received tunnel send completion:"
"wrid=0x%llx, status=0x%x\n",
wc.wr_id, wc.status);
- ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
+ rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
(MLX4_NUM_TUNNEL_BUFS - 1)].ah);
tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
= NULL;
@@ -1763,7 +1763,7 @@ static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
" status = %d, wrid = 0x%llx\n",
ctx->slave, wc.status, wc.wr_id);
if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
- ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
+ rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
(MLX4_NUM_TUNNEL_BUFS - 1)].ah);
tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
= NULL;
@@ -1900,7 +1900,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
if (wc.status == IB_WC_SUCCESS) {
switch (wc.opcode) {
case IB_WC_SEND:
- ib_destroy_ah(sqp->tx_ring[wc.wr_id &
+ rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
(MLX4_NUM_TUNNEL_BUFS - 1)].ah);
sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
= NULL;
@@ -1930,7 +1930,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
" status = %d, wrid = 0x%llx\n",
ctx->slave, wc.status, wc.wr_id);
if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
- ib_destroy_ah(sqp->tx_ring[wc.wr_id &
+ rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
(MLX4_NUM_TUNNEL_BUFS - 1)].ah);
sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
= NULL;


@@ -93,7 +93,7 @@ static void update_sm_ah(struct mthca_dev *dev,
spin_lock_irqsave(&dev->sm_lock, flags);
if (dev->sm_ah[port_num - 1])
- ib_destroy_ah(dev->sm_ah[port_num - 1]);
+ rdma_destroy_ah(dev->sm_ah[port_num - 1]);
dev->sm_ah[port_num - 1] = new_ah;
spin_unlock_irqrestore(&dev->sm_lock, flags);
}
@@ -345,6 +345,6 @@ void mthca_free_agents(struct mthca_dev *dev)
}
if (dev->sm_ah[p])
- ib_destroy_ah(dev->sm_ah[p]);
+ rdma_destroy_ah(dev->sm_ah[p]);
}
}


@@ -2500,5 +2500,5 @@ void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx)
del_timer_sync(&dd->pport[port_idx].cong_stats.timer);
if (dd->pport[port_idx].ibport_data.smi_ah)
- ib_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah);
+ rdma_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah);
}


@@ -160,7 +160,7 @@ void rvt_free_mad_agents(struct rvt_dev_info *rdi)
ib_unregister_mad_agent(agent);
}
if (rvp->sm_ah) {
- ib_destroy_ah(&rvp->sm_ah->ibah);
+ rdma_destroy_ah(&rvp->sm_ah->ibah);
rvp->sm_ah = NULL;
}


@@ -658,7 +658,7 @@ static void __ipoib_reap_ah(struct net_device *dev)
list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
list_del(&ah->list);
- ib_destroy_ah(ah->ah);
+ rdma_destroy_ah(ah->ah);
kfree(ah);
}


@@ -603,7 +603,7 @@ static void vema_set(struct opa_vnic_vema_port *port,
static void vema_send(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_wc)
{
- ib_destroy_ah(mad_wc->send_buf->ah);
+ rdma_destroy_ah(mad_wc->send_buf->ah);
ib_free_send_mad(mad_wc->send_buf);
}
@@ -677,7 +677,7 @@ static void vema_recv(struct ib_mad_agent *mad_agent,
ib_free_send_mad(rsp);
err_rsp:
- ib_destroy_ah(ah);
+ rdma_destroy_ah(ah);
free_recv_mad:
ib_free_recv_mad(mad_wc);
}
@@ -842,7 +842,7 @@ void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter,
}
err_sndbuf:
- ib_destroy_ah(ah);
+ rdma_destroy_ah(ah);
err_exit:
v_err("Aborting trap\n");
}


@@ -417,7 +417,7 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_wc)
{
- ib_destroy_ah(mad_wc->send_buf->ah);
+ rdma_destroy_ah(mad_wc->send_buf->ah);
ib_free_send_mad(mad_wc->send_buf);
}
@@ -481,7 +481,7 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
ib_free_send_mad(rsp);
err_rsp:
- ib_destroy_ah(ah);
+ rdma_destroy_ah(ah);
err:
ib_free_recv_mad(mad_wc);
}


@@ -2796,10 +2796,10 @@ int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
/**
- * ib_destroy_ah - Destroys an address handle.
+ * rdma_destroy_ah - Destroys an address handle.
* @ah: The address handle to destroy.
*/
- int ib_destroy_ah(struct ib_ah *ah);
+ int rdma_destroy_ah(struct ib_ah *ah);
/**
* ib_create_srq - Creates a SRQ associated with the specified protection