tipc: narrow down interface towards struct tipc_link

We move the definition of struct tipc_link from link.h to link.c in
order to minimize its exposure to the rest of the code.

Where needed, we add accessor functions so that external entities can
read and update data in the link.

Apart from the above, there are no functional changes.
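
The pattern, sketched below in reduced form (only a couple of the fields
and helpers from this patch are shown), is to leave link.h with a forward
declaration plus accessor prototypes, while the full definition and the
accessors live together in link.c:

/* link.h: forward declaration and accessor prototypes only */
struct tipc_link;

u32 tipc_link_id(struct tipc_link *l);
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol);

/* link.c: full definition, visible to this file alone */
struct tipc_link {
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	/* ... remaining fields ... */
};

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}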

Reviewed-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Jon Paul Maloy, 2015-11-19 14:30:46 -05:00 (committed by David S. Miller)
parent 5be9c08671
commit 38206d5939
7 changed files with 415 additions and 345 deletions

net/tipc/bcast.c

@ -332,131 +332,15 @@ void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
tipc_sk_rcv(net, inputq);
}
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
struct tipc_stats *stats)
{
int i;
struct nlattr *nest;
struct nla_map {
__u32 key;
__u32 val;
};
struct nla_map map[] = {
{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
};
nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
if (!nest)
return -EMSGSIZE;
for (i = 0; i < ARRAY_SIZE(map); i++)
if (nla_put_u32(skb, map[i].key, map[i].val))
goto msg_full;
nla_nest_end(skb, nest);
return 0;
msg_full:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
int err;
void *hdr;
struct nlattr *attrs;
struct nlattr *prop;
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_link *bcl = tn->bcl;
if (!bcl)
return 0;
tipc_bcast_lock(net);
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_LINK_GET);
if (!hdr)
return -EMSGSIZE;
attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
if (!attrs)
goto msg_full;
/* The broadcast link is always up */
if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
goto attr_msg_full;
if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
goto attr_msg_full;
if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
goto attr_msg_full;
prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
if (!prop)
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
goto prop_msg_full;
nla_nest_end(msg->skb, prop);
err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
if (err)
goto attr_msg_full;
tipc_bcast_unlock(net);
nla_nest_end(msg->skb, attrs);
genlmsg_end(msg->skb, hdr);
return 0;
prop_msg_full:
nla_nest_cancel(msg->skb, prop);
attr_msg_full:
nla_nest_cancel(msg->skb, attrs);
msg_full:
tipc_bcast_unlock(net);
genlmsg_cancel(msg->skb, hdr);
return -EMSGSIZE;
}
int tipc_bclink_reset_stats(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_link *bcl = tn->bcl;
struct tipc_link *l = tipc_bc_sndlink(net);
if (!bcl)
if (!l)
return -ENOPROTOOPT;
tipc_bcast_lock(net);
memset(&bcl->stats, 0, sizeof(bcl->stats));
tipc_link_reset_stats(l);
tipc_bcast_unlock(net);
return 0;
}
@ -530,9 +414,7 @@ int tipc_bcast_init(struct net *net)
void tipc_bcast_reinit(struct net *net)
{
struct tipc_bc_base *b = tipc_bc_base(net);
msg_set_prevnode(b->link->pmsg, tipc_own_addr(net));
tipc_link_reinit(tipc_bc_sndlink(net), tipc_own_addr(net));
}
void tipc_bcast_stop(struct net *net)

net/tipc/link.c

@ -45,6 +45,151 @@
#include <linux/pkt_sched.h>
struct tipc_stats {
u32 sent_info; /* used in counting # sent packets */
u32 recv_info; /* used in counting # recv'd packets */
u32 sent_states;
u32 recv_states;
u32 sent_probes;
u32 recv_probes;
u32 sent_nacks;
u32 recv_nacks;
u32 sent_acks;
u32 sent_bundled;
u32 sent_bundles;
u32 recv_bundled;
u32 recv_bundles;
u32 retransmitted;
u32 sent_fragmented;
u32 sent_fragments;
u32 recv_fragmented;
u32 recv_fragments;
u32 link_congs; /* # port sends blocked by congestion */
u32 deferred_recv;
u32 duplicates;
u32 max_queue_sz; /* send queue size high water mark */
u32 accu_queue_sz; /* used for send queue size profiling */
u32 queue_sz_counts; /* used for send queue size profiling */
u32 msg_length_counts; /* used for message length profiling */
u32 msg_lengths_total; /* used for message length profiling */
u32 msg_length_profile[7]; /* used for msg. length profiling */
};
/**
* struct tipc_link - TIPC link data structure
* @addr: network address of link's peer node
* @name: link name character string
* @media_addr: media address to use when sending messages over link
* @timer: link timer
* @net: pointer to namespace struct
* @refcnt: reference counter for permanent references (owner node & timer)
* @peer_session: link session # being used by peer end of link
* @peer_bearer_id: bearer id used by link's peer endpoint
* @bearer_id: local bearer id used by link
* @tolerance: minimum link continuity loss needed to reset link [in ms]
* @keepalive_intv: link keepalive timer interval
* @abort_limit: # of unacknowledged continuity probes needed to reset link
* @state: current state of link FSM
* @peer_caps: bitmap describing capabilities of peer node
* @silent_intv_cnt: # of timer intervals without any reception from peer
* @proto_msg: template for control messages generated by link
* @pmsg: convenience pointer to "proto_msg" field
* @priority: current link priority
* @net_plane: current link network plane ('A' through 'H')
* @backlog_limit: backlog queue congestion thresholds (indexed by importance)
* @exp_msg_count: # of tunnelled messages expected during link changeover
* @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
* @mtu: current maximum packet size for this link
* @advertised_mtu: advertised own mtu when link is being established
* @transmitq: queue for sent, non-acked messages
* @backlogq: queue for messages waiting to be sent
* @snt_nxt: next sequence number to use for outbound messages
* @last_retransmitted: sequence number of most recently retransmitted message
* @stale_count: # of identical retransmit requests made by peer
* @ackers: # of peers that needs to ack each packet before it can be released
* @acked: # last packet acked by a certain peer. Used for broadcast.
* @rcv_nxt: next sequence number to expect for inbound messages
* @deferred_queue: deferred queue saved OOS b'cast message received from node
* @unacked_window: # of inbound messages rx'd without ack'ing back to peer
* @inputq: buffer queue for messages to be delivered upwards
* @namedq: buffer queue for name table messages to be delivered upwards
* @next_out: ptr to first unsent outbound message in queue
* @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
* @long_msg_seq_no: next identifier to use for outbound fragmented messages
* @reasm_buf: head of partially reassembled inbound message fragments
* @bc_rcvr: marks that this is a broadcast receiver link
* @stats: collects statistics regarding link activity
*/
struct tipc_link {
u32 addr;
char name[TIPC_MAX_LINK_NAME];
struct tipc_media_addr *media_addr;
struct net *net;
/* Management and link supervision data */
u32 peer_session;
u32 peer_bearer_id;
u32 bearer_id;
u32 tolerance;
unsigned long keepalive_intv;
u32 abort_limit;
u32 state;
u16 peer_caps;
bool active;
u32 silent_intv_cnt;
struct {
unchar hdr[INT_H_SIZE];
unchar body[TIPC_MAX_IF_NAME];
} proto_msg;
struct tipc_msg *pmsg;
u32 priority;
char net_plane;
/* Failover/synch */
u16 drop_point;
struct sk_buff *failover_reasm_skb;
/* Max packet negotiation */
u16 mtu;
u16 advertised_mtu;
/* Sending */
struct sk_buff_head transmq;
struct sk_buff_head backlogq;
struct {
u16 len;
u16 limit;
} backlog[5];
u16 snd_nxt;
u16 last_retransm;
u16 window;
u32 stale_count;
/* Reception */
u16 rcv_nxt;
u32 rcv_unacked;
struct sk_buff_head deferdq;
struct sk_buff_head *inputq;
struct sk_buff_head *namedq;
/* Congestion handling */
struct sk_buff_head wakeupq;
/* Fragmentation/reassembly */
struct sk_buff *reasm_buf;
/* Broadcast */
u16 ackers;
u16 acked;
struct tipc_link *bc_rcvlink;
struct tipc_link *bc_sndlink;
int nack_state;
bool bc_peer_is_up;
/* Statistics */
struct tipc_stats stats;
};
/*
* Error message prefixes
*/
@ -165,6 +310,36 @@ void tipc_link_set_active(struct tipc_link *l, bool active)
l->active = active;
}
u32 tipc_link_id(struct tipc_link *l)
{
return l->peer_bearer_id << 16 | l->bearer_id;
}
int tipc_link_window(struct tipc_link *l)
{
return l->window;
}
int tipc_link_prio(struct tipc_link *l)
{
return l->priority;
}
unsigned long tipc_link_tolerance(struct tipc_link *l)
{
return l->tolerance;
}
struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
return l->inputq;
}
char tipc_link_plane(struct tipc_link *l)
{
return l->net_plane;
}
void tipc_link_add_bc_peer(struct tipc_link *snd_l,
struct tipc_link *uc_l,
struct sk_buff_head *xmitq)
@ -207,11 +382,31 @@ int tipc_link_mtu(struct tipc_link *l)
return l->mtu;
}
u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
return l->rcv_nxt;
}
u16 tipc_link_acked(struct tipc_link *l)
{
return l->acked;
}
char *tipc_link_name(struct tipc_link *l)
{
return l->name;
}
static u32 link_own_addr(struct tipc_link *l)
{
return msg_prevnode(l->pmsg);
}
void tipc_link_reinit(struct tipc_link *l, u32 addr)
{
msg_set_prevnode(l->pmsg, addr);
}
/**
* tipc_link_create - create a new link
* @n: pointer to associated node
@ -674,7 +869,7 @@ void tipc_link_reset(struct tipc_link *l)
l->stats.recv_info = 0;
l->stale_count = 0;
l->bc_peer_is_up = false;
link_reset_statistics(l);
tipc_link_reset_stats(l);
}
/**
@ -1067,8 +1262,9 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
/*
* Send protocol message to the other endpoint.
*/
void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
u32 gap, u32 tolerance, u32 priority)
static void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ,
int probe_msg, u32 gap, u32 tolerance,
u32 priority)
{
struct sk_buff *skb = NULL;
struct sk_buff_head xmitq;
@ -1510,14 +1706,16 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
}
/**
* link_reset_statistics - reset link statistics
* link_reset_stats - reset link statistics
* @l_ptr: pointer to link
*/
void link_reset_statistics(struct tipc_link *l_ptr)
void tipc_link_reset_stats(struct tipc_link *l)
{
memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
l_ptr->stats.sent_info = l_ptr->snd_nxt;
l_ptr->stats.recv_info = l_ptr->rcv_nxt;
memset(&l->stats, 0, sizeof(l->stats));
if (!link_is_bc_sndlink(l)) {
l->stats.sent_info = l->snd_nxt;
l->stats.recv_info = l->rcv_nxt;
}
}
static void link_print(struct tipc_link *l, const char *str)
@ -1705,3 +1903,135 @@ int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
return -EMSGSIZE;
}
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
struct tipc_stats *stats)
{
int i;
struct nlattr *nest;
struct nla_map {
__u32 key;
__u32 val;
};
struct nla_map map[] = {
{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
};
nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
if (!nest)
return -EMSGSIZE;
for (i = 0; i < ARRAY_SIZE(map); i++)
if (nla_put_u32(skb, map[i].key, map[i].val))
goto msg_full;
nla_nest_end(skb, nest);
return 0;
msg_full:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
int err;
void *hdr;
struct nlattr *attrs;
struct nlattr *prop;
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_link *bcl = tn->bcl;
if (!bcl)
return 0;
tipc_bcast_lock(net);
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_LINK_GET);
if (!hdr)
return -EMSGSIZE;
attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
if (!attrs)
goto msg_full;
/* The broadcast link is always up */
if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
goto attr_msg_full;
if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
goto attr_msg_full;
if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
goto attr_msg_full;
prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
if (!prop)
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
goto prop_msg_full;
nla_nest_end(msg->skb, prop);
err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
if (err)
goto attr_msg_full;
tipc_bcast_unlock(net);
nla_nest_end(msg->skb, attrs);
genlmsg_end(msg->skb, hdr);
return 0;
prop_msg_full:
nla_nest_cancel(msg->skb, prop);
attr_msg_full:
nla_nest_cancel(msg->skb, attrs);
msg_full:
tipc_bcast_unlock(net);
genlmsg_cancel(msg->skb, hdr);
return -EMSGSIZE;
}
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol)
{
l->tolerance = tol;
tipc_link_proto_xmit(l, STATE_MSG, 0, 0, tol, 0);
}
void tipc_link_set_prio(struct tipc_link *l, u32 prio)
{
l->priority = prio;
tipc_link_proto_xmit(l, STATE_MSG, 0, 0, 0, prio);
}
void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
l->abort_limit = limit;
}
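
For reference, the effect on a typical caller (this mirrors the tolerance
handling in the node.c hunk further down) is that external code no longer
pokes the field and sends the protocol message itself:

/* before: caller reached into struct tipc_link directly */
link->tolerance = tol;
tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);

/* after: the link module owns both the field update and the STATE_MSG */
tipc_link_set_tolerance(link, tol);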

net/tipc/link.h

@ -45,10 +45,6 @@
*/
#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
/* Out-of-range value for link sequence numbers
*/
#define INVALID_LINK_SEQ 0x10000
/* Link FSM events:
*/
enum {
@ -75,151 +71,6 @@ enum {
*/
#define MAX_PKT_DEFAULT 1500
struct tipc_stats {
u32 sent_info; /* used in counting # sent packets */
u32 recv_info; /* used in counting # recv'd packets */
u32 sent_states;
u32 recv_states;
u32 sent_probes;
u32 recv_probes;
u32 sent_nacks;
u32 recv_nacks;
u32 sent_acks;
u32 sent_bundled;
u32 sent_bundles;
u32 recv_bundled;
u32 recv_bundles;
u32 retransmitted;
u32 sent_fragmented;
u32 sent_fragments;
u32 recv_fragmented;
u32 recv_fragments;
u32 link_congs; /* # port sends blocked by congestion */
u32 deferred_recv;
u32 duplicates;
u32 max_queue_sz; /* send queue size high water mark */
u32 accu_queue_sz; /* used for send queue size profiling */
u32 queue_sz_counts; /* used for send queue size profiling */
u32 msg_length_counts; /* used for message length profiling */
u32 msg_lengths_total; /* used for message length profiling */
u32 msg_length_profile[7]; /* used for msg. length profiling */
};
/**
* struct tipc_link - TIPC link data structure
* @addr: network address of link's peer node
* @name: link name character string
* @media_addr: media address to use when sending messages over link
* @timer: link timer
* @net: pointer to namespace struct
* @refcnt: reference counter for permanent references (owner node & timer)
* @peer_session: link session # being used by peer end of link
* @peer_bearer_id: bearer id used by link's peer endpoint
* @bearer_id: local bearer id used by link
* @tolerance: minimum link continuity loss needed to reset link [in ms]
* @keepalive_intv: link keepalive timer interval
* @abort_limit: # of unacknowledged continuity probes needed to reset link
* @state: current state of link FSM
* @peer_caps: bitmap describing capabilities of peer node
* @silent_intv_cnt: # of timer intervals without any reception from peer
* @proto_msg: template for control messages generated by link
* @pmsg: convenience pointer to "proto_msg" field
* @priority: current link priority
* @net_plane: current link network plane ('A' through 'H')
* @backlog_limit: backlog queue congestion thresholds (indexed by importance)
* @exp_msg_count: # of tunnelled messages expected during link changeover
* @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
* @mtu: current maximum packet size for this link
* @advertised_mtu: advertised own mtu when link is being established
* @transmitq: queue for sent, non-acked messages
* @backlogq: queue for messages waiting to be sent
* @snt_nxt: next sequence number to use for outbound messages
* @last_retransmitted: sequence number of most recently retransmitted message
* @stale_count: # of identical retransmit requests made by peer
* @ackers: # of peers that needs to ack each packet before it can be released
* @acked: # last packet acked by a certain peer. Used for broadcast.
* @rcv_nxt: next sequence number to expect for inbound messages
* @deferred_queue: deferred queue saved OOS b'cast message received from node
* @unacked_window: # of inbound messages rx'd without ack'ing back to peer
* @inputq: buffer queue for messages to be delivered upwards
* @namedq: buffer queue for name table messages to be delivered upwards
* @next_out: ptr to first unsent outbound message in queue
* @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
* @long_msg_seq_no: next identifier to use for outbound fragmented messages
* @reasm_buf: head of partially reassembled inbound message fragments
* @bc_rcvr: marks that this is a broadcast receiver link
* @stats: collects statistics regarding link activity
*/
struct tipc_link {
u32 addr;
char name[TIPC_MAX_LINK_NAME];
struct tipc_media_addr *media_addr;
struct net *net;
/* Management and link supervision data */
u32 peer_session;
u32 peer_bearer_id;
u32 bearer_id;
u32 tolerance;
unsigned long keepalive_intv;
u32 abort_limit;
u32 state;
u16 peer_caps;
bool active;
u32 silent_intv_cnt;
struct {
unchar hdr[INT_H_SIZE];
unchar body[TIPC_MAX_IF_NAME];
} proto_msg;
struct tipc_msg *pmsg;
u32 priority;
char net_plane;
/* Failover/synch */
u16 drop_point;
struct sk_buff *failover_reasm_skb;
/* Max packet negotiation */
u16 mtu;
u16 advertised_mtu;
/* Sending */
struct sk_buff_head transmq;
struct sk_buff_head backlogq;
struct {
u16 len;
u16 limit;
} backlog[5];
u16 snd_nxt;
u16 last_retransm;
u16 window;
u32 stale_count;
/* Reception */
u16 rcv_nxt;
u32 rcv_unacked;
struct sk_buff_head deferdq;
struct sk_buff_head *inputq;
struct sk_buff_head *namedq;
/* Congestion handling */
struct sk_buff_head wakeupq;
/* Fragmentation/reassembly */
struct sk_buff *reasm_buf;
/* Broadcast */
u16 ackers;
u16 acked;
struct tipc_link *bc_rcvlink;
struct tipc_link *bc_sndlink;
int nack_state;
bool bc_peer_is_up;
/* Statistics */
struct tipc_stats stats;
};
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
int tolerance, char net_plane, u32 mtu, int priority,
int window, u32 session, u32 ownnode, u32 peer,
@ -235,11 +86,11 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
struct sk_buff_head *namedq,
struct tipc_link *bc_sndlink,
struct tipc_link **link);
void tipc_link_reinit(struct tipc_link *l, u32 addr);
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
int mtyp, struct sk_buff_head *xmitq);
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
int tipc_link_fsm_evt(struct tipc_link *l, int evt);
void tipc_link_reset_fragments(struct tipc_link *l_ptr);
bool tipc_link_is_up(struct tipc_link *l);
bool tipc_link_peer_is_down(struct tipc_link *l);
bool tipc_link_is_reset(struct tipc_link *l);
@ -249,15 +100,24 @@ bool tipc_link_is_failingover(struct tipc_link *l);
bool tipc_link_is_blocked(struct tipc_link *l);
void tipc_link_set_active(struct tipc_link *l, bool active);
void tipc_link_reset(struct tipc_link *l_ptr);
void link_reset_statistics(struct tipc_link *l);
int tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list,
void tipc_link_reset_stats(struct tipc_link *l);
int tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list,
struct sk_buff_head *xmitq);
void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
u32 gap, u32 tolerance, u32 priority);
struct sk_buff_head *tipc_link_inputq(struct tipc_link *l);
u16 tipc_link_rcv_nxt(struct tipc_link *l);
u16 tipc_link_acked(struct tipc_link *l);
u32 tipc_link_id(struct tipc_link *l);
char *tipc_link_name(struct tipc_link *l);
char tipc_link_plane(struct tipc_link *l);
int tipc_link_prio(struct tipc_link *l);
int tipc_link_window(struct tipc_link *l);
unsigned long tipc_link_tolerance(struct tipc_link *l);
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol);
void tipc_link_set_prio(struct tipc_link *l, u32 prio);
void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit);
void tipc_link_set_queue_limits(struct tipc_link *l, u32 window);
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
struct tipc_link *link, int nlflags);
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq);
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,

net/tipc/netlink.c

@ -102,7 +102,7 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
{
.cmd = TIPC_NL_LINK_GET,
.doit = tipc_nl_node_get_link,
.dumpit = tipc_nl_link_dump,
.dumpit = tipc_nl_node_dump_link,
.policy = tipc_nl_policy,
},
{

net/tipc/netlink_compat.c

@ -1023,13 +1023,13 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
msg->req_type = TIPC_TLV_LINK_NAME;
msg->rep_size = ULTRA_STRING_MAX_LEN;
msg->rep_type = TIPC_TLV_ULTRA_STRING;
dump.dumpit = tipc_nl_link_dump;
dump.dumpit = tipc_nl_node_dump_link;
dump.format = tipc_nl_compat_link_stat_dump;
return tipc_nl_compat_dumpit(&dump, msg);
case TIPC_CMD_GET_LINKS:
msg->req_type = TIPC_TLV_NET_ADDR;
msg->rep_size = ULTRA_STRING_MAX_LEN;
dump.dumpit = tipc_nl_link_dump;
dump.dumpit = tipc_nl_node_dump_link;
dump.format = tipc_nl_compat_link_dump;
return tipc_nl_compat_dumpit(&dump, msg);
case TIPC_CMD_SET_LINK_TOL:

net/tipc/node.c

@ -42,11 +42,8 @@
#include "bcast.h"
#include "discover.h"
/* Out-of-range value for node signature */
#define INVALID_NODE_SIG 0x10000
#define INVALID_BEARER_ID -1
/* Flags used to take different actions according to flag type
* TIPC_NOTIFY_NODE_DOWN: notify node is down
* TIPC_NOTIFY_NODE_UP: notify node is up
@ -360,7 +357,8 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
n_ptr->active_links[0] = INVALID_BEARER_ID;
n_ptr->active_links[1] = INVALID_BEARER_ID;
if (!tipc_link_bc_create(net, tipc_own_addr(net), n_ptr->addr,
U16_MAX, tipc_bc_sndlink(net)->window,
U16_MAX,
tipc_link_window(tipc_bc_sndlink(net)),
n_ptr->capabilities,
&n_ptr->bc_entry.inputq1,
&n_ptr->bc_entry.namedq,
@ -381,7 +379,7 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
unsigned long tol = l->tolerance;
unsigned long tol = tipc_link_tolerance(l);
unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
unsigned long keepalive_intv = msecs_to_jiffies(intv);
@ -390,7 +388,7 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
n->keepalive_intv = keepalive_intv;
/* Ensure link's abort limit corresponds to current interval */
l->abort_limit = l->tolerance / jiffies_to_msecs(n->keepalive_intv);
tipc_link_set_abort_limit(l, tol / jiffies_to_msecs(n->keepalive_intv));
}
static void tipc_node_delete(struct tipc_node *node)
@ -559,16 +557,16 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
n->working_links++;
n->action_flags |= TIPC_NOTIFY_LINK_UP;
n->link_id = nl->peer_bearer_id << 16 | bearer_id;
n->link_id = tipc_link_id(nl);
/* Leave room for tunnel header when returning 'mtu' to users: */
n->links[bearer_id].mtu = nl->mtu - INT_H_SIZE;
n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;
tipc_bearer_add_dest(n->net, bearer_id, n->addr);
tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
pr_debug("Established link <%s> on network plane %c\n",
nl->name, nl->net_plane);
tipc_link_name(nl), tipc_link_plane(nl));
/* First link? => give it both slots */
if (!ol) {
@ -581,17 +579,17 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
}
/* Second link => redistribute slots */
if (nl->priority > ol->priority) {
pr_debug("Old link <%s> becomes standby\n", ol->name);
if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
*slot0 = bearer_id;
*slot1 = bearer_id;
tipc_link_set_active(nl, true);
tipc_link_set_active(ol, false);
} else if (nl->priority == ol->priority) {
} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
tipc_link_set_active(nl, true);
*slot1 = bearer_id;
} else {
pr_debug("New link <%s> is standby\n", nl->name);
pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
}
/* Prepare synchronization with first link */
@ -621,7 +619,7 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
struct tipc_link_entry *le = &n->links[*bearer_id];
int *slot0 = &n->active_links[0];
int *slot1 = &n->active_links[1];
int i, highest = 0;
int i, highest = 0, prio;
struct tipc_link *l, *_l, *tnl;
l = n->links[*bearer_id].link;
@ -630,12 +628,12 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
n->working_links--;
n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
n->link_id = l->peer_bearer_id << 16 | *bearer_id;
n->link_id = tipc_link_id(l);
tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
pr_debug("Lost link <%s> on network plane %c\n",
l->name, l->net_plane);
tipc_link_name(l), tipc_link_plane(l));
/* Select new active link if any available */
*slot0 = INVALID_BEARER_ID;
@ -646,10 +644,11 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
continue;
if (_l == l)
continue;
if (_l->priority < highest)
prio = tipc_link_prio(_l);
if (prio < highest)
continue;
if (_l->priority > highest) {
highest = _l->priority;
if (prio > highest) {
highest = prio;
*slot0 = i;
*slot1 = i;
continue;
@ -672,17 +671,17 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
/* There is still a working link => initiate failover */
tnl = node_active_link(n, 0);
*bearer_id = n->active_links[0];
tnl = n->links[*bearer_id].link;
tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
tipc_link_reset(l);
tipc_link_fsm_evt(l, LINK_RESET_EVT);
tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
*maddr = &n->links[tnl->bearer_id].maddr;
*bearer_id = tnl->bearer_id;
*maddr = &n->links[*bearer_id].maddr;
}
static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
@ -1117,7 +1116,7 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
tipc_node_read_lock(node);
link = node->links[bearer_id].link;
if (link) {
strncpy(linkname, link->name, len);
strncpy(linkname, tipc_link_name(link), len);
err = 0;
}
exit:
@ -1328,25 +1327,25 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
u16 oseqno = msg_seqno(hdr);
u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
u16 exp_pkts = msg_msgcnt(hdr);
u16 rcv_nxt, syncpt, dlv_nxt;
u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
int state = n->state;
struct tipc_link *l, *tnl, *pl = NULL;
struct tipc_media_addr *maddr;
int i, pb_id;
int pb_id;
l = n->links[bearer_id].link;
if (!l)
return false;
rcv_nxt = l->rcv_nxt;
rcv_nxt = tipc_link_rcv_nxt(l);
if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
return true;
/* Find parallel link, if any */
for (i = 0; i < MAX_BEARERS; i++) {
if ((i != bearer_id) && n->links[i].link) {
pl = n->links[i].link;
for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
if ((pb_id != bearer_id) && n->links[pb_id].link) {
pl = n->links[pb_id].link;
break;
}
}
@ -1378,9 +1377,9 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
syncpt = oseqno + exp_pkts - 1;
if (pl && tipc_link_is_up(pl)) {
pb_id = pl->bearer_id;
__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
tipc_skb_queue_splice_tail_init(pl->inputq, l->inputq);
tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
tipc_link_inputq(l));
}
/* If pkts arrive out of order, use lowest calculated syncpt */
if (less(syncpt, n->sync_point))
@ -1423,7 +1422,8 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
tnl = pl;
pl = l;
}
dlv_nxt = pl->rcv_nxt - mod(skb_queue_len(pl->inputq));
inputq_len = skb_queue_len(tipc_link_inputq(pl));
dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
if (more(dlv_nxt, n->sync_point)) {
tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
@ -1483,7 +1483,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
/* Ensure broadcast reception is in synch with peer's send state */
if (unlikely(usr == LINK_PROTOCOL))
tipc_bcast_sync_rcv(net, n->bc_entry.link, hdr);
else if (unlikely(n->bc_entry.link->acked != bc_ack))
else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);
/* Receive packet directly if conditions permit */
@ -1592,36 +1592,36 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
/* tipc_link_find_owner - locate owner node of link by link's name
/* tipc_node_find_by_name - locate owner node of link by link's name
* @net: the applicable net namespace
* @name: pointer to link name string
* @bearer_id: pointer to index in 'node->links' array where the link was found.
*
* Returns pointer to node owning the link, or 0 if no matching link is found.
*/
static struct tipc_node *tipc_link_find_owner(struct net *net,
const char *link_name,
unsigned int *bearer_id)
static struct tipc_node *tipc_node_find_by_name(struct net *net,
const char *link_name,
unsigned int *bearer_id)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_link *l_ptr;
struct tipc_node *n_ptr;
struct tipc_link *l;
struct tipc_node *n;
struct tipc_node *found_node = NULL;
int i;
*bearer_id = 0;
rcu_read_lock();
list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
tipc_node_read_lock(n_ptr);
list_for_each_entry_rcu(n, &tn->node_list, list) {
tipc_node_read_lock(n);
for (i = 0; i < MAX_BEARERS; i++) {
l_ptr = n_ptr->links[i].link;
if (l_ptr && !strcmp(l_ptr->name, link_name)) {
l = n->links[i].link;
if (l && !strcmp(tipc_link_name(l), link_name)) {
*bearer_id = i;
found_node = n_ptr;
found_node = n;
break;
}
}
tipc_node_read_unlock(n_ptr);
tipc_node_read_unlock(n);
if (found_node)
break;
}
@ -1658,7 +1658,7 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
if (strcmp(name, tipc_bclink_name) == 0)
return tipc_nl_bc_link_set(net, attrs);
node = tipc_link_find_owner(net, name, &bearer_id);
node = tipc_node_find_by_name(net, name, &bearer_id);
if (!node)
return -EINVAL;
@ -1684,15 +1684,13 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
u32 tol;
tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
link->tolerance = tol;
tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
tipc_link_set_tolerance(link, tol);
}
if (props[TIPC_NLA_PROP_PRIO]) {
u32 prio;
prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
link->priority = prio;
tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
tipc_link_set_prio(link, prio);
}
if (props[TIPC_NLA_PROP_WIN]) {
u32 win;
@ -1737,7 +1735,7 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
struct tipc_node *node;
struct tipc_link *link;
node = tipc_link_find_owner(net, name, &bearer_id);
node = tipc_node_find_by_name(net, name, &bearer_id);
if (!node)
return -EINVAL;
@ -1792,7 +1790,7 @@ int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
return 0;
}
node = tipc_link_find_owner(net, link_name, &bearer_id);
node = tipc_node_find_by_name(net, link_name, &bearer_id);
if (!node)
return -EINVAL;
@ -1805,7 +1803,7 @@ int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
tipc_node_read_unlock(node);
return -EINVAL;
}
link_reset_statistics(link);
tipc_link_reset_stats(link);
spin_unlock_bh(&le->lock);
tipc_node_read_unlock(node);
return 0;
@ -1834,7 +1832,7 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
return 0;
}
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct tipc_net *tn = net_generic(net, tipc_net_id);

net/tipc/node.h

@ -42,8 +42,6 @@
#include "bearer.h"
#include "msg.h"
#define INVALID_BEARER_ID -1
/* Optional capabilities supported by this code version
*/
enum {
@ -51,6 +49,7 @@ enum {
};
#define TIPC_NODE_CAPABILITIES TIPC_BCAST_SYNCH
#define INVALID_BEARER_ID -1
void tipc_node_stop(struct net *net);
void tipc_node_check_dest(struct net *net, u32 onode,
@ -72,6 +71,7 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel);
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info);