/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"
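
/* Map the @up user priorities onto the device's Tx rings: each priority
 * gets a contiguous block of num_tx_rings_p_up queues.
 */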
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly amongst UP's */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

	return 0;
}
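
/* Low-latency (busy-poll) receive path: poll the RX CQ directly from the
 * caller's context instead of waiting for an interrupt.
 */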
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
	int done;

	if (!priv->port_up)
		return LL_FLUSH_FAILED;

	if (!mlx4_en_cq_lock_poll(cq))
		return LL_FLUSH_BUSY;

	done = mlx4_en_process_rx_cq(dev, cq, 4);
	if (likely(done))
		rx_ring->cleaned += done;
	else
		rx_ring->misses++;

	mlx4_en_cq_unlock_poll(cq);

	return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return -EPROTONOSUPPORT;
	}
};
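
/* Worker that attaches (or re-attaches) an aRFS filter as a flow steering
 * rule: ETH + IPv4 + TCP/UDP 4-tuple specs chained into one rule steering
 * matching packets to the QP of the requested RX ring.
 */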
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id < 0) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}
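
/* Hash the TCP/UDP 4-tuple into one of the driver's filter hash buckets */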
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}
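
/* Allocate and track a new aRFS filter; called under priv->filters_lock,
 * hence the atomic allocation.
 */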
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}
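
/* aRFS flow steering callback (ndo_rx_flow_steer): parse the IPv4 TCP/UDP
 * 4-tuple from @skb and steer the flow to @rxq_index, reusing an existing
 * filter when one already matches the tuple.
 */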
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif
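
/* ndo_vlan_rx_add_vid() callback: record the VID and refresh the port VLAN
 * filter while holding the device state lock.
 */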
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}
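
/* When VXLAN offload is enabled, attach a steering rule that matches any
 * VXLAN packet whose outer destination MAC is @addr and steers it to @qpn.
 */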
static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;
	struct mlx4_spec_list spec_eth_outer = { {NULL} };
	struct mlx4_spec_list spec_vxlan = { {NULL} };
	struct mlx4_spec_list spec_eth_inner = { {NULL} };

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.priority = MLX4_DOMAIN_NIC,
	};

	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return 0; /* do nothing */

	rule.port = priv->port;
	rule.qpn = qpn;
	INIT_LIST_HEAD(&rule.list);

	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;	/* any vxlan header */
	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;	/* any inner eth header */

	list_add_tail(&spec_eth_outer.list, &rule.list);
	list_add_tail(&spec_vxlan.list, &rule.list);
	list_add_tail(&spec_eth_inner.list, &rule.list);

	err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}
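
/* Attach a unicast steering entry for @mac using whichever steering mode the
 * device reports: B0 unicast attach or a device-managed flow rule.
 */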
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}
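
/* Register the device MAC and obtain the port's base QP. In A0 steering the
 * QP number is derived from the MAC table index; otherwise a QP range is
 * reserved and unicast/tunnel steering rules are attached to it.
 */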
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;
	u64 reg_id;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		goto steer_err;

	if (mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				     &priv->tunnel_reg_id))
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	entry->reg_id = reg_id;

	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, priv->port, mac);
	return err;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;
	u64 mac;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		struct hlist_head *bucket;
		unsigned int i;

		for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
			bucket = &priv->mac_hash[i];
			hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
				mac = mlx4_en_mac_to_u64(entry->mac);
				en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
				       entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);

				mlx4_unregister_mac(dev, priv->port, mac);
				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
			}
		}

		if (priv->tunnel_reg_id) {
			mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
			priv->tunnel_reg_id = 0;
		}

		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}
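
/* Swap @prev_mac for @new_mac: in hashed (non-A0) steering modes, rehash the
 * MAC entry and re-attach its steering rule; in A0 mode the MAC table entry
 * is replaced in place.
 */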
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  priv->dev->dev_addr, priv->prev_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
		memcpy(priv->prev_mac, priv->dev->dev_addr,
		       sizeof(priv->prev_mac));
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);

	mutex_lock(&mdev->state_lock);
	err = mlx4_en_do_set_mac(priv);
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}
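
/* Diff the cached multicast list in @src against @dst, tagging each entry
 * with MCLIST_ADD, MCLIST_REM or MCLIST_NONE so the caller can sync the HW.
 */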
static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * these are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * and mark them as needing to be added
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
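
/* Enter promiscuous RX: per steering mode, attach the default QP as a
 * promiscuous destination and disable the port multicast/VLAN filters.
 */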
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Disable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed disabling VLAN filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}

	/* Enable port VLAN filter */
	err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
	if (err)
		en_err(priv, "Failed enabling VLAN filter\n");
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Failed to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

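/* Sync the unicast filter in two passes: first walk mac_hash and release
 * every entry that is no longer in the netdev uc list, then walk the uc
 * list and register whatever is missing. If a MAC cannot be added (out of
 * memory, or the device ran out of MAC/steering entries) the port falls
 * back to forced promiscuous mode via MLX4_EN_FLAG_FORCE_PROMISC.
 */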
static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
				found = true;

			if (!found) {
				mac = mlx4_en_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_en_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

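/* Deferred rx-mode handler: ndo_set_rx_mode runs in atomic context, so the
 * actual firmware commands are issued from this work item under
 * mdev->state_lock. Order matters: unicast filtering first, then either
 * full promiscuous setup (which short-circuits the rest) or promiscuous
 * teardown followed by the multicast update.
 */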
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

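/* Polled RX path for netconsole-style users: each RX CQ is drained
 * directly, with interrupts fenced off by cq->lock and NAPI quiesced
 * first.
 */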
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
	}

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}

static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

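/* Adaptive RX coalescing: every sample interval the per-ring packet rate
 * decides the CQ moderation time. Between the two rate thresholds the
 * value is interpolated linearly, in effect:
 *
 *	moder_time = rx_usecs_low +
 *		     (rate - pkt_rate_low) *
 *		     (rx_usecs_high - rx_usecs_low) /
 *		     (pkt_rate_high - pkt_rate_low);
 *
 * so lightly loaded rings get low latency and busy rings get fewer
 * interrupts.
 */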
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		mlx4_en_do_set_mac(priv);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - Run tasks that need to be done periodically */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
			mlx4_en_ptp_overflow_check(mdev);

		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

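/* Port bring-up: activate RX rings and CQs, acquire the ethernet QP,
 * configure RSS and the drop QP, activate TX rings and CQs, program the
 * port (MTU/pause/default QP, optionally VXLAN), init the port, attach
 * the broadcast group and finally open the TX queues. The error labels
 * below unwind these steps in reverse order.
 */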
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		mlx4_en_cq_init_lock(cq);

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto tx_err;
		}
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

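	/* mc_list doubles as the 16-byte GID passed to mlx4_multicast_attach:
	 * bytes 10..15 carry the Ethernet MAC (all 0xff here for broadcast)
	 * and byte 5 carries the port number, which B0 steering uses to
	 * scope the rule.
	 */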
	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}

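/* Port teardown, roughly mirroring mlx4_en_start_port in reverse: stop the
 * TX path, drop promiscuous state, detach broadcast and all multicast
 * groups, flush filters and ethtool flow rules, then drain and deactivate
 * the TX and RX rings. NAPI on each RX CQ is locked out before its ring
 * goes away.
 */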
void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	/* Set port as not active */
	priv->port_up = false;

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Remove flow steering rules for the port */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ASSERT_RTNL();
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];

		local_bh_disable();
		while (!mlx4_en_cq_lock_napi(cq)) {
			pr_info("CQ %d locked\n", i);
			mdelay(1);
		}
		local_bh_enable();

		while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
			msleep(1);
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);
	}
}

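/* Watchdog handler, queued from mlx4_en_tx_timeout: recover the port by
 * running a full stop/start cycle under mdev->state_lock.
 */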
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i]->bytes = 0;
		priv->tx_ring[i]->packets = 0;
		priv->tx_ring[i]->tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
	}
}

static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev, 0);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

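/* Undo mlx4_en_alloc_resources(): free the RFS CPU rmap, destroy all
 * TX/RX rings with their CQs, and release the reserved TX QP range. */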
void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring && priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq && priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
				priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->base_tx_qpn) {
		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
		priv->base_tx_qpn = 0;
	}
}

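/* Reserve a QP range for the TX rings, then create each TX/RX ring and
 * its CQ on the NUMA node of the CPU it is expected to serve. */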
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int err;
	int node;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		node = cpu_to_node(i % num_online_cpus());
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX, node))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					   priv->base_tx_qpn + i,
					   prof->tx_ring_size, TXBB_SIZE,
					   node, i))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		node = cpu_to_node(i % num_online_cpus());
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX, node))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride,
					   node))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	if (priv->mdev->dev->caps.comp_pool) {
		priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
		if (!priv->dev->rx_cpu_rmap)
			goto err;
	}
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						prof->rx_ring_size,
						priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}
	return -ENOMEM;
}

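/* Full teardown of one port's netdev: unregister it (which closes the
 * port if up), cancel and flush pending work, detach it from mdev and
 * free rings, CQs and the ring pointer arrays. */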
void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->service_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	free_netdev(dev);
}

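/* ndo_change_mtu: validate the new MTU against the device maximum; if
 * the interface is running, restart the port to apply it, deferring to
 * the watchdog task when the restart fails or the card is down. */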
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev, 1);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

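/* SIOCSHWTSTAMP: validate the user's hwtstamp_config against device
 * capabilities.  All supported RX filters are coarsened to
 * HWTSTAMP_FILTER_ALL, and the config actually programmed is copied
 * back to user space. */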
static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	/* device doesn't support time stamping */
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
		return -EINVAL;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) {
		config.tx_type = HWTSTAMP_TX_OFF;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

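/* SIOCGHWTSTAMP: report the currently programmed time stamping config. */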
static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
			    sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
}

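/* ndo_do_ioctl: only the HW time stamping ioctls are handled here. */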
static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx4_en_hwtstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx4_en_hwtstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

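/* ndo_set_features: the only feature handled at runtime is
 * NETIF_F_LOOPBACK, which toggles the force-loopback bit carried in
 * every TX WQE and updates the port's loopback state. */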
static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	mlx4_en_update_loopback_state(netdev, features);

	return 0;

}

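/* SR-IOV ndo callbacks: thin wrappers that validate arguments and
 * forward to the mlx4 core, which owns the VF state. */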
static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;
	u64 mac_u64 = mlx4_en_mac_to_u64(mac);

	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
}

static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
}

static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}

static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
}

static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}

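/* ndo_get_phys_port_id: expose the 64-bit physical port ID from the
 * device caps as a big-endian byte array, or -EOPNOTSUPP if the
 * device did not report one. */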
#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
				    struct netdev_phys_port_id *ppid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int i;
	u64 phys_port_id = mdev->caps.phys_port_id[priv->port];

	if (!phys_port_id)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(phys_port_id);
	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
		ppid->id[i] = phys_port_id & 0xff;
		phys_port_id >>= 8;
	}
	return 0;
}

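/* netdev callbacks used when this function is not the SR-IOV master. */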
static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_do_ioctl		= mlx4_en_ioctl,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= mlx4_en_low_latency_recv,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
};

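/* Same callbacks plus the ndo_*vf* hooks; installed when this function
 * is the SR-IOV master (see mlx4_en_init_netdev). */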
static const struct net_device_ops mlx4_netdev_ops_master = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= mlx4_en_set_vf_mac,
	.ndo_set_vf_vlan	= mlx4_en_set_vf_vlan,
	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
	.ndo_set_vf_link_state	= mlx4_en_set_vf_link_state,
	.ndo_get_vf_config	= mlx4_en_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
};

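/* Create and register the netdev for one physical port: allocate a
 * multiqueue ethdev, set up priv state and work items, validate (or,
 * on a slave, randomize) the MAC, allocate rings and CQs, install the
 * appropriate ndo table and feature flags, then register the netdev
 * and push the port configuration to firmware. */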
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;
	u64 mac_u64;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
			MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;

	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "enabling only PFC DCB ops\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		if (mlx4_is_slave(priv->mdev->dev)) {
			eth_hw_addr_random(dev);
			en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
			mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr);
			mdev->dev->caps.def_mac[priv->port] = mac_u64;
		} else {
			en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
			       priv->port, dev->dev_addr);
			err = -EINVAL;
			goto out;
		}
	}

	memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	/* Initialize time stamping config */
	priv->hwtstamp_config.flags = 0;
	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
	priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	if (mlx4_is_master(priv->mdev->dev))
		dev->netdev_ops = &mlx4_netdev_ops_master;
	else
		dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
					NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
		dev->features |= NETIF_F_GSO_UDP_TUNNEL;
	}

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	mlx4_en_set_default_moderation(priv);

	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto out;
		}
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}