Merge branch 'aquantia-next'

Igor Russkikh says:

====================
Aquantia Marvell atlantic driver updates 11-2019

Here is a set of new features and updates for the atlantic driver.

Shortlist:
- I am adding ethtool private flags for various loopback test modes,
- Nikita is doing some work here on power management, implementing the new PM API;
  he also did some checkpatch-style cleanup of older driver parts.
- I am also adding UDP GSO offload support and the flags for loopback activation.
- We are now Marvell, so I am updating the email addresses in the MAINTAINERS list.

v2: styling fixes, correct ip6 handling in udpgso
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2019-11-07 19:54:43 -08:00
commit a9ae168303
24 changed files with 1118 additions and 552 deletions


@ -1,5 +1,5 @@
aQuantia AQtion Driver for the aQuantia Multi-Gigabit PCI Express Family of
Ethernet Adapters
Marvell(Aquantia) AQtion Driver for the aQuantia Multi-Gigabit PCI Express
Family of Ethernet Adapters
=============================================================================
Contents
@ -325,6 +325,46 @@ Supported ethtool options
Example:
ethtool -N eth0 flow-type udp4 action 0 loc 32
UDP GSO hardware offload
---------------------------------
UDP GSO allows boosting UDP tx rates by offloading UDP header generation
to hardware. A special userspace socket option is required for this; it
can be validated with the kernel selftest
/kernel/tools/testing/selftests/net/
udpgso_bench_tx -u -4 -D 10.0.1.1 -s 6300 -S 100
which sends out 100-byte UDP packets formed from a single
6300-byte user buffer.
UDP GSO is configured by:
ethtool -K eth0 tx-udp-segmentation on
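As a rough illustration of the userspace side (not part of this patch), the
sender requests segmentation via the UDP_SEGMENT socket option, similar to
what udpgso_bench_tx does internally. The destination address, port and
sizes below mirror the selftest invocation above and are otherwise
arbitrary; UDP_SEGMENT comes from include/uapi/linux/udp.h.

#include <netinet/in.h>
#include <netinet/udp.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103	/* include/uapi/linux/udp.h */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst = { .sin_family = AF_INET,
				   .sin_port = htons(9000) };
	int gso_size = 100;	/* each wire packet carries 100 bytes */
	char buf[6300];		/* one large buffer, segmented for us */

	inet_pton(AF_INET, "10.0.1.1", &dst.sin_addr);
	memset(buf, 'x', sizeof(buf));

	/* Ask the stack/NIC to segment sends into gso_size chunks */
	if (setsockopt(fd, SOL_UDP, UDP_SEGMENT,
		       &gso_size, sizeof(gso_size)) < 0)
		perror("setsockopt(UDP_SEGMENT)");

	if (sendto(fd, buf, sizeof(buf), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}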
Private flags (testing)
---------------------------------
The Atlantic driver supports private flags for custom hardware features:
$ ethtool --show-priv-flags ethX
Private flags for ethX:
DMASystemLoopback : off
PKTSystemLoopback : off
DMANetworkLoopback : off
PHYInternalLoopback: off
PHYExternalLoopback: off
Example:
$ ethtool --set-priv-flags ethX DMASystemLoopback on
DMASystemLoopback: DMA Host loopback.
PKTSystemLoopback: Packet buffer host loopback.
DMANetworkLoopback: Network side loopback on DMA block.
PHYInternalLoopback: Internal loopback on Phy.
PHYExternalLoopback: External loopback on Phy (with loopback ethernet cable).
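For completeness, a minimal sketch (not from this series) of reading the same
bitmask programmatically through the legacy SIOCETHTOOL/ETHTOOL_GPFLAGS
ioctl. The interface name "eth0" is illustrative; bit positions follow the
driver's private-flag string set, so for atlantic bit 0 is DMASystemLoopback,
matching enum aq_priv_flags introduced in this series.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_value eval = { .cmd = ETHTOOL_GPFLAGS };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&eval;

	/* eval.data holds the driver's private-flags bitmask on success */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("priv flags: 0x%x\n", eval.data);
	else
		perror("SIOCETHTOOL");

	close(fd);
	return 0;
}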
Command Line Parameters
=======================
The following command line parameters are available on atlantic driver:
@ -426,7 +466,7 @@ Support
If an issue is identified with the released source code on the supported
kernel with a supported adapter, email the specific information related
to the issue to support@aquantia.com
to the issue to aqn_support@marvell.com
License
=======


@ -1182,10 +1182,10 @@ S: Maintained
F: drivers/media/i2c/aptina-pll.*
AQUANTIA ETHERNET DRIVER (atlantic)
M: Igor Russkikh <igor.russkikh@aquantia.com>
M: Igor Russkikh <irusskikh@marvell.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.aquantia.com
W: https://www.marvell.com/
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/aquantia/atlantic/
F: Documentation/networking/device_drivers/aquantia/atlantic.txt


@ -4,15 +4,8 @@
# aQuantia Ethernet Controller AQtion Linux Driver
# Copyright(c) 2014-2017 aQuantia Corporation.
#
# Contact Information: <rdc-drv@aquantia.com>
# aQuantia Corporation, 105 E. Tasman Dr. San Jose, CA 95134, USA
#
################################################################################
#
# Makefile for the AQtion(tm) Ethernet driver
#
obj-$(CONFIG_AQTION) += atlantic.o
atlantic-objs := aq_main.o \


@ -70,14 +70,11 @@
/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
#define AQ_NIC_FC_OFF 0U
#define AQ_NIC_FC_TX 1U
#define AQ_NIC_FC_RX 2U
#define AQ_NIC_FC_FULL 3U
#define AQ_NIC_FC_AUTO 4U
#define AQ_CFG_FC_MODE AQ_NIC_FC_FULL
/* Default WOL modes used on initialization */
#define AQ_CFG_WOL_MODES WAKE_MAGIC
#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */
#define AQ_CFG_IS_AUTONEG_DEF 1U


@ -18,7 +18,9 @@ static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
u32 regs_count = aq_nic_get_regs_count(aq_nic);
u32 regs_count;
regs_count = aq_nic_get_regs_count(aq_nic);
memset(p, 0, regs_count * sizeof(u32));
aq_nic_get_regs(aq_nic, regs, p);
@ -27,7 +29,9 @@ static void aq_ethtool_get_regs(struct net_device *ndev,
static int aq_ethtool_get_regs_len(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
u32 regs_count = aq_nic_get_regs_count(aq_nic);
u32 regs_count;
regs_count = aq_nic_get_regs_count(aq_nic);
return regs_count * sizeof(u32);
}
@ -92,11 +96,21 @@ static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
"Queue[%d] InErrors",
};
static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = {
"DMASystemLoopback",
"PKTSystemLoopback",
"DMANetworkLoopback",
"PHYInternalLoopback",
"PHYExternalLoopback",
};
static void aq_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
struct aq_nic_cfg_s *cfg;
cfg = aq_nic_get_cfg(aq_nic);
memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
ARRAY_SIZE(aq_ethtool_queue_stat_names) *
@ -107,11 +121,15 @@ static void aq_ethtool_stats(struct net_device *ndev,
static void aq_ethtool_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
u32 firmware_version = aq_nic_get_fw_version(aq_nic);
u32 regs_count = aq_nic_get_regs_count(aq_nic);
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
u32 firmware_version;
u32 regs_count;
cfg = aq_nic_get_cfg(aq_nic);
firmware_version = aq_nic_get_fw_version(aq_nic);
regs_count = aq_nic_get_regs_count(aq_nic);
strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver));
strlcat(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version));
@ -132,12 +150,15 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
static void aq_ethtool_get_strings(struct net_device *ndev,
u32 stringset, u8 *data)
{
int i, si;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
struct aq_nic_cfg_s *cfg;
u8 *p = data;
int i, si;
if (stringset == ETH_SS_STATS) {
cfg = aq_nic_get_cfg(aq_nic);
switch (stringset) {
case ETH_SS_STATS:
memcpy(p, aq_ethtool_stat_names,
sizeof(aq_ethtool_stat_names));
p = p + sizeof(aq_ethtool_stat_names);
@ -150,23 +171,63 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
p += ETH_GSTRING_LEN;
}
}
break;
case ETH_SS_PRIV_FLAGS:
memcpy(p, aq_ethtool_priv_flag_names,
sizeof(aq_ethtool_priv_flag_names));
break;
}
}
static int aq_ethtool_set_phys_id(struct net_device *ndev,
enum ethtool_phys_id_state state)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_hw_s *hw = aq_nic->aq_hw;
int ret = 0;
if (!aq_nic->aq_fw_ops->led_control)
return -EOPNOTSUPP;
mutex_lock(&aq_nic->fwreq_mutex);
switch (state) {
case ETHTOOL_ID_ACTIVE:
ret = aq_nic->aq_fw_ops->led_control(hw, AQ_HW_LED_BLINK |
AQ_HW_LED_BLINK << 2 | AQ_HW_LED_BLINK << 4);
break;
case ETHTOOL_ID_INACTIVE:
ret = aq_nic->aq_fw_ops->led_control(hw, AQ_HW_LED_DEFAULT);
break;
default:
break;
}
mutex_unlock(&aq_nic->fwreq_mutex);
return ret;
}
static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
{
int ret = 0;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
struct aq_nic_cfg_s *cfg;
int ret = 0;
cfg = aq_nic_get_cfg(aq_nic);
switch (stringset) {
case ETH_SS_STATS:
ret = ARRAY_SIZE(aq_ethtool_stat_names) +
cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
break;
case ETH_SS_PRIV_FLAGS:
ret = ARRAY_SIZE(aq_ethtool_priv_flag_names);
break;
default:
ret = -EOPNOTSUPP;
}
return ret;
}
@ -178,7 +239,9 @@ static u32 aq_ethtool_get_rss_indir_size(struct net_device *ndev)
static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
struct aq_nic_cfg_s *cfg;
cfg = aq_nic_get_cfg(aq_nic);
return sizeof(cfg->aq_rss.hash_secret_key);
}
@ -187,9 +250,11 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
struct aq_nic_cfg_s *cfg;
unsigned int i = 0U;
cfg = aq_nic_get_cfg(aq_nic);
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
if (indir) {
@ -199,6 +264,7 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
if (key)
memcpy(key, cfg->aq_rss.hash_secret_key,
sizeof(cfg->aq_rss.hash_secret_key));
return 0;
}
@ -242,9 +308,11 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
u32 *rule_locs)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
struct aq_nic_cfg_s *cfg;
int err = 0;
cfg = aq_nic_get_cfg(aq_nic);
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = cfg->vecs;
@ -269,8 +337,8 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
static int aq_ethtool_set_rxnfc(struct net_device *ndev,
struct ethtool_rxnfc *cmd)
{
int err = 0;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
int err = 0;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
@ -291,7 +359,9 @@ static int aq_ethtool_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *coal)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
struct aq_nic_cfg_s *cfg;
cfg = aq_nic_get_cfg(aq_nic);
if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON ||
cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) {
@ -305,6 +375,7 @@ static int aq_ethtool_get_coalesce(struct net_device *ndev,
coal->rx_max_coalesced_frames = 1;
coal->tx_max_coalesced_frames = 1;
}
return 0;
}
@ -312,7 +383,9 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev,
struct ethtool_coalesce *coal)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
struct aq_nic_cfg_s *cfg;
cfg = aq_nic_get_cfg(aq_nic);
/* This is not yet supported
*/
@ -354,13 +427,12 @@ static void aq_ethtool_get_wol(struct net_device *ndev,
struct ethtool_wolinfo *wol)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
struct aq_nic_cfg_s *cfg;
wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
cfg = aq_nic_get_cfg(aq_nic);
if (cfg->wol)
wol->wolopts |= WAKE_MAGIC;
wol->supported = AQ_NIC_WOL_MODES;
wol->wolopts = cfg->wol;
}
static int aq_ethtool_set_wol(struct net_device *ndev,
@ -368,14 +440,17 @@ static int aq_ethtool_set_wol(struct net_device *ndev,
{
struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
struct aq_nic_cfg_s *cfg;
int err = 0;
if (wol->wolopts & WAKE_MAGIC)
cfg->wol |= AQ_NIC_WOL_ENABLED;
else
cfg->wol &= ~AQ_NIC_WOL_ENABLED;
err = device_set_wakeup_enable(&pdev->dev, wol->wolopts);
cfg = aq_nic_get_cfg(aq_nic);
if (wol->wolopts & ~AQ_NIC_WOL_MODES)
return -EOPNOTSUPP;
cfg->wol = wol->wolopts;
err = device_set_wakeup_enable(&pdev->dev, !!cfg->wol);
return err;
}
@ -513,7 +588,7 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *pause)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
u32 fc = aq_nic->aq_nic_cfg.flow_control;
u32 fc = aq_nic->aq_nic_cfg.fc.req;
pause->autoneg = 0;
@ -535,14 +610,14 @@ static int aq_ethtool_set_pauseparam(struct net_device *ndev,
return -EOPNOTSUPP;
if (pause->rx_pause)
aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_RX;
aq_nic->aq_hw->aq_nic_cfg->fc.req |= AQ_NIC_FC_RX;
else
aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_RX;
aq_nic->aq_hw->aq_nic_cfg->fc.req &= ~AQ_NIC_FC_RX;
if (pause->tx_pause)
aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_TX;
aq_nic->aq_hw->aq_nic_cfg->fc.req |= AQ_NIC_FC_TX;
else
aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX;
aq_nic->aq_hw->aq_nic_cfg->fc.req &= ~AQ_NIC_FC_TX;
mutex_lock(&aq_nic->fwreq_mutex);
err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw);
@ -555,23 +630,28 @@ static void aq_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
struct aq_nic_cfg_s *cfg;
ring->rx_pending = aq_nic_cfg->rxds;
ring->tx_pending = aq_nic_cfg->txds;
cfg = aq_nic_get_cfg(aq_nic);
ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max;
ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max;
ring->rx_pending = cfg->rxds;
ring->tx_pending = cfg->txds;
ring->rx_max_pending = cfg->aq_hw_caps->rxds_max;
ring->tx_max_pending = cfg->aq_hw_caps->txds_max;
}
static int aq_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring)
{
int err = 0;
bool ndev_running = false;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
const struct aq_hw_caps_s *hw_caps = aq_nic_cfg->aq_hw_caps;
const struct aq_hw_caps_s *hw_caps;
bool ndev_running = false;
struct aq_nic_cfg_s *cfg;
int err = 0;
cfg = aq_nic_get_cfg(aq_nic);
hw_caps = cfg->aq_hw_caps;
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
err = -EOPNOTSUPP;
@ -585,18 +665,18 @@ static int aq_set_ringparam(struct net_device *ndev,
aq_nic_free_vectors(aq_nic);
aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
aq_nic_cfg->rxds = min(aq_nic_cfg->rxds, hw_caps->rxds_max);
aq_nic_cfg->rxds = ALIGN(aq_nic_cfg->rxds, AQ_HW_RXD_MULTIPLE);
cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
cfg->rxds = min(cfg->rxds, hw_caps->rxds_max);
cfg->rxds = ALIGN(cfg->rxds, AQ_HW_RXD_MULTIPLE);
aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
aq_nic_cfg->txds = min(aq_nic_cfg->txds, hw_caps->txds_max);
aq_nic_cfg->txds = ALIGN(aq_nic_cfg->txds, AQ_HW_TXD_MULTIPLE);
cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
cfg->txds = min(cfg->txds, hw_caps->txds_max);
cfg->txds = ALIGN(cfg->txds, AQ_HW_TXD_MULTIPLE);
for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < aq_nic_cfg->vecs;
for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < cfg->vecs;
aq_nic->aq_vecs++) {
aq_nic->aq_vec[aq_nic->aq_vecs] =
aq_vec_alloc(aq_nic, aq_nic->aq_vecs, aq_nic_cfg);
aq_vec_alloc(aq_nic, aq_nic->aq_vecs, cfg);
if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) {
err = -ENOMEM;
goto err_exit;
@ -609,12 +689,61 @@ static int aq_set_ringparam(struct net_device *ndev,
return err;
}
static u32 aq_get_msg_level(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
return aq_nic->msg_enable;
}
static void aq_set_msg_level(struct net_device *ndev, u32 data)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
aq_nic->msg_enable = data;
}
u32 aq_ethtool_get_priv_flags(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
return aq_nic->aq_nic_cfg.priv_flags;
}
int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
u32 priv_flags;
cfg = aq_nic_get_cfg(aq_nic);
priv_flags = cfg->priv_flags;
if (flags & ~AQ_PRIV_FLAGS_MASK)
return -EOPNOTSUPP;
cfg->priv_flags = flags;
if ((priv_flags ^ flags) & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
if (netif_running(ndev)) {
dev_close(ndev);
dev_open(ndev, NULL);
}
} else if ((priv_flags ^ flags) & AQ_HW_LOOPBACK_MASK) {
aq_nic_set_loopback(aq_nic);
}
return 0;
}
const struct ethtool_ops aq_ethtool_ops = {
.get_link = aq_ethtool_get_link,
.get_regs_len = aq_ethtool_get_regs_len,
.get_regs = aq_ethtool_get_regs,
.get_drvinfo = aq_ethtool_get_drvinfo,
.get_strings = aq_ethtool_get_strings,
.set_phys_id = aq_ethtool_set_phys_id,
.get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
.get_wol = aq_ethtool_get_wol,
.set_wol = aq_ethtool_set_wol,
@ -630,8 +759,12 @@ const struct ethtool_ops aq_ethtool_ops = {
.set_rxfh = aq_ethtool_set_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
.set_rxnfc = aq_ethtool_set_rxnfc,
.get_msglevel = aq_get_msg_level,
.set_msglevel = aq_set_msg_level,
.get_sset_count = aq_ethtool_get_sset_count,
.get_ethtool_stats = aq_ethtool_stats,
.get_priv_flags = aq_ethtool_get_priv_flags,
.set_priv_flags = aq_ethtool_set_priv_flags,
.get_link_ksettings = aq_ethtool_get_link_ksettings,
.set_link_ksettings = aq_ethtool_set_link_ksettings,
.get_coalesce = aq_ethtool_get_coalesce,


@ -12,5 +12,6 @@
#include "aq_common.h"
extern const struct ethtool_ops aq_ethtool_ops;
#define AQ_PRIV_FLAGS_MASK (AQ_HW_LOOPBACK_MASK)
#endif /* AQ_ETHTOOL_H */


@ -119,6 +119,23 @@ struct aq_stats_s {
#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
#define AQ_HW_LED_BLINK 0x2U
#define AQ_HW_LED_DEFAULT 0x0U
enum aq_priv_flags {
AQ_HW_LOOPBACK_DMA_SYS,
AQ_HW_LOOPBACK_PKT_SYS,
AQ_HW_LOOPBACK_DMA_NET,
AQ_HW_LOOPBACK_PHYINT_SYS,
AQ_HW_LOOPBACK_PHYEXT_SYS,
};
#define AQ_HW_LOOPBACK_MASK (BIT(AQ_HW_LOOPBACK_DMA_SYS) |\
BIT(AQ_HW_LOOPBACK_PKT_SYS) |\
BIT(AQ_HW_LOOPBACK_DMA_NET) |\
BIT(AQ_HW_LOOPBACK_PHYINT_SYS) |\
BIT(AQ_HW_LOOPBACK_PHYEXT_SYS))
struct aq_hw_s {
atomic_t flags;
u8 rbl_enabled:1;
@ -137,6 +154,7 @@ struct aq_hw_s {
atomic_t dpc;
u32 mbox_addr;
u32 rpc_addr;
u32 settings_addr;
u32 rpc_tid;
struct hw_atl_utils_fw_rpc rpc;
s64 ptp_clk_offset;
@ -276,6 +294,8 @@ struct aq_hw_ops {
u64 *timestamp);
int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
int (*hw_set_loopback)(struct aq_hw_s *self, u32 mode, bool enable);
};
struct aq_fw_ops {
@ -304,6 +324,10 @@ struct aq_fw_ops {
int (*set_flow_control)(struct aq_hw_s *self);
int (*led_control)(struct aq_hw_s *self, u32 mode);
int (*set_phyloopback)(struct aq_hw_s *self, u32 mode, bool enable);
int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
u8 *mac);


@ -59,6 +59,7 @@ u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg)
u64 value = aq_hw_read_reg(hw, reg);
value |= (u64)aq_hw_read_reg(hw, reg + 4) << 32;
return value;
}


@ -53,8 +53,8 @@ struct net_device *aq_ndev_alloc(void)
static int aq_ndev_open(struct net_device *ndev)
{
int err = 0;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
int err = 0;
err = aq_nic_init(aq_nic);
if (err < 0)
@ -74,19 +74,20 @@ static int aq_ndev_open(struct net_device *ndev)
err_exit:
if (err < 0)
aq_nic_deinit(aq_nic);
aq_nic_deinit(aq_nic, true);
return err;
}
static int aq_ndev_close(struct net_device *ndev)
{
int err = 0;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
int err = 0;
err = aq_nic_stop(aq_nic);
if (err < 0)
goto err_exit;
aq_nic_deinit(aq_nic);
aq_nic_deinit(aq_nic, true);
err_exit:
return err;
@ -120,7 +121,9 @@ static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
int err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
int err;
err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
if (err < 0)
goto err_exit;
@ -133,8 +136,8 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
static int aq_ndev_set_features(struct net_device *ndev,
netdev_features_t features)
{
bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
bool is_vlan_tx_insert = !!(features & NETIF_F_HW_VLAN_CTAG_TX);
bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
struct aq_nic_s *aq_nic = netdev_priv(ndev);
bool need_ndev_restart = false;
struct aq_nic_cfg_s *aq_cfg;


@ -41,10 +41,6 @@ static void aq_nic_update_ndev_stats(struct aq_nic_s *self);
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
struct aq_rss_parameters *rss_params = &cfg->aq_rss;
int i = 0;
static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
@ -52,6 +48,11 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
};
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
struct aq_rss_parameters *rss_params;
int i = 0;
rss_params = &cfg->aq_rss;
rss_params->hash_secret_key_size = sizeof(rss_key);
memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
@ -78,7 +79,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_rss = AQ_CFG_IS_RSS_DEF;
cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
cfg->flow_control = AQ_CFG_FC_MODE;
cfg->fc.req = AQ_CFG_FC_MODE;
cfg->wol = AQ_CFG_WOL_MODES;
cfg->mtu = AQ_CFG_MTU_DEF;
cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
@ -142,10 +144,14 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
if (err)
return err;
if (self->aq_fw_ops->get_flow_control)
self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
self->aq_nic_cfg.fc.cur = fc;
if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
pr_info("%s: link change old %d new %d\n",
AQ_CFG_DRV_NAME, self->link_status.mbps,
self->aq_hw->aq_link_status.mbps);
netdev_info(self->ndev, "%s: link change old %d new %d\n",
AQ_CFG_DRV_NAME, self->link_status.mbps,
self->aq_hw->aq_link_status.mbps);
aq_nic_update_interrupt_moderation_settings(self);
if (self->aq_ptp) {
@ -159,8 +165,6 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
* on any link event.
* We should query FW whether it negotiated FC.
*/
if (self->aq_fw_ops->get_flow_control)
self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
if (self->aq_hw_ops->hw_set_fc)
self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
}
@ -179,6 +183,7 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
netif_tx_disable(self->ndev);
aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
}
return 0;
}
@ -193,6 +198,7 @@ static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
self->aq_hw_ops->hw_irq_enable(self->aq_hw,
BIT(self->aq_nic_cfg.link_irq_vec));
return IRQ_HANDLED;
}
@ -223,7 +229,8 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
{
struct aq_nic_s *self = from_timer(self, t, service_timer);
mod_timer(&self->service_timer, jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
mod_timer(&self->service_timer,
jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
aq_ndev_schedule_work(&self->service_task);
}
@ -302,9 +309,11 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
NETIF_F_RXHASH | NETIF_F_SG |
NETIF_F_LRO | NETIF_F_TSO;
self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
@ -324,8 +333,8 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
int aq_nic_init(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
int err = 0;
unsigned int i = 0U;
int err = 0;
self->power_state = AQ_HW_POWER_STATE_D0;
mutex_lock(&self->fwreq_mutex);
@ -369,8 +378,8 @@ int aq_nic_init(struct aq_nic_s *self)
int aq_nic_start(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
int err = 0;
unsigned int i = 0U;
int err = 0;
err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
@ -404,6 +413,8 @@ int aq_nic_start(struct aq_nic_s *self)
INIT_WORK(&self->service_task, aq_nic_service_task);
aq_nic_set_loopback(self);
timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
aq_nic_service_timer_cb(&self->service_timer);
@ -460,26 +471,45 @@ int aq_nic_start(struct aq_nic_s *self)
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring)
{
unsigned int ret = 0U;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int frag_count = 0U;
unsigned int dx = ring->sw_tail;
struct aq_ring_buff_s *first = NULL;
struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
u8 ipver = ip_hdr(skb)->version;
struct aq_ring_buff_s *dx_buff;
bool need_context_tag = false;
unsigned int frag_count = 0U;
unsigned int ret = 0U;
unsigned int dx;
u8 l4proto = 0;
if (ipver == 4)
l4proto = ip_hdr(skb)->protocol;
else if (ipver == 6)
l4proto = ipv6_hdr(skb)->nexthdr;
dx = ring->sw_tail;
dx_buff = &ring->buff_ring[dx];
dx_buff->flags = 0U;
if (unlikely(skb_is_gso(skb))) {
dx_buff->mss = skb_shinfo(skb)->gso_size;
dx_buff->is_gso = 1U;
if (l4proto == IPPROTO_TCP) {
dx_buff->is_gso_tcp = 1U;
dx_buff->len_l4 = tcp_hdrlen(skb);
} else if (l4proto == IPPROTO_UDP) {
dx_buff->is_gso_udp = 1U;
dx_buff->len_l4 = sizeof(struct udphdr);
/* UDP GSO Hardware does not replace packet length. */
udp_hdr(skb)->len = htons(dx_buff->mss +
dx_buff->len_l4);
} else {
WARN_ONCE(true, "Bad GSO mode");
goto exit;
}
dx_buff->len_pkt = skb->len;
dx_buff->len_l2 = ETH_HLEN;
dx_buff->len_l3 = ip_hdrlen(skb);
dx_buff->len_l4 = tcp_hdrlen(skb);
dx_buff->len_l3 = skb_network_header_len(skb);
dx_buff->eop_index = 0xffffU;
dx_buff->is_ipv6 =
(ip_hdr(skb)->version == 6) ? 1U : 0U;
dx_buff->is_ipv6 = (ipver == 6);
need_context_tag = true;
}
@ -513,24 +543,9 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
++ret;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
1U : 0U;
if (ip_hdr(skb)->version == 4) {
dx_buff->is_tcp_cso =
(ip_hdr(skb)->protocol == IPPROTO_TCP) ?
1U : 0U;
dx_buff->is_udp_cso =
(ip_hdr(skb)->protocol == IPPROTO_UDP) ?
1U : 0U;
} else if (ip_hdr(skb)->version == 6) {
dx_buff->is_tcp_cso =
(ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
1U : 0U;
dx_buff->is_udp_cso =
(ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
1U : 0U;
}
dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol);
dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP);
dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP);
}
for (; nr_frags--; ++frag_count) {
@ -585,7 +600,8 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
--ret, dx = aq_ring_next_dx(ring, dx)) {
dx_buff = &ring->buff_ring[dx];
if (!dx_buff->is_gso && !dx_buff->is_vlan && dx_buff->pa) {
if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
!dx_buff->is_vlan && dx_buff->pa) {
if (unlikely(dx_buff->is_sop)) {
dma_unmap_single(aq_nic_get_dev(self),
dx_buff->pa,
@ -606,11 +622,11 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
struct aq_ring_s *ring = NULL;
unsigned int frags = 0U;
unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
unsigned int tc = 0U;
int err = NETDEV_TX_OK;
unsigned int tc = 0U;
frags = skb_shinfo(skb)->nr_frags + 1;
@ -623,6 +639,11 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
aq_ring_update_queue_state(ring);
if (self->aq_nic_cfg.priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
err = NETDEV_TX_BUSY;
goto err_exit;
}
/* Above status update may stop the queue. Check this. */
if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
err = NETDEV_TX_BUSY;
@ -703,6 +724,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
if (err < 0)
return err;
}
return aq_nic_set_packet_filter(self, packet_filter);
}
@ -747,10 +769,10 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
unsigned int i = 0U;
unsigned int count = 0U;
struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
unsigned int count = 0U;
unsigned int i = 0U;
if (self->aq_fw_ops->update_stats) {
mutex_lock(&self->fwreq_mutex);
@ -800,8 +822,8 @@ err_exit:;
static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
struct net_device *ndev = self->ndev;
struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
struct net_device *ndev = self->ndev;
ndev->stats.rx_packets = stats->dma_pkt_rc;
ndev->stats.rx_bytes = stats->dma_oct_rc;
@ -846,9 +868,12 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Full);
if (self->aq_nic_cfg.aq_hw_caps->flow_control)
if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
ethtool_link_ksettings_add_link_mode(cmd, supported,
Pause);
ethtool_link_ksettings_add_link_mode(cmd, supported,
Asym_Pause);
}
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
@ -882,13 +907,13 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Full);
if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
/* Asym is when either RX or TX, but not both */
if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Asym_Pause);
@ -971,6 +996,44 @@ u32 aq_nic_get_fw_version(struct aq_nic_s *self)
return fw_version;
}
int aq_nic_set_loopback(struct aq_nic_s *self)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
if (!self->aq_hw_ops->hw_set_loopback ||
!self->aq_fw_ops->set_phyloopback)
return -ENOTSUPP;
mutex_lock(&self->fwreq_mutex);
self->aq_hw_ops->hw_set_loopback(self->aq_hw,
AQ_HW_LOOPBACK_DMA_SYS,
!!(cfg->priv_flags &
BIT(AQ_HW_LOOPBACK_DMA_SYS)));
self->aq_hw_ops->hw_set_loopback(self->aq_hw,
AQ_HW_LOOPBACK_PKT_SYS,
!!(cfg->priv_flags &
BIT(AQ_HW_LOOPBACK_PKT_SYS)));
self->aq_hw_ops->hw_set_loopback(self->aq_hw,
AQ_HW_LOOPBACK_DMA_NET,
!!(cfg->priv_flags &
BIT(AQ_HW_LOOPBACK_DMA_NET)));
self->aq_fw_ops->set_phyloopback(self->aq_hw,
AQ_HW_LOOPBACK_PHYINT_SYS,
!!(cfg->priv_flags &
BIT(AQ_HW_LOOPBACK_PHYINT_SYS)));
self->aq_fw_ops->set_phyloopback(self->aq_hw,
AQ_HW_LOOPBACK_PHYEXT_SYS,
!!(cfg->priv_flags &
BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)));
mutex_unlock(&self->fwreq_mutex);
return 0;
}
int aq_nic_stop(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
@ -1000,7 +1063,20 @@ int aq_nic_stop(struct aq_nic_s *self)
return self->aq_hw_ops->hw_stop(self->aq_hw);
}
void aq_nic_deinit(struct aq_nic_s *self)
void aq_nic_set_power(struct aq_nic_s *self)
{
if (self->power_state != AQ_HW_POWER_STATE_D0 ||
self->aq_hw->aq_nic_cfg->wol)
if (likely(self->aq_fw_ops->set_power)) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->set_power(self->aq_hw,
self->power_state,
self->ndev->dev_addr);
mutex_unlock(&self->fwreq_mutex);
}
}
void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
{
struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
@ -1017,23 +1093,12 @@ void aq_nic_deinit(struct aq_nic_s *self)
aq_ptp_ring_free(self);
aq_ptp_free(self);
if (likely(self->aq_fw_ops->deinit)) {
if (likely(self->aq_fw_ops->deinit) && link_down) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->deinit(self->aq_hw);
mutex_unlock(&self->fwreq_mutex);
}
if (self->power_state != AQ_HW_POWER_STATE_D0 ||
self->aq_hw->aq_nic_cfg->wol)
if (likely(self->aq_fw_ops->set_power)) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->set_power(self->aq_hw,
self->power_state,
self->ndev->dev_addr);
mutex_unlock(&self->fwreq_mutex);
}
err_exit:;
}
@ -1054,44 +1119,6 @@ void aq_nic_free_vectors(struct aq_nic_s *self)
err_exit:;
}
int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
{
int err = 0;
if (!netif_running(self->ndev)) {
err = 0;
goto out;
}
rtnl_lock();
if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
self->power_state = AQ_HW_POWER_STATE_D3;
netif_device_detach(self->ndev);
netif_tx_stop_all_queues(self->ndev);
err = aq_nic_stop(self);
if (err < 0)
goto err_exit;
aq_nic_deinit(self);
} else {
err = aq_nic_init(self);
if (err < 0)
goto err_exit;
err = aq_nic_start(self);
if (err < 0)
goto err_exit;
netif_device_attach(self->ndev);
netif_tx_start_all_queues(self->ndev);
}
err_exit:
rtnl_unlock();
out:
return err;
}
void aq_nic_shutdown(struct aq_nic_s *self)
{
int err = 0;
@ -1108,7 +1135,8 @@ void aq_nic_shutdown(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
}
aq_nic_deinit(self);
aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol);
aq_nic_set_power(self);
err_exit:
rtnl_unlock();


@ -20,6 +20,18 @@ struct aq_vec_s;
struct aq_ptp_s;
enum aq_rx_filter_type;
enum aq_fc_mode {
AQ_NIC_FC_OFF = 0,
AQ_NIC_FC_TX,
AQ_NIC_FC_RX,
AQ_NIC_FC_FULL,
};
struct aq_fc_info {
enum aq_fc_mode req;
enum aq_fc_mode cur;
};
struct aq_nic_cfg_s {
const struct aq_hw_caps_s *aq_hw_caps;
u64 features;
@ -34,7 +46,7 @@ struct aq_nic_cfg_s {
u32 rxpageorder;
u32 num_rss_queues;
u32 mtu;
u32 flow_control;
struct aq_fc_info fc;
u32 link_speed_msk;
u32 wol;
u8 is_vlan_rx_strip;
@ -46,6 +58,7 @@ struct aq_nic_cfg_s {
bool is_polling;
bool is_rss;
bool is_lro;
u32 priv_flags;
u8 tcs;
struct aq_rss_parameters aq_rss;
u32 eee_speeds;
@ -60,7 +73,8 @@ struct aq_nic_cfg_s {
#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_NIC_FLAG_ERR_HW 0x80000000U
#define AQ_NIC_WOL_ENABLED BIT(0)
#define AQ_NIC_WOL_MODES (WAKE_MAGIC |\
WAKE_PHY)
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
@ -70,8 +84,8 @@ struct aq_hw_rx_fl2 {
};
struct aq_hw_rx_fl3l4 {
u8 active_ipv4;
u8 active_ipv6:2;
u8 active_ipv4;
u8 active_ipv6:2;
u8 is_ipv6;
u8 reserved_count;
};
@ -87,6 +101,7 @@ struct aq_hw_rx_fltrs_s {
struct aq_nic_s {
atomic_t flags;
u32 msg_enable;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
struct aq_hw_s *aq_hw;
@ -141,7 +156,8 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
int aq_nic_stop(struct aq_nic_s *self);
void aq_nic_deinit(struct aq_nic_s *self);
void aq_nic_deinit(struct aq_nic_s *self, bool link_down);
void aq_nic_set_power(struct aq_nic_s *self);
void aq_nic_free_hot_resources(struct aq_nic_s *self);
void aq_nic_free_vectors(struct aq_nic_s *self);
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
@ -155,7 +171,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
const struct ethtool_link_ksettings *cmd);
struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
u32 aq_nic_get_fw_version(struct aq_nic_s *self);
int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
int aq_nic_set_loopback(struct aq_nic_s *self);
int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
void aq_nic_shutdown(struct aq_nic_s *self);
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type);


@ -185,6 +185,7 @@ unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
return AQ_HW_IRQ_MSIX;
if (self->pdev->msi_enabled)
return AQ_HW_IRQ_MSI;
return AQ_HW_IRQ_LEGACY;
}
@ -196,12 +197,12 @@ static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
static int aq_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
struct aq_nic_s *self;
int err;
struct net_device *ndev;
resource_size_t mmio_pa;
u32 bar;
struct aq_nic_s *self;
u32 numvecs;
u32 bar;
int err;
err = pci_enable_device(pdev);
if (err)
@ -311,6 +312,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
pci_release_regions(pdev);
err_pci_func:
pci_disable_device(pdev);
return err;
}
@ -347,29 +349,98 @@ static void aq_pci_shutdown(struct pci_dev *pdev)
}
}
static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
static int aq_suspend_common(struct device *dev, bool deep)
{
struct aq_nic_s *self = pci_get_drvdata(pdev);
struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
return aq_nic_change_pm_state(self, &pm_msg);
rtnl_lock();
nic->power_state = AQ_HW_POWER_STATE_D3;
netif_device_detach(nic->ndev);
netif_tx_stop_all_queues(nic->ndev);
aq_nic_stop(nic);
if (deep) {
aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
aq_nic_set_power(nic);
}
rtnl_unlock();
return 0;
}
static int aq_pci_resume(struct pci_dev *pdev)
static int atl_resume_common(struct device *dev, bool deep)
{
struct aq_nic_s *self = pci_get_drvdata(pdev);
pm_message_t pm_msg = PMSG_RESTORE;
struct pci_dev *pdev = to_pci_dev(dev);
struct aq_nic_s *nic;
int ret;
return aq_nic_change_pm_state(self, &pm_msg);
nic = pci_get_drvdata(pdev);
rtnl_lock();
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
if (deep) {
ret = aq_nic_init(nic);
if (ret)
goto err_exit;
}
ret = aq_nic_start(nic);
if (ret)
goto err_exit;
netif_device_attach(nic->ndev);
netif_tx_start_all_queues(nic->ndev);
err_exit:
rtnl_unlock();
return ret;
}
static int aq_pm_freeze(struct device *dev)
{
return aq_suspend_common(dev, false);
}
static int aq_pm_suspend_poweroff(struct device *dev)
{
return aq_suspend_common(dev, true);
}
static int aq_pm_thaw(struct device *dev)
{
return atl_resume_common(dev, false);
}
static int aq_pm_resume_restore(struct device *dev)
{
return atl_resume_common(dev, true);
}
const struct dev_pm_ops aq_pm_ops = {
.suspend = aq_pm_suspend_poweroff,
.poweroff = aq_pm_suspend_poweroff,
.freeze = aq_pm_freeze,
.resume = aq_pm_resume_restore,
.restore = aq_pm_resume_restore,
.thaw = aq_pm_thaw,
};
static struct pci_driver aq_pci_ops = {
.name = AQ_CFG_DRV_NAME,
.id_table = aq_pci_tbl,
.probe = aq_pci_probe,
.remove = aq_pci_remove,
.suspend = aq_pci_suspend,
.resume = aq_pci_resume,
.shutdown = aq_pci_shutdown,
#ifdef CONFIG_PM
.driver.pm = &aq_pm_ops,
#endif
};
int aq_pci_func_register_driver(void)


@ -1057,7 +1057,7 @@ static struct ptp_clock_info aq_ptp_clock = {
ptp_offset[__idx].ingress = (__ingress); } \
while (0)
static void aq_ptp_offset_init_from_fw(const struct hw_aq_ptp_offset *offsets)
static void aq_ptp_offset_init_from_fw(const struct hw_atl_ptp_offset *offsets)
{
int i;
@ -1098,7 +1098,7 @@ static void aq_ptp_offset_init_from_fw(const struct hw_aq_ptp_offset *offsets)
}
}
static void aq_ptp_offset_init(const struct hw_aq_ptp_offset *offsets)
static void aq_ptp_offset_init(const struct hw_atl_ptp_offset *offsets)
{
memset(ptp_offset, 0, sizeof(ptp_offset));
@ -1106,7 +1106,7 @@ static void aq_ptp_offset_init(const struct hw_aq_ptp_offset *offsets)
}
static void aq_ptp_gpio_init(struct ptp_clock_info *info,
struct hw_aq_info *hw_info)
struct hw_atl_info *hw_info)
{
struct ptp_pin_desc pin_desc[MAX_PTP_GPIO_COUNT];
u32 extts_pin_cnt = 0;


@ -30,8 +30,8 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
struct device *dev)
{
struct page *page;
dma_addr_t daddr;
int ret = -ENOMEM;
dma_addr_t daddr;
page = dev_alloc_pages(order);
if (unlikely(!page))
@ -118,6 +118,7 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
aq_ring_free(self);
self = NULL;
}
return self;
}
@ -144,6 +145,7 @@ struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
aq_ring_free(self);
self = NULL;
}
return self;
}
@ -175,6 +177,7 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
aq_ring_free(self);
self = NULL;
}
return self;
}
@ -207,6 +210,7 @@ int aq_ring_init(struct aq_ring_s *self)
self->hw_head = 0;
self->sw_head = 0;
self->sw_tail = 0;
return 0;
}


@ -65,19 +65,20 @@ struct __packed aq_ring_buff_s {
};
union {
struct {
u16 len;
u32 len:16;
u32 is_ip_cso:1;
u32 is_udp_cso:1;
u32 is_tcp_cso:1;
u32 is_cso_err:1;
u32 is_sop:1;
u32 is_eop:1;
u32 is_gso:1;
u32 is_gso_tcp:1;
u32 is_gso_udp:1;
u32 is_mapped:1;
u32 is_cleaned:1;
u32 is_error:1;
u32 is_vlan:1;
u32 rsvd3:5;
u32 rsvd3:4;
u16 eop_index;
u16 rsvd4;
};


@ -103,8 +103,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg)
{
struct aq_vec_s *self = NULL;
struct aq_ring_s *ring = NULL;
struct aq_vec_s *self = NULL;
unsigned int i = 0U;
int err = 0;
@ -159,6 +159,7 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
aq_vec_free(self);
self = NULL;
}
return self;
}
@ -263,6 +264,7 @@ void aq_vec_deinit(struct aq_vec_s *self)
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
}
err_exit:;
}
@ -305,8 +307,8 @@ irqreturn_t aq_vec_isr(int irq, void *private)
irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
struct aq_vec_s *self = private;
irqreturn_t err = 0;
u64 irq_mask = 0U;
int err;
if (!self)
return IRQ_NONE;
@ -361,9 +363,9 @@ void aq_vec_add_stats(struct aq_vec_s *self,
int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
{
unsigned int count = 0U;
struct aq_ring_stats_rx_s stats_rx;
struct aq_ring_stats_tx_s stats_tx;
unsigned int count = 0U;
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));


@ -119,10 +119,10 @@ static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
{
u32 tc = 0U;
u32 buff_size = 0U;
unsigned int i_priority = 0U;
bool is_rx_flow_control = false;
unsigned int i_priority = 0U;
u32 buff_size = 0U;
u32 tc = 0U;
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@ -155,7 +155,7 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
/* QoS Rx buf size per TC */
tc = 0;
is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->fc.req);
buff_size = HW_ATL_A0_RXBUF_MAX;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@ -180,9 +180,9 @@ static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
unsigned int i = 0U;
int err = 0;
u32 val;
for (i = 10, addr = 0U; i--; ++addr) {
@ -207,12 +207,12 @@ static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
u8 *indirection_table = rss_params->indirection_table;
u32 i = 0U;
u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
int err = 0;
u8 *indirection_table = rss_params->indirection_table;
u16 bitary[1 + (HW_ATL_A0_RSS_REDIRECTION_MAX *
HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
int err = 0;
u32 i = 0U;
u32 val;
memset(bitary, 0, sizeof(bitary));
@ -321,9 +321,9 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
int err = 0;
unsigned int h = 0U;
unsigned int l = 0U;
int err = 0;
if (!mac_addr) {
err = -EINVAL;
@ -352,10 +352,9 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
int err = 0;
struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_a0_hw_init_tx_path(self);
hw_atl_a0_hw_init_rx_path(self);
@ -404,6 +403,7 @@ static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
@ -411,6 +411,7 @@ static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
@ -418,6 +419,7 @@ static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
hw_atl_tpb_tx_buff_en_set(self, 1);
hw_atl_rpb_rx_buff_en_set(self, 1);
return aq_hw_err_from_flags(self);
}
@ -425,6 +427,7 @@ static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
return 0;
}
@ -435,8 +438,8 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_txd_s *txd = NULL;
unsigned int buff_pa_len = 0U;
unsigned int pkt_len = 0U;
unsigned int frag_count = 0U;
unsigned int pkt_len = 0U;
bool is_gso = false;
buff = &ring->buff_ring[ring->sw_tail];
@ -451,7 +454,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->sw_tail];
if (buff->is_gso) {
if (buff->is_gso_tcp) {
txd->ctl |= (buff->len_l3 << 31) |
(buff->len_l2 << 24) |
HW_ATL_A0_TXD_CTL_CMD_TCP |
@ -500,6 +503,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
}
hw_atl_a0_hw_tx_ring_tail_update(self, ring);
return aq_hw_err_from_flags(self);
}
@ -507,8 +511,8 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
@ -549,8 +553,8 @@ static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
aq_ring->idx);
@ -599,8 +603,8 @@ static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
int err = 0;
unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
int err = 0;
if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
@ -720,6 +724,7 @@ static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
(1U << HW_ATL_A0_ERR_INT));
return aq_hw_err_from_flags(self);
}
@ -737,6 +742,7 @@ static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
*mask = hw_atl_itr_irq_statuslsw_get(self);
return aq_hw_err_from_flags(self);
}
@ -859,6 +865,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
{
hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);
return aq_hw_err_from_flags(self);
}
@ -866,6 +873,7 @@ static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
@ -873,6 +881,7 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}


@ -43,7 +43,9 @@
NETIF_F_NTUPLE | \
NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX, \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_GSO_UDP_L4 | \
NETIF_F_GSO_PARTIAL, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
@ -107,14 +109,15 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
{
hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
return 0;
}
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
u32 tc = 0U;
u32 buff_size = 0U;
unsigned int i_priority = 0U;
u32 buff_size = 0U;
u32 tc = 0U;
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@ -167,7 +170,7 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
(1024U / 32U) * 50U) /
100U, tc);
hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
/* Init TC2 for PTP_RX */
tc = 2;
@ -188,9 +191,9 @@ static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
unsigned int i = 0U;
int err = 0;
u32 val;
for (i = 10, addr = 0U; i--; ++addr) {
@ -215,12 +218,12 @@ static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
u8 *indirection_table = rss_params->indirection_table;
u32 i = 0U;
u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
int err = 0;
u8 *indirection_table = rss_params->indirection_table;
u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
int err = 0;
u32 i = 0U;
u32 val;
memset(bitary, 0, sizeof(bitary));
@ -304,6 +307,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_itr_rsc_delay_set(self, 1U);
}
return aq_hw_err_from_flags(self);
}
@ -382,9 +386,9 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
int err = 0;
unsigned int h = 0U;
unsigned int l = 0U;
int err = 0;
if (!mac_addr) {
err = -EINVAL;
@ -413,11 +417,10 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
int err = 0;
u32 val;
struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_b0_hw_init_tx_path(self);
hw_atl_b0_hw_init_rx_path(self);
@ -460,8 +463,10 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
/* Interrupts */
hw_atl_reg_gen_irq_map_set(self,
((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
((HW_ATL_B0_ERR_INT << 0x18) |
(1U << 0x1F)) |
((HW_ATL_B0_ERR_INT << 0x10) |
(1U << 0x17)), 0U);
/* Enable link interrupt */
if (aq_nic_cfg->link_irq_vec)
@ -478,6 +483,7 @@ static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
@ -485,6 +491,7 @@ static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
@ -492,6 +499,7 @@ static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
hw_atl_tpb_tx_buff_en_set(self, 1);
hw_atl_rpb_rx_buff_en_set(self, 1);
return aq_hw_err_from_flags(self);
}
@ -499,6 +507,7 @@ static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
return 0;
}
@ -509,8 +518,8 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_txd_s *txd = NULL;
unsigned int buff_pa_len = 0U;
unsigned int pkt_len = 0U;
unsigned int frag_count = 0U;
unsigned int pkt_len = 0U;
bool is_vlan = false;
bool is_gso = false;
@ -526,8 +535,9 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->sw_tail];
if (buff->is_gso) {
txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
if (buff->is_gso_tcp || buff->is_gso_udp) {
if (buff->is_gso_tcp)
txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
txd->ctl |= (buff->len_l3 << 31) |
(buff->len_l2 << 24);
@ -547,7 +557,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
txd->ctl |= buff->vlan_tx_tag << 4;
is_vlan = true;
}
if (!buff->is_gso && !buff->is_vlan) {
if (!buff->is_gso_tcp && !buff->is_gso_udp && !buff->is_vlan) {
buff_pa_len = buff->len;
txd->buf_addr = buff->pa;
@ -586,6 +596,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
}
hw_atl_b0_hw_tx_ring_tail_update(self, ring);
return aq_hw_err_from_flags(self);
}
@ -593,9 +604,9 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
@ -636,8 +647,8 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
aq_ring->idx);
@ -726,8 +737,10 @@ static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
unsigned int hw_head_;
int err = 0;
unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
@ -843,6 +856,7 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
return aq_hw_err_from_flags(self);
}
@ -852,12 +866,14 @@ static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
atomic_inc(&self->dpc);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
*mask = hw_atl_itr_irq_statuslsw_get(self);
return aq_hw_err_from_flags(self);
}
@ -866,8 +882,8 @@ static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
unsigned int packet_filter)
{
unsigned int i = 0U;
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
unsigned int i = 0U;
hw_atl_rpfl2promiscuous_mode_en_set(self,
IS_FILTER_ENABLED(IFF_PROMISC));
@ -905,29 +921,30 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
u32 count)
{
int err = 0;
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
err = -EBADRQC;
goto err_exit;
}
for (self->aq_nic_cfg->mc_list_count = 0U;
self->aq_nic_cfg->mc_list_count < count;
++self->aq_nic_cfg->mc_list_count) {
u32 i = self->aq_nic_cfg->mc_list_count;
for (cfg->mc_list_count = 0U;
cfg->mc_list_count < count;
++cfg->mc_list_count) {
u32 i = cfg->mc_list_count;
u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
hw_atl_rpfl2unicast_dest_addresslsw_set(self,
l, HW_ATL_B0_MAC_MIN + i);
hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
HW_ATL_B0_MAC_MIN + i);
hw_atl_rpfl2unicast_dest_addressmsw_set(self,
h, HW_ATL_B0_MAC_MIN + i);
hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
HW_ATL_B0_MAC_MIN + i);
hw_atl_rpfl2_uc_flr_en_set(self,
(self->aq_nic_cfg->is_mc_list_enabled),
(cfg->is_mc_list_enabled),
HW_ATL_B0_MAC_MIN + i);
}
@ -1054,6 +1071,7 @@ static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
@ -1061,6 +1079,7 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
@ -1427,6 +1446,31 @@ static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
{
switch (mode) {
case AQ_HW_LOOPBACK_DMA_SYS:
hw_atl_tpb_tx_dma_sys_lbk_en_set(self, enable);
hw_atl_rpb_dma_sys_lbk_set(self, enable);
break;
case AQ_HW_LOOPBACK_PKT_SYS:
hw_atl_tpo_tx_pkt_sys_lbk_en_set(self, enable);
hw_atl_rpf_tpo_to_rpf_sys_lbk_set(self, enable);
break;
case AQ_HW_LOOPBACK_DMA_NET:
hw_atl_rpf_vlan_prom_mode_en_set(self, enable);
hw_atl_rpfl2promiscuous_mode_en_set(self, enable);
hw_atl_tpb_tx_tx_clk_gate_en_set(self, !enable);
hw_atl_tpb_tx_dma_net_lbk_en_set(self, enable);
hw_atl_rpb_dma_net_lbk_set(self, enable);
break;
default:
return -EINVAL;
}
return 0;
}
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
@ -1481,5 +1525,9 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.rx_extract_ts = hw_atl_b0_rx_extract_ts,
.extract_hwts = hw_atl_b0_extract_hwts,
.hw_set_offload = hw_atl_b0_hw_offload_set,
.hw_set_fc = hw_atl_b0_set_fc,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
.hw_set_offload = hw_atl_b0_hw_offload_set,
.hw_set_loopback = hw_atl_b0_set_loopback,
.hw_set_fc = hw_atl_b0_set_fc,
};


@ -563,6 +563,13 @@ void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
}
void hw_atl_rpb_dma_net_lbk_set(struct aq_hw_s *aq_hw, u32 dma_net_lbk)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_NET_LBK_ADR,
HW_ATL_RPB_DMA_NET_LBK_MSK,
HW_ATL_RPB_DMA_NET_LBK_SHIFT, dma_net_lbk);
}
void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
u32 rx_traf_class_mode)
{
@ -1341,7 +1348,26 @@ void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_
tx_dma_sys_lbk_en);
}
void hw_atl_tpb_tx_dma_net_lbk_en_set(struct aq_hw_s *aq_hw,
u32 tx_dma_net_lbk_en)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_NET_LBK_ADR,
HW_ATL_TPB_DMA_NET_LBK_MSK,
HW_ATL_TPB_DMA_NET_LBK_SHIFT,
tx_dma_net_lbk_en);
}
void hw_atl_tpb_tx_tx_clk_gate_en_set(struct aq_hw_s *aq_hw,
u32 tx_clk_gate_en)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_CLK_GATE_EN_ADR,
HW_ATL_TPB_TX_CLK_GATE_EN_MSK,
HW_ATL_TPB_TX_CLK_GATE_EN_SHIFT,
tx_clk_gate_en);
}
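
The new register wrappers above all follow the same masked read-modify-write pattern, driven by the _ADR/_MSK/_SHIFT constants defined further down. The sketch below shows what such a masked write amounts to; example_write_reg_bit is a hypothetical stand-in that mirrors the behavior expected of a helper like aq_hw_write_reg_bit(), not the driver's actual implementation.

/* Sketch of a masked read-modify-write, assuming plain 32-bit register
 * accessors. Hypothetical helper for illustration only.
 */
static void example_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr,
				  u32 msk, u32 shift, u32 val)
{
	u32 reg = aq_hw_read_reg(aq_hw, addr);	/* read current register */

	reg &= ~msk;				/* clear the bitfield */
	reg |= (val << shift) & msk;		/* place the new value */
	aq_hw_write_reg(aq_hw, addr, reg);	/* write it back */
}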
void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_buff_size_per_tc, u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer),

View File

@ -288,6 +288,9 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
/* set dma system loopback */
void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
/* set dma network loopback */
void hw_atl_rpb_dma_net_lbk_set(struct aq_hw_s *aq_hw, u32 dma_net_lbk);
/* set rx traffic class mode */
void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
u32 rx_traf_class_mode);
@ -629,6 +632,14 @@ void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
/* set tx dma system loopback enable */
void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
/* set tx dma network loopback enable */
void hw_atl_tpb_tx_dma_net_lbk_en_set(struct aq_hw_s *aq_hw,
u32 tx_dma_net_lbk_en);
/* set tx clock gating enable */
void hw_atl_tpb_tx_tx_clk_gate_en_set(struct aq_hw_s *aq_hw,
u32 tx_clk_gate_en);
/* set tx packet buffer size (per tc) */
void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_buff_size_per_tc,

View File

@ -554,6 +554,24 @@
/* default value of bitfield dma_sys_loopback */
#define HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0
/* rx dma_net_loopback bitfield definitions
* preprocessor definitions for the bitfield "dma_net_loopback".
* port="pif_rpb_dma_net_lbk_i"
*/
/* register address for bitfield dma_net_loopback */
#define HW_ATL_RPB_DMA_NET_LBK_ADR 0x00005000
/* bitmask for bitfield dma_net_loopback */
#define HW_ATL_RPB_DMA_NET_LBK_MSK 0x00000010
/* inverted bitmask for bitfield dma_net_loopback */
#define HW_ATL_RPB_DMA_NET_LBK_MSKN 0xffffffef
/* lower bit position of bitfield dma_net_loopback */
#define HW_ATL_RPB_DMA_NET_LBK_SHIFT 4
/* width of bitfield dma_net_loopback */
#define HW_ATL_RPB_DMA_NET_LBK_WIDTH 1
/* default value of bitfield dma_net_loopback */
#define HW_ATL_RPB_DMA_NET_LBK_DEFAULT 0x0
/* rx rx_tc_mode bitfield definitions
* preprocessor definitions for the bitfield "rx_tc_mode".
* port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i"
@ -2107,6 +2125,24 @@
/* default value of bitfield dma_sys_loopback */
#define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0
/* tx dma_net_loopback bitfield definitions
* preprocessor definitions for the bitfield "dma_net_loopback".
* port="pif_tpb_dma_net_lbk_i"
*/
/* register address for bitfield dma_net_loopback */
#define HW_ATL_TPB_DMA_NET_LBK_ADR 0x00007000
/* bitmask for bitfield dma_net_loopback */
#define HW_ATL_TPB_DMA_NET_LBK_MSK 0x00000010
/* inverted bitmask for bitfield dma_net_loopback */
#define HW_ATL_TPB_DMA_NET_LBK_MSKN 0xffffffef
/* lower bit position of bitfield dma_net_loopback */
#define HW_ATL_TPB_DMA_NET_LBK_SHIFT 4
/* width of bitfield dma_net_loopback */
#define HW_ATL_TPB_DMA_NET_LBK_WIDTH 1
/* default value of bitfield dma_net_loopback */
#define HW_ATL_TPB_DMA_NET_LBK_DEFAULT 0x0
/* tx tx{b}_buf_size[7:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
@ -2144,6 +2180,24 @@
/* default value of bitfield tx_scp_ins_en */
#define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0
/* tx tx_clk_gate_en bitfield definitions
* preprocessor definitions for the bitfield "tx_clk_gate_en".
* port="pif_tpb_clk_gate_en_i"
*/
/* register address for bitfield tx_clk_gate_en */
#define HW_ATL_TPB_TX_CLK_GATE_EN_ADR 0x00007900
/* bitmask for bitfield tx_clk_gate_en */
#define HW_ATL_TPB_TX_CLK_GATE_EN_MSK 0x00000010
/* inverted bitmask for bitfield tx_clk_gate_en */
#define HW_ATL_TPB_TX_CLK_GATE_EN_MSKN 0xffffffef
/* lower bit position of bitfield tx_clk_gate_en */
#define HW_ATL_TPB_TX_CLK_GATE_EN_SHIFT 4
/* width of bitfield tx_clk_gate_en */
#define HW_ATL_TPB_TX_CLK_GATE_EN_WIDTH 1
/* default value of bitfield tx_clk_gate_en */
#define HW_ATL_TPB_TX_CLK_GATE_EN_DEFAULT 0x1
/* tx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
* port="pif_tpo_ipv4_chk_en_i"

View File

@ -47,6 +47,11 @@
#define FORCE_FLASHLESS 0
enum mcp_area {
MCP_AREA_CONFIG = 0x80000000,
MCP_AREA_SETTINGS = 0x20000000,
};
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
@ -87,6 +92,7 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
}
self->aq_fw_ops = *fw_ops;
err = self->aq_fw_ops->init(self);
return err;
}
@ -237,9 +243,9 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
int hw_atl_utils_soft_reset(struct aq_hw_s *self)
{
int k;
u32 boot_exit_code = 0;
u32 val;
int k;
for (k = 0; k < 1000; ++k) {
u32 flb_status = aq_hw_read_reg(self,
@ -327,10 +333,75 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
return err;
}
int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt)
static int hw_atl_utils_write_b1_mbox(struct aq_hw_s *self, u32 addr,
u32 *p, u32 cnt, enum mcp_area area)
{
u32 val;
u32 data_offset = 0;
u32 offset = addr;
int err = 0;
u32 val;
switch (area) {
case MCP_AREA_CONFIG:
offset -= self->rpc_addr;
break;
case MCP_AREA_SETTINGS:
offset -= self->settings_addr;
break;
}
offset = offset / sizeof(u32);
for (; data_offset < cnt; ++data_offset, ++offset) {
aq_hw_write_reg(self, 0x328, p[data_offset]);
aq_hw_write_reg(self, 0x32C,
(area | (0xFFFF & (offset * 4))));
hw_atl_mcp_up_force_intr_set(self, 1);
/* 1000 times by 10us = 10ms */
err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
self, val,
(val & 0xF0000000) !=
area,
10U, 10000U);
if (err < 0)
break;
}
return err;
}
static int hw_atl_utils_write_b0_mbox(struct aq_hw_s *self, u32 addr,
u32 *p, u32 cnt)
{
u32 offset = 0;
int err = 0;
u32 val;
aq_hw_write_reg(self, 0x208, addr);
for (; offset < cnt; ++offset) {
aq_hw_write_reg(self, 0x20C, p[offset]);
aq_hw_write_reg(self, 0x200, 0xC000);
err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
self, val,
(val & 0x100) == 0U,
10U, 10000U);
if (err < 0)
break;
}
return err;
}
static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 addr, u32 *p,
u32 cnt, enum mcp_area area)
{
int err = 0;
u32 val;
err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, self,
val, val == 1U,
@ -338,54 +409,47 @@ int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt)
if (err < 0)
goto err_exit;
if (IS_CHIP_FEATURE(REVISION_B1)) {
u32 offset = 0;
for (; offset < cnt; ++offset) {
aq_hw_write_reg(self, 0x328, p[offset]);
aq_hw_write_reg(self, 0x32C,
(0x80000000 | (0xFFFF & (offset * 4))));
hw_atl_mcp_up_force_intr_set(self, 1);
/* 1000 times by 10us = 10ms */
err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
self, val,
(val & 0xF0000000) !=
0x80000000,
10U, 10000U);
}
} else {
u32 offset = 0;
aq_hw_write_reg(self, 0x208, a);
for (; offset < cnt; ++offset) {
aq_hw_write_reg(self, 0x20C, p[offset]);
aq_hw_write_reg(self, 0x200, 0xC000);
err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
self, val,
(val & 0x100) == 0,
1000U, 10000U);
}
}
if (IS_CHIP_FEATURE(REVISION_B1))
err = hw_atl_utils_write_b1_mbox(self, addr, p, cnt, area);
else
err = hw_atl_utils_write_b0_mbox(self, addr, p, cnt);
hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
if (err < 0)
goto err_exit;
err = aq_hw_err_from_flags(self);
err_exit:
return err;
}
int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt)
{
return hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, p,
cnt, MCP_AREA_CONFIG);
}
int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
u32 cnt)
{
return hw_atl_utils_fw_upload_dwords(self, self->settings_addr + offset,
p, cnt, MCP_AREA_SETTINGS);
}
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
{
int err = 0;
const u32 dw_major_mask = 0xff000000U;
const u32 dw_minor_mask = 0x00ffffffU;
int err = 0;
err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0;
if (err < 0)
goto err_exit;
err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ?
-EOPNOTSUPP : 0;
err_exit:
return err;
}
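
In this check the top byte is the major version and must match exactly, while the low 24 bits are treated as a minor/build number that the running firmware must meet or exceed. A short worked example with made-up version words:

/* Worked example (version values are made up for illustration):
 *   expected = 0x0301001c, actual = 0x0301005a
 *     major bytes match (0x03), minor 0x01005a >= 0x01001c -> 0 (ok)
 *   expected = 0x0301001c, actual = 0x0201009f
 *     major bytes differ (0x03 vs 0x02)                    -> -EOPNOTSUPP
 *   expected = 0x0301001c, actual = 0x03010010
 *     running firmware minor is older                      -> -EOPNOTSUPP
 */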
@ -430,17 +494,16 @@ struct aq_hw_atl_utils_fw_rpc_tid_s {
int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
{
int err = 0;
struct aq_hw_atl_utils_fw_rpc_tid_s sw;
int err = 0;
if (!IS_CHIP_FEATURE(MIPS)) {
err = -1;
goto err_exit;
}
err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
(u32 *)(void *)&self->rpc,
(rpc_size + sizeof(u32) -
sizeof(u8)) / sizeof(u32));
err = hw_atl_write_fwcfg_dwords(self, (u32 *)(void *)&self->rpc,
(rpc_size + sizeof(u32) -
sizeof(u8)) / sizeof(u32));
if (err < 0)
goto err_exit;
@ -455,9 +518,9 @@ int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
struct hw_atl_utils_fw_rpc **rpc)
{
int err = 0;
struct aq_hw_atl_utils_fw_rpc_tid_s sw;
struct aq_hw_atl_utils_fw_rpc_tid_s fw;
int err = 0;
do {
sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
@ -561,10 +624,10 @@ static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state)
{
int err = 0;
u32 transaction_id = 0;
struct hw_atl_utils_mbox_header mbox;
u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
struct hw_atl_utils_mbox_header mbox;
u32 transaction_id = 0;
int err = 0;
if (state == MPI_RESET) {
hw_atl_utils_mpi_read_mbox(self, &mbox);
@ -592,20 +655,26 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
val |= state & HW_ATL_MPI_STATE_MSK;
aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
err_exit:
return err;
}
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
{
u32 cp0x036C = hw_atl_utils_mpi_get_state(self);
u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
struct aq_hw_link_status_s *link_status = &self->aq_link_status;
u32 mpi_state;
u32 speed;
if (!link_speed_mask) {
mpi_state = hw_atl_utils_mpi_get_state(self);
speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
FW2X_RATE_2G5 | FW2X_RATE_5G |
FW2X_RATE_10G);
if (!speed) {
link_status->mbps = 0U;
} else {
switch (link_speed_mask) {
switch (speed) {
case HAL_ATLANTIC_RATE_10G:
link_status->mbps = 10000U;
break;
@ -638,14 +707,15 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
u8 *mac)
{
u32 mac_addr[2];
u32 efuse_addr;
int err = 0;
u32 h = 0U;
u32 l = 0U;
u32 mac_addr[2];
if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
unsigned int rnd = 0;
unsigned int ucp_0x370 = 0;
unsigned int rnd = 0;
get_random_bytes(&rnd, sizeof(unsigned int));
@ -653,11 +723,10 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
}
err = hw_atl_utils_fw_downld_dwords(self,
aq_hw_read_reg(self, 0x00000374U) +
(40U * 4U),
mac_addr,
ARRAY_SIZE(mac_addr));
efuse_addr = aq_hw_read_reg(self, 0x00000374U);
err = hw_atl_utils_fw_downld_dwords(self, efuse_addr + (40U * 4U),
mac_addr, ARRAY_SIZE(mac_addr));
if (err < 0) {
mac_addr[0] = 0U;
mac_addr[1] = 0U;
@ -719,14 +788,15 @@ unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
default:
break;
}
return ret;
}
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
{
u32 chip_features = 0U;
u32 val = hw_atl_reg_glb_mif_id_get(self);
u32 mif_rev = val & 0xFFU;
u32 chip_features = 0U;
if ((0xFU & mif_rev) == 1U) {
chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
@ -753,13 +823,14 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
{
hw_atl_utils_mpi_set_speed(self, 0);
hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
return 0;
}
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
struct hw_atl_utils_mbox mbox;
struct aq_stats_s *cs = &self->curr_stats;
struct hw_atl_utils_mbox mbox;
hw_atl_utils_mpi_read_stats(self, &mbox);
@ -836,16 +907,19 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
regs_buff[i] = aq_hw_read_reg(self,
hw_atl_utils_hw_mac_regs[i]);
return 0;
}
int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
{
*fw_version = aq_hw_read_reg(self, 0x18U);
return 0;
}
static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac)
static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
u8 *mac)
{
struct hw_atl_utils_fw_rpc *prpc = NULL;
unsigned int rpc_size = 0U;
@ -858,22 +932,26 @@ static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac)
memset(prpc, 0, sizeof(*prpc));
if (wol_enabled) {
rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_wol);
rpc_size = offsetof(struct hw_atl_utils_fw_rpc, msg_wol_add) +
sizeof(prpc->msg_wol_add);
prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD;
prpc->msg_wol.priority =
prpc->msg_wol_add.priority =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR;
prpc->msg_wol.pattern_id =
prpc->msg_wol_add.pattern_id =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
prpc->msg_wol.wol_packet_type =
prpc->msg_wol_add.packet_type =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT;
ether_addr_copy((u8 *)&prpc->msg_wol.wol_pattern, mac);
ether_addr_copy((u8 *)&prpc->msg_wol_add.magic_packet_pattern,
mac);
} else {
rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_del_id);
rpc_size = sizeof(prpc->msg_wol_remove) +
offsetof(struct hw_atl_utils_fw_rpc, msg_wol_remove);
prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL;
prpc->msg_wol.pattern_id =
prpc->msg_wol_add.pattern_id =
HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
}
@ -890,8 +968,8 @@ static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
unsigned int rpc_size = 0U;
int err = 0;
if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) {
err = aq_fw1x_set_wol(self, 1, mac);
if (self->aq_nic_cfg->wol & WAKE_MAGIC) {
err = aq_fw1x_set_wake_magic(self, 1, mac);
if (err < 0)
goto err_exit;
@ -965,4 +1043,5 @@ const struct aq_fw_ops aq_fw_1x_ops = {
.set_flow_control = NULL,
.send_fw_request = NULL,
.enable_ptp = NULL,
.led_control = NULL,
};

View File

@ -70,113 +70,50 @@ struct __packed hw_atl_stats_s {
u32 dpc;
};
union __packed ip_addr {
struct {
u8 addr[16];
} v6;
struct {
u8 padding[12];
u8 addr[4];
} v4;
};
struct __packed drv_msg_enable_wakeup {
union {
u32 pattern_mask;
struct __packed hw_atl_utils_fw_rpc {
u32 msg_id;
struct {
u32 reason_arp_v4_pkt : 1;
u32 reason_ipv4_ping_pkt : 1;
u32 reason_ipv6_ns_pkt : 1;
u32 reason_ipv6_ping_pkt : 1;
u32 reason_link_up : 1;
u32 reason_link_down : 1;
u32 reason_maximum : 1;
};
};
union {
struct {
u32 pong;
} msg_ping;
struct {
u8 mac_addr[6];
u32 ip_addr_cnt;
struct {
union ip_addr addr;
union ip_addr mask;
} ip[1];
} msg_arp;
struct {
u32 len;
u8 packet[1514U];
} msg_inject;
struct {
u32 priority;
u32 wol_packet_type;
u32 pattern_id;
u32 next_wol_pattern_offset;
union {
struct {
u32 flags;
u8 ipv4_source_address[4];
u8 ipv4_dest_address[4];
u16 tcp_source_port_number;
u16 tcp_dest_port_number;
} ipv4_tcp_syn_parameters;
struct {
u32 flags;
u8 ipv6_source_address[16];
u8 ipv6_dest_address[16];
u16 tcp_source_port_number;
u16 tcp_dest_port_number;
} ipv6_tcp_syn_parameters;
struct {
u32 flags;
} eapol_request_id_message_parameters;
struct {
u32 flags;
u32 mask_offset;
u32 mask_size;
u32 pattern_offset;
u32 pattern_size;
} wol_bit_map_pattern;
struct {
u8 mac_addr[ETH_ALEN];
} wol_magic_packet_patter;
} wol_pattern;
} msg_wol;
struct {
union {
u32 pattern_mask;
struct {
u32 reason_arp_v4_pkt : 1;
u32 reason_ipv4_ping_pkt : 1;
u32 reason_ipv6_ns_pkt : 1;
u32 reason_ipv6_ping_pkt : 1;
u32 reason_link_up : 1;
u32 reason_link_down : 1;
u32 reason_maximum : 1;
};
};
union {
u32 offload_mask;
};
} msg_enable_wakeup;
struct {
u32 id;
} msg_del_id;
u32 offload_mask;
};
};
struct __packed magic_packet_pattern_s {
u8 mac_addr[ETH_ALEN];
};
struct __packed drv_msg_wol_add {
u32 priority;
u32 packet_type;
u32 pattern_id;
u32 next_pattern_offset;
struct magic_packet_pattern_s magic_packet_pattern;
};
struct __packed drv_msg_wol_remove {
u32 id;
};
struct __packed hw_atl_utils_mbox_header {
u32 version;
u32 transaction_id;
u32 error;
};
struct __packed hw_aq_ptp_offset {
struct __packed hw_atl_ptp_offset {
u16 ingress_100;
u16 egress_100;
u16 ingress_1000;
@ -189,6 +126,13 @@ struct __packed hw_aq_ptp_offset {
u16 egress_10000;
};
struct __packed hw_atl_cable_diag {
u8 fault;
u8 distance;
u8 far_distance;
u8 reserved;
};
enum gpio_pin_function {
GPIO_PIN_FUNCTION_NC,
GPIO_PIN_FUNCTION_VAUX_ENABLE,
@ -204,14 +148,14 @@ enum gpio_pin_function {
GPIO_PIN_FUNCTION_SIZE
};
struct __packed hw_aq_info {
struct __packed hw_atl_info {
u8 reserved[6];
u16 phy_fault_code;
u16 phy_temperature;
u8 cable_len;
u8 reserved1;
u32 cable_diag_data[4];
struct hw_aq_ptp_offset ptp_offset;
struct hw_atl_cable_diag cable_diag_data[4];
struct hw_atl_ptp_offset ptp_offset;
u8 reserved2[12];
u32 caps_lo;
u32 caps_hi;
@ -233,28 +177,25 @@ struct __packed hw_aq_info {
struct __packed hw_atl_utils_mbox {
struct hw_atl_utils_mbox_header header;
struct hw_atl_stats_s stats;
struct hw_aq_info info;
struct hw_atl_info info;
};
/* fw2x */
typedef u32 fw_offset_t;
struct __packed offload_ip_info {
u8 v4_local_addr_count;
u8 v4_addr_count;
u8 v6_local_addr_count;
u8 v6_addr_count;
fw_offset_t v4_addr;
fw_offset_t v4_prefix;
fw_offset_t v6_addr;
fw_offset_t v6_prefix;
u32 v4_addr;
u32 v4_prefix;
u32 v6_addr;
u32 v6_prefix;
};
struct __packed offload_port_info {
u16 udp_port_count;
u16 tcp_port_count;
fw_offset_t udp_port;
fw_offset_t tcp_port;
u32 udp_port;
u32 tcp_port;
};
struct __packed offload_ka_info {
@ -262,15 +203,15 @@ struct __packed offload_ka_info {
u16 v6_ka_count;
u32 retry_count;
u32 retry_interval;
fw_offset_t v4_ka;
fw_offset_t v6_ka;
u32 v4_ka;
u32 v6_ka;
};
struct __packed offload_rr_info {
u32 rr_count;
u32 rr_buf_len;
fw_offset_t rr_id_x;
fw_offset_t rr_buf;
u32 rr_id_x;
u32 rr_buf;
};
struct __packed offload_info {
@ -287,6 +228,19 @@ struct __packed offload_info {
u8 buf[0];
};
struct __packed hw_atl_utils_fw_rpc {
u32 msg_id;
union {
/* fw1x structures */
struct drv_msg_wol_add msg_wol_add;
struct drv_msg_wol_remove msg_wol_remove;
struct drv_msg_enable_wakeup msg_enable_wakeup;
/* fw2x structures */
struct offload_info fw2x_offloads;
};
};
/* Mailbox FW Request interface */
struct __packed hw_fw_request_ptp_gpio_ctrl {
u32 index;
@ -323,9 +277,54 @@ struct __packed hw_fw_request_iface {
};
};
struct __packed hw_atl_utils_settings {
u32 mtu;
u32 downshift_retry_count;
u32 link_pause_frame_quanta_100m;
u32 link_pause_frame_threshold_100m;
u32 link_pause_frame_quanta_1g;
u32 link_pause_frame_threshold_1g;
u32 link_pause_frame_quanta_2p5g;
u32 link_pause_frame_threshold_2p5g;
u32 link_pause_frame_quanta_5g;
u32 link_pause_frame_threshold_5g;
u32 link_pause_frame_quanta_10g;
u32 link_pause_frame_threshold_10g;
u32 pfc_quanta_class_0;
u32 pfc_threshold_class_0;
u32 pfc_quanta_class_1;
u32 pfc_threshold_class_1;
u32 pfc_quanta_class_2;
u32 pfc_threshold_class_2;
u32 pfc_quanta_class_3;
u32 pfc_threshold_class_3;
u32 pfc_quanta_class_4;
u32 pfc_threshold_class_4;
u32 pfc_quanta_class_5;
u32 pfc_threshold_class_5;
u32 pfc_quanta_class_6;
u32 pfc_threshold_class_6;
u32 pfc_quanta_class_7;
u32 pfc_threshold_class_7;
u32 eee_link_down_timeout;
u32 eee_link_up_timeout;
u32 eee_max_link_drops;
u32 eee_rates_mask;
u32 wake_timer;
u32 thermal_shutdown_off_temp;
u32 thermal_shutdown_warning_temp;
u32 thermal_shutdown_cold_temp;
u32 msm_options;
u32 dac_cable_serdes_modes;
u32 media_detect;
};
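
Since hw_atl_write_fwsettings_dwords() takes a byte offset into the firmware settings area at self->settings_addr, individual fields of this layout can be updated with offsetof(). The sketch below updates the media_detect field as an example; the wrapper name and the choice of field are illustrative assumptions, not a caller taken from this commit.

/* Illustrative sketch: write one dword of the firmware settings area
 * addressed by its field offset. Hypothetical helper for illustration.
 */
static int example_set_media_detect(struct aq_hw_s *self, u32 on)
{
	u32 offset = offsetof(struct hw_atl_utils_settings, media_detect);

	/* one dword, placed at the field's byte offset in the area */
	return hw_atl_write_fwsettings_dwords(self, offset, &on, 1);
}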
enum hw_atl_rx_action_with_traffic {
HW_ATL_RX_DISCARD,
HW_ATL_RX_HOST,
HW_ATL_RX_MNGMNT,
HW_ATL_RX_HOST_AND_MNGMNT,
HW_ATL_RX_WOL
};
struct aq_rx_filter_vlan {
@ -407,20 +406,12 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
#define HAL_ATLANTIC_UTILS_FW_MSG_PING 0x1U
#define HAL_ATLANTIC_UTILS_FW_MSG_ARP 0x2U
#define HAL_ATLANTIC_UTILS_FW_MSG_INJECT 0x3U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD 0x4U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR 0x10000000U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN 0x1U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT 0x2U
#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL 0x5U
#define HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP 0x6U
#define HAL_ATLANTIC_UTILS_FW_MSG_MSM_PFC 0x7U
#define HAL_ATLANTIC_UTILS_FW_MSG_PROVISIONING 0x8U
#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_ADD 0x9U
#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_DEL 0xAU
#define HAL_ATLANTIC_UTILS_FW_MSG_CABLE_DIAG 0xDU
enum hw_atl_fw2x_rate {
FW2X_RATE_100M = 0x20,
@ -605,7 +596,10 @@ struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
u32 *p, u32 cnt);
int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt);
int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt);
int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
u32 cnt);
int hw_atl_utils_fw_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac);

View File

@ -17,6 +17,7 @@
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#define HW_ATL_FW2X_MPI_LED_ADDR 0x31c
#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334
#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
@ -34,12 +35,16 @@
#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL)
#define HW_ATL_FW2X_CTRL_WAKE_ON_LINK BIT(CTRL_WAKE_ON_LINK)
#define HW_ATL_FW2X_CTRL_SLEEP_PROXY BIT(CTRL_SLEEP_PROXY)
#define HW_ATL_FW2X_CTRL_WOL BIT(CTRL_WOL)
#define HW_ATL_FW2X_CTRL_LINK_DROP BIT(CTRL_LINK_DROP)
#define HW_ATL_FW2X_CTRL_PAUSE BIT(CTRL_PAUSE)
#define HW_ATL_FW2X_CTRL_TEMPERATURE BIT(CTRL_TEMPERATURE)
#define HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE BIT(CTRL_ASYMMETRIC_PAUSE)
#define HW_ATL_FW2X_CTRL_INT_LOOPBACK BIT(CTRL_INT_LOOPBACK)
#define HW_ATL_FW2X_CTRL_EXT_LOOPBACK BIT(CTRL_EXT_LOOPBACK)
#define HW_ATL_FW2X_CTRL_DOWNSHIFT BIT(CTRL_DOWNSHIFT)
#define HW_ATL_FW2X_CTRL_FORCE_RECONNECT BIT(CTRL_FORCE_RECONNECT)
#define HW_ATL_FW2X_CAP_EEE_1G_MASK BIT(CAPS_HI_1000BASET_FD_EEE)
@ -50,6 +55,9 @@
#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8
#define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E
#define HW_ATL_FW_VER_LED 0x03010026U
#define HW_ATL_FW_VER_MEDIA_CONTROL 0x0301005aU
struct __packed fw2x_msg_wol_pattern {
u8 mask[16];
u32 crc;
@ -74,6 +82,7 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
static u32 aq_fw2x_mbox_get(struct aq_hw_s *self);
static u32 aq_fw2x_rpc_get(struct aq_hw_s *self);
static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr);
static u32 aq_fw2x_state2_get(struct aq_hw_s *self);
static int aq_fw2x_init(struct aq_hw_s *self)
@ -91,6 +100,8 @@ static int aq_fw2x_init(struct aq_hw_s *self)
self->rpc_addr != 0U,
1000U, 100000U);
err = aq_fw2x_settings_get(self, &self->settings_addr);
return err;
}
@ -170,17 +181,26 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
return 0;
}
static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state)
static void aq_fw2x_upd_flow_control_bits(struct aq_hw_s *self,
u32 *mpi_state, u32 fc)
{
if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
*mpi_state |= BIT(CAPS_HI_PAUSE);
else
*mpi_state &= ~BIT(CAPS_HI_PAUSE);
*mpi_state &= ~(HW_ATL_FW2X_CTRL_PAUSE |
HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE);
if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
*mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE);
else
*mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE);
switch (fc) {
	/* There is no explicit mode for RX-only pause frames, so it is
	 * treated the same as full flow control: both the PAUSE and
	 * ASYMMETRIC_PAUSE control bits are set.
	 */
case AQ_NIC_FC_FULL:
case AQ_NIC_FC_RX:
*mpi_state |= HW_ATL_FW2X_CTRL_PAUSE |
HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE;
break;
case AQ_NIC_FC_TX:
*mpi_state |= HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE;
break;
}
}
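
Summarizing the mapping this helper produces in the CONTROL2 word:

/* Resulting pause bits per requested fc mode:
 *   AQ_NIC_FC_FULL -> CTRL_PAUSE | CTRL_ASYMMETRIC_PAUSE
 *   AQ_NIC_FC_RX   -> CTRL_PAUSE | CTRL_ASYMMETRIC_PAUSE (no RX-only mode)
 *   AQ_NIC_FC_TX   -> CTRL_ASYMMETRIC_PAUSE
 *   otherwise      -> both bits cleared
 */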
static void aq_fw2x_upd_eee_rate_bits(struct aq_hw_s *self, u32 *mpi_opts,
@ -204,7 +224,8 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
case MPI_INIT:
mpi_state &= ~BIT(CAPS_HI_LINK_DROP);
aq_fw2x_upd_eee_rate_bits(self, &mpi_state, cfg->eee_speeds);
aq_fw2x_set_mpi_flow_control(self, &mpi_state);
aq_fw2x_upd_flow_control_bits(self, &mpi_state,
self->aq_nic_cfg->fc.req);
break;
case MPI_DEINIT:
mpi_state |= BIT(CAPS_HI_LINK_DROP);
@ -215,15 +236,20 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
break;
}
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
return 0;
}
static int aq_fw2x_update_link_status(struct aq_hw_s *self)
{
u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G);
struct aq_hw_link_status_s *link_status = &self->aq_link_status;
u32 mpi_state;
u32 speed;
mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
FW2X_RATE_2G5 | FW2X_RATE_5G |
FW2X_RATE_10G);
if (speed) {
if (speed & FW2X_RATE_10G)
@ -247,11 +273,11 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self)
static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
{
u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
u32 mac_addr[2] = { 0 };
int err = 0;
u32 h = 0U;
u32 l = 0U;
u32 mac_addr[2] = { 0 };
u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
if (efuse_addr != 0) {
err = hw_atl_utils_fw_downld_dwords(self,
@ -285,15 +311,16 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
h >>= 8;
mac[0] = (u8)(0xFFU & h);
}
return err;
}
static int aq_fw2x_update_stats(struct aq_hw_s *self)
{
int err = 0;
u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS);
u32 stats_val;
int err = 0;
/* Toggle statistics bit for FW to update */
mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS);
@ -320,9 +347,9 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
int err = 0;
u32 val;
phy_temp_offset = self->mbox_addr +
offsetof(struct hw_atl_utils_mbox, info) +
offsetof(struct hw_aq_info, phy_temperature);
phy_temp_offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
info.phy_temperature);
/* Toggle statistics bit for FW to 0x36C.18 (CTRL_TEMPERATURE) */
mpi_opts = mpi_opts ^ HW_ATL_FW2X_CTRL_TEMPERATURE;
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
@ -345,87 +372,46 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
return 0;
}
static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac)
{
	struct hw_atl_utils_fw_rpc *rpc = NULL;
	struct offload_info *cfg = NULL;
	unsigned int rpc_size = 0U;
	u32 mpi_opts;
	int err = 0;
	u32 val;
	rpc_size = sizeof(rpc->msg_id) + sizeof(*cfg);
	err = hw_atl_utils_fw_rpc_wait(self, &rpc);
	if (err < 0)
		goto err_exit;
	memset(rpc, 0, rpc_size);
	cfg = (struct offload_info *)(&rpc->msg_id + 1);
	memcpy(cfg->mac_addr, mac, ETH_ALEN);
	cfg->len = sizeof(*cfg);
	/* Clear bit 0x36C.23 and 0x36C.22 */
	mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
	mpi_opts &= ~HW_ATL_FW2X_CTRL_SLEEP_PROXY;
	mpi_opts &= ~HW_ATL_FW2X_CTRL_LINK_DROP;
	aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
	err = hw_atl_utils_fw_rpc_call(self, rpc_size);
	if (err < 0)
		goto err_exit;
	/* Set bit 0x36C.23 */
	mpi_opts |= HW_ATL_FW2X_CTRL_SLEEP_PROXY;
	aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
	err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
					self, val,
					val & HW_ATL_FW2X_CTRL_SLEEP_PROXY,
					1U, 100000U);
err_exit:
	return err;
}
static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac)
{
	struct hw_atl_utils_fw_rpc *rpc = NULL;
	struct fw2x_msg_wol *msg = NULL;
	u32 mpi_opts;
	int err = 0;
	u32 val;
	err = hw_atl_utils_fw_rpc_wait(self, &rpc);
	if (err < 0)
		goto err_exit;
	msg = (struct fw2x_msg_wol *)rpc;
	memset(msg, 0, sizeof(*msg));
	msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL;
	msg->magic_packet_enabled = true;
	memcpy(msg->hw_addr, mac, ETH_ALEN);
	mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
	mpi_opts &= ~(HW_ATL_FW2X_CTRL_SLEEP_PROXY | HW_ATL_FW2X_CTRL_WOL);
	aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
	err = hw_atl_utils_fw_rpc_call(self, sizeof(*msg));
	if (err < 0)
		goto err_exit;
	/* Set bit 0x36C.24 */
	mpi_opts |= HW_ATL_FW2X_CTRL_WOL;
	aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
	err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
					self, val, val & HW_ATL_FW2X_CTRL_WOL,
					1U, 10000U);
err_exit:
	return err;
}
static int aq_fw2x_set_wol(struct aq_hw_s *self, u8 *mac)
{
	struct hw_atl_utils_fw_rpc *rpc = NULL;
	struct offload_info *info = NULL;
	u32 wol_bits = 0;
	u32 rpc_size;
	int err = 0;
	u32 val;
	if (self->aq_nic_cfg->wol & WAKE_PHY) {
		aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR,
				HW_ATL_FW2X_CTRL_LINK_DROP);
		readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val,
					  (val &
					   HW_ATL_FW2X_CTRL_LINK_DROP) != 0,
					  1000, 100000);
		wol_bits |= HW_ATL_FW2X_CTRL_WAKE_ON_LINK;
	}
	if (self->aq_nic_cfg->wol & WAKE_MAGIC) {
		wol_bits |= HW_ATL_FW2X_CTRL_SLEEP_PROXY |
			    HW_ATL_FW2X_CTRL_WOL;
		err = hw_atl_utils_fw_rpc_wait(self, &rpc);
		if (err < 0)
			goto err_exit;
		rpc_size = sizeof(*info) +
			   offsetof(struct hw_atl_utils_fw_rpc, fw2x_offloads);
		memset(rpc, 0, rpc_size);
		info = &rpc->fw2x_offloads;
		memcpy(info->mac_addr, mac, ETH_ALEN);
		info->len = sizeof(*info);
		err = hw_atl_utils_fw_rpc_call(self, rpc_size);
		if (err < 0)
			goto err_exit;
	}
	aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, wol_bits);
err_exit:
	return err;
}
@ -436,14 +422,9 @@ static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state,
{
int err = 0;
if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) {
err = aq_fw2x_set_sleep_proxy(self, mac);
if (err < 0)
goto err_exit;
err = aq_fw2x_set_wol_params(self, mac);
}
if (self->aq_nic_cfg->wol)
err = aq_fw2x_set_wol(self, mac);
err_exit:
return err;
}
@ -460,8 +441,7 @@ static int aq_fw2x_send_fw_request(struct aq_hw_s *self,
dword_cnt = size / sizeof(u32);
if (size % sizeof(u32))
dword_cnt++;
err = hw_atl_utils_fw_upload_dwords(self, aq_fw2x_rpc_get(self),
(void *)fw_req, dword_cnt);
err = hw_atl_write_fwcfg_dwords(self, (void *)fw_req, dword_cnt);
if (err < 0)
goto err_exit;
@ -495,6 +475,16 @@ static void aq_fw3x_enable_ptp(struct aq_hw_s *self, int enable)
aq_hw_write_reg(self, HW_ATL_FW3X_EXT_CONTROL_ADDR, ptp_opts);
}
static int aq_fw2x_led_control(struct aq_hw_s *self, u32 mode)
{
if (self->fw_ver_actual < HW_ATL_FW_VER_LED)
return -EOPNOTSUPP;
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_LED_ADDR, mode);
return 0;
}
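
A minimal sketch of dispatching the new led_control op through the firmware ops table, e.g. from an ethtool identify handler. The helper name example_blink_leds and the AQ_EXAMPLE_LED_BLINK_ALL value are hypothetical placeholders; the real mode encoding is defined by the firmware interface.

/* Illustrative sketch only; the constant below is a placeholder,
 * not a value documented by this commit.
 */
#define AQ_EXAMPLE_LED_BLINK_ALL 0x2AU	/* hypothetical mode word */

static int example_blink_leds(struct aq_hw_s *self,
			      const struct aq_fw_ops *fw_ops)
{
	if (!fw_ops->led_control)
		return -EOPNOTSUPP;

	/* returns -EOPNOTSUPP itself if the firmware is too old */
	return fw_ops->led_control(self, AQ_EXAMPLE_LED_BLINK_ALL);
}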
static int aq_fw2x_set_eee_rate(struct aq_hw_s *self, u32 speed)
{
u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
@ -512,11 +502,12 @@ static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate,
u32 mpi_state;
u32 caps_hi;
int err = 0;
u32 addr = self->mbox_addr + offsetof(struct hw_atl_utils_mbox, info) +
offsetof(struct hw_aq_info, caps_hi);
u32 offset;
err = hw_atl_utils_fw_downld_dwords(self, addr, &caps_hi,
sizeof(caps_hi) / sizeof(u32));
offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
info.caps_hi);
err = hw_atl_utils_fw_downld_dwords(self, offset, &caps_hi, 1);
if (err)
return err;
@ -544,7 +535,8 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
{
u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
aq_fw2x_set_mpi_flow_control(self, &mpi_state);
aq_fw2x_upd_flow_control_bits(self, &mpi_state,
self->aq_nic_cfg->fc.req);
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
@ -554,17 +546,41 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
{
	u32 mpi_state = aq_fw2x_state2_get(self);
	if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
		if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
			*fcmode = AQ_NIC_FC_RX;
		else
			*fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX;
	else
		if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
			*fcmode = AQ_NIC_FC_TX;
		else
			*fcmode = 0;
	*fcmode = 0;
	if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
		*fcmode |= AQ_NIC_FC_RX;
	if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
		*fcmode |= AQ_NIC_FC_TX;
	return 0;
}
static int aq_fw2x_set_phyloopback(struct aq_hw_s *self, u32 mode, bool enable)
{
	u32 mpi_opts;
	switch (mode) {
	case AQ_HW_LOOPBACK_PHYINT_SYS:
		mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
		if (enable)
			mpi_opts |= HW_ATL_FW2X_CTRL_INT_LOOPBACK;
		else
			mpi_opts &= ~HW_ATL_FW2X_CTRL_INT_LOOPBACK;
		aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
		break;
	case AQ_HW_LOOPBACK_PHYEXT_SYS:
		mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
		if (enable)
			mpi_opts |= HW_ATL_FW2X_CTRL_EXT_LOOPBACK;
		else
			mpi_opts &= ~HW_ATL_FW2X_CTRL_EXT_LOOPBACK;
		aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
@ -579,6 +595,19 @@ static u32 aq_fw2x_rpc_get(struct aq_hw_s *self)
return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR);
}
static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr)
{
int err = 0;
u32 offset;
offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox,
info.setting_address);
err = hw_atl_utils_fw_downld_dwords(self, offset, addr, 1);
return err;
}
static u32 aq_fw2x_state2_get(struct aq_hw_s *self)
{
return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
@ -602,4 +631,6 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.get_flow_control = aq_fw2x_get_flow_control,
.send_fw_request = aq_fw2x_send_fw_request,
.enable_ptp = aq_fw3x_enable_ptp,
.led_control = aq_fw2x_led_control,
.set_phyloopback = aq_fw2x_set_phyloopback,
};