Merge branch 'hns3-next'
Huazhong Tan says:

====================
net: hns3: add some bugfixes & optimizations & cleanups for HNS3 driver

This patch-set includes code optimizations, bugfixes and cleanups for
the HNS3 ethernet controller driver.

[patch 01/12] fixes a GFP flag error.
[patch 02/12] fixes a VF interrupt error.
[patch 03/12] adds a cleanup for VLAN handling.
[patch 04/12] fixes a bug in debugfs.
[patch 05/12] modifies pause displaying format.
[patch 06/12] adds more DFX information for ethtool -d.
[patch 07/12] adds more TX statistics information.
[patch 08/12] adds a check for TX BD number.
[patch 09/12] adds a cleanup for dumping NCL_CONFIG.
[patch 10/12] refines function for querying MAC pause statistics.
[patch 11/12] adds a handshake with VF when doing PF reset.
[patch 12/12] refines some macro definitions.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit f52ea3c55a
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -58,10 +58,10 @@
 		BIT(HNAE3_DEV_SUPPORT_ROCE_B))

 #define hnae3_dev_roce_supported(hdev) \
-	hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+	hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)

 #define hnae3_dev_dcb_supported(hdev) \
-	hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+	hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)

 #define hnae3_dev_fd_supported(hdev) \
 	hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
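Note: the parenthesization change above (patch 12/12) is standard C macro hygiene. Without parentheses around the parameter, an argument containing an operator of lower precedence than -> expands incorrectly. A minimal illustration; the flag_of names are hypothetical, not from the driver:

/* Hypothetical macros illustrating why (hdev) needs parentheses. */
#define flag_of(hdev)      hdev->ae_dev->flag	/* unsafe */
#define flag_of_safe(hdev) (hdev)->ae_dev->flag	/* as patched */

/* flag_of((struct hclge_dev *)priv) expands to
 *	(struct hclge_dev *)priv->ae_dev->flag
 * where -> binds to priv before the cast is applied;
 * flag_of_safe((struct hclge_dev *)priv) expands to
 *	((struct hclge_dev *)priv)->ae_dev->flag
 * which dereferences the casted pointer as intended.
 */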
@@ -91,6 +91,11 @@ struct hnae3_queue {
 	u16 rx_desc_num;/* total number of rx desc */
 };

+struct hns3_mac_stats {
+	u64 tx_pause_cnt;
+	u64 rx_pause_cnt;
+};
+
 /*hnae3 loop mode*/
 enum hnae3_loop {
 	HNAE3_LOOP_APP,
@@ -298,6 +303,8 @@ struct hnae3_ae_dev {
  *   Remove multicast address from mac table
  * update_stats()
  *   Update Old network device statistics
+ * get_mac_stats()
+ *   get mac pause statistics including tx_cnt and rx_cnt
  * get_ethtool_stats()
  *   Get ethtool network device statistics
  * get_strings()
@@ -426,8 +433,8 @@ struct hnae3_ae_ops {
 	void (*update_stats)(struct hnae3_handle *handle,
 			     struct net_device_stats *net_stats);
 	void (*get_stats)(struct hnae3_handle *handle, u64 *data);
-	void (*get_mac_pause_stats)(struct hnae3_handle *handle, u64 *tx_cnt,
-				    u64 *rx_cnt);
+	void (*get_mac_stats)(struct hnae3_handle *handle,
+			      struct hns3_mac_stats *mac_stats);
 	void (*get_strings)(struct hnae3_handle *handle,
 			    u32 stringset, u8 *data);
 	int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -8,6 +8,7 @@
 #include "hns3_enet.h"

 #define HNS3_DBG_READ_LEN 256
+#define HNS3_DBG_WRITE_LEN 1024

 static struct dentry *hns3_dbgfs_root;
@@ -322,6 +323,9 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
 	    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
 		return 0;

+	if (count > HNS3_DBG_WRITE_LEN)
+		return -ENOSPC;
+
 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
 	if (!cmd_buf)
 		return count;
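Note: the -ENOSPC check added above (patch 04/12) bounds a kzalloc() whose size comes straight from a userspace write. A minimal sketch of the pattern, with the surrounding handler reduced to its essentials:

static ssize_t dbg_write(struct file *filp, const char __user *buffer,
			 size_t count, loff_t *ppos)
{
	char *cmd_buf;

	/* user controls count; without this cap a single write could
	 * request an arbitrarily large kernel allocation
	 */
	if (count > HNS3_DBG_WRITE_LEN)
		return -ENOSPC;

	cmd_buf = kzalloc(count + 1, GFP_KERNEL);	/* +1 for '\0' */
	if (!cmd_buf)
		return -ENOMEM;

	/* ... copy_from_user(), parse the command, kfree(cmd_buf) ... */
	return count;
}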
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -28,6 +28,12 @@
 #define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
 #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)

+#define hns3_rl_err(fmt, ...) \
+	do { \
+		if (net_ratelimit()) \
+			netdev_err(fmt, ##__VA_ARGS__); \
+	} while (0)
+
 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
 static void hns3_remove_hw_addr(struct net_device *netdev);
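Note on reading hns3_rl_err(): its first parameter is named fmt, but at every call site in this series it actually receives the struct net_device pointer; the real format string and arguments travel through __VA_ARGS__ into netdev_err(). So the call this series adds to hns3_nic_net_xmit() expands roughly to:

hns3_rl_err(netdev, "xmit error: %d!\n", buf_num);

/* expands to: */
do {
	if (net_ratelimit())
		netdev_err(netdev, "xmit error: %d!\n", buf_num);
} while (0);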
@@ -45,6 +51,9 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
 			   NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

+#define HNS3_INNER_VLAN_TAG	1
+#define HNS3_OUTER_VLAN_TAG	2
+
 /* hns3_pci_tbl - PCI Device ID Table
  *
  * Last entry must be all 0s
@@ -961,16 +970,16 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
 	hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
 }

-static int hns3_fill_desc_vtags(struct sk_buff *skb,
-				struct hns3_enet_ring *tx_ring,
-				u32 *inner_vlan_flag,
-				u32 *out_vlan_flag,
-				u16 *inner_vtag,
-				u16 *out_vtag)
+static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
+			     struct sk_buff *skb)
 {
-#define HNS3_TX_VLAN_PRIO_SHIFT 13
-
 	struct hnae3_handle *handle = tx_ring->tqp->handle;
+	struct vlan_ethhdr *vhdr;
+	int rc;
+
+	if (!(skb->protocol == htons(ETH_P_8021Q) ||
+	      skb_vlan_tag_present(skb)))
+		return 0;

 	/* Since HW limitation, if port based insert VLAN enabled, only one VLAN
 	 * header is allowed in skb, otherwise it will cause RAS error.
@@ -981,8 +990,7 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
 		return -EINVAL;

 	if (skb->protocol == htons(ETH_P_8021Q) &&
-	    !(tx_ring->tqp->handle->kinfo.netdev->features &
-	    NETIF_F_HW_VLAN_CTAG_TX)) {
+	    !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
 		/* When HW VLAN acceleration is turned off, and the stack
 		 * sets the protocol to 802.1q, the driver just need to
 		 * set the protocol to the encapsulated ethertype.
@@ -992,45 +1000,107 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
 	}

 	if (skb_vlan_tag_present(skb)) {
-		u16 vlan_tag;
-
-		vlan_tag = skb_vlan_tag_get(skb);
-		vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
-
 		/* Based on hw strategy, use out_vtag in two layer tag case,
 		 * and use inner_vtag in one tag case.
 		 */
-		if (skb->protocol == htons(ETH_P_8021Q)) {
-			if (handle->port_base_vlan_state ==
-			    HNAE3_PORT_BASE_VLAN_DISABLE){
-				hns3_set_field(*out_vlan_flag,
-					       HNS3_TXD_OVLAN_B, 1);
-				*out_vtag = vlan_tag;
-			} else {
-				hns3_set_field(*inner_vlan_flag,
-					       HNS3_TXD_VLAN_B, 1);
-				*inner_vtag = vlan_tag;
-			}
-		} else {
-			hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
-			*inner_vtag = vlan_tag;
-		}
-	} else if (skb->protocol == htons(ETH_P_8021Q)) {
-		struct vlan_ethhdr *vhdr;
-		int rc;
-
-		rc = skb_cow_head(skb, 0);
-		if (unlikely(rc < 0))
-			return rc;
-		vhdr = (struct vlan_ethhdr *)skb->data;
-		vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
-					<< HNS3_TX_VLAN_PRIO_SHIFT);
+		if (skb->protocol == htons(ETH_P_8021Q) &&
+		    handle->port_base_vlan_state ==
+		    HNAE3_PORT_BASE_VLAN_DISABLE)
+			rc = HNS3_OUTER_VLAN_TAG;
+		else
+			rc = HNS3_INNER_VLAN_TAG;
+
+		skb->protocol = vlan_get_protocol(skb);
+		return rc;
 	}

+	rc = skb_cow_head(skb, 0);
+	if (unlikely(rc < 0))
+		return rc;
+
+	vhdr = (struct vlan_ethhdr *)skb->data;
+	vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
+					 & VLAN_PRIO_MASK);
+
 	skb->protocol = vlan_get_protocol(skb);
 	return 0;
 }

+static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
+			      struct sk_buff *skb, struct hns3_desc *desc)
+{
+	u32 ol_type_vlan_len_msec = 0;
+	u32 type_cs_vlan_tso = 0;
+	u32 paylen = skb->len;
+	u16 inner_vtag = 0;
+	u16 out_vtag = 0;
+	u16 mss = 0;
+	int ret;
+
+	ret = hns3_handle_vtags(ring, skb);
+	if (unlikely(ret < 0)) {
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.tx_vlan_err++;
+		u64_stats_update_end(&ring->syncp);
+		return ret;
+	} else if (ret == HNS3_INNER_VLAN_TAG) {
+		inner_vtag = skb_vlan_tag_get(skb);
+		inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+				VLAN_PRIO_MASK;
+		hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
+	} else if (ret == HNS3_OUTER_VLAN_TAG) {
+		out_vtag = skb_vlan_tag_get(skb);
+		out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+				VLAN_PRIO_MASK;
+		hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
+			       1);
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 ol4_proto, il4_proto;
+
+		skb_reset_mac_len(skb);
+
+		ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
+		if (unlikely(ret)) {
+			u64_stats_update_begin(&ring->syncp);
+			ring->stats.tx_l4_proto_err++;
+			u64_stats_update_end(&ring->syncp);
+			return ret;
+		}
+
+		ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+				      &type_cs_vlan_tso,
+				      &ol_type_vlan_len_msec);
+		if (unlikely(ret)) {
+			u64_stats_update_begin(&ring->syncp);
+			ring->stats.tx_l2l3l4_err++;
+			u64_stats_update_end(&ring->syncp);
+			return ret;
+		}
+
+		ret = hns3_set_tso(skb, &paylen, &mss,
+				   &type_cs_vlan_tso);
+		if (unlikely(ret)) {
+			u64_stats_update_begin(&ring->syncp);
+			ring->stats.tx_tso_err++;
+			u64_stats_update_end(&ring->syncp);
+			return ret;
+		}
+	}
+
+	/* Set txbd */
+	desc->tx.ol_type_vlan_len_msec =
+		cpu_to_le32(ol_type_vlan_len_msec);
+	desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
+	desc->tx.paylen = cpu_to_le32(paylen);
+	desc->tx.mss = cpu_to_le16(mss);
+	desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+	desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
+
+	return 0;
+}
+
 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 			  unsigned int size, int frag_end,
 			  enum hns_desc_type type)
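Note: to summarize the refactor in this hunk, hns3_handle_vtags() now only classifies the skb (returning a negative error, 0, HNS3_INNER_VLAN_TAG or HNS3_OUTER_VLAN_TAG), while the new hns3_fill_skb_desc() owns the descriptor writes and increments a per-cause TX error counter. A condensed sketch of the dispatch, simplified from the code above:

ret = hns3_handle_vtags(ring, skb);
if (unlikely(ret < 0)) {
	ring->stats.tx_vlan_err++;	/* under u64_stats_update_*() */
	return ret;
} else if (ret == HNS3_INNER_VLAN_TAG) {
	/* tag lands in desc->tx.vlan_tag */
} else if (ret == HNS3_OUTER_VLAN_TAG) {
	/* tag lands in desc->tx.outer_vlan_tag */
}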
@@ -1045,50 +1115,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,

 	if (type == DESC_TYPE_SKB) {
 		struct sk_buff *skb = (struct sk_buff *)priv;
-		u32 ol_type_vlan_len_msec = 0;
-		u32 type_cs_vlan_tso = 0;
-		u32 paylen = skb->len;
-		u16 inner_vtag = 0;
-		u16 out_vtag = 0;
-		u16 mss = 0;
 		int ret;

-		ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
-					   &ol_type_vlan_len_msec,
-					   &inner_vtag, &out_vtag);
+		ret = hns3_fill_skb_desc(ring, skb, desc);
 		if (unlikely(ret))
 			return ret;

-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			u8 ol4_proto, il4_proto;
-
-			skb_reset_mac_len(skb);
-
-			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
-			if (unlikely(ret))
-				return ret;
-
-			ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
-					      &type_cs_vlan_tso,
-					      &ol_type_vlan_len_msec);
-			if (unlikely(ret))
-				return ret;
-
-			ret = hns3_set_tso(skb, &paylen, &mss,
-					   &type_cs_vlan_tso);
-			if (unlikely(ret))
-				return ret;
-		}
-
-		/* Set txbd */
-		desc->tx.ol_type_vlan_len_msec =
-			cpu_to_le32(ol_type_vlan_len_msec);
-		desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
-		desc->tx.paylen = cpu_to_le32(paylen);
-		desc->tx.mss = cpu_to_le16(mss);
-		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
-		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
-
 		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
 	} else {
 		frag = (skb_frag_t *)priv;
@@ -1096,7 +1128,9 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 	}

 	if (unlikely(dma_mapping_error(dev, dma))) {
+		u64_stats_update_begin(&ring->syncp);
 		ring->stats.sw_err_cnt++;
+		u64_stats_update_end(&ring->syncp);
 		return -ENOMEM;
 	}

@@ -1152,28 +1186,20 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 	return 0;
 }

-static int hns3_nic_bd_num(struct sk_buff *skb)
+static unsigned int hns3_nic_bd_num(struct sk_buff *skb)
 {
-	int size = skb_headlen(skb);
-	int i, bd_num;
+	unsigned int bd_num;
+	int i;

 	/* if the total len is within the max bd limit */
 	if (likely(skb->len <= HNS3_MAX_BD_SIZE))
 		return skb_shinfo(skb)->nr_frags + 1;

-	bd_num = hns3_tx_bd_count(size);
+	bd_num = hns3_tx_bd_count(skb_headlen(skb));

 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		int frag_bd_num;
-
-		size = skb_frag_size(frag);
-		frag_bd_num = hns3_tx_bd_count(size);
-
-		if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
-			return -ENOMEM;
-
-		bd_num += frag_bd_num;
+		bd_num += hns3_tx_bd_count(skb_frag_size(frag));
 	}

 	return bd_num;
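Note: hns3_tx_bd_count(S) is DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) with HNS3_MAX_BD_SIZE = 65535, so the total is simply the sum of rounded-up fragment sizes; the old per-fragment HNS3_MAX_BD_PER_FRAG error path is gone and the caller checks the total instead. A worked example with illustrative numbers:

/* A 100000-byte linear area plus one 1500-byte fragment: */
unsigned int bd_num = hns3_tx_bd_count(100000)	/* DIV_ROUND_UP -> 2 */
		    + hns3_tx_bd_count(1500);	/* -> 1, total 3 BDs */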
@@ -1194,7 +1220,7 @@ static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
  */
 static bool hns3_skb_need_linearized(struct sk_buff *skb)
 {
-	int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
+	int bd_limit = HNS3_MAX_BD_NUM_NORMAL - 1;
 	unsigned int tot_len = 0;
 	int i;

@@ -1224,21 +1250,16 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 				  struct sk_buff **out_skb)
 {
 	struct sk_buff *skb = *out_skb;
-	int bd_num;
+	unsigned int bd_num;

 	bd_num = hns3_nic_bd_num(skb);
-	if (bd_num < 0)
-		return bd_num;
-
-	if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
+	if (unlikely(bd_num > HNS3_MAX_BD_NUM_NORMAL)) {
 		struct sk_buff *new_skb;

-		if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
+		if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO &&
+		    !hns3_skb_need_linearized(skb))
 			goto out;

-		bd_num = hns3_tx_bd_count(skb->len);
-		if (unlikely(ring_space(ring) < bd_num))
-			return -EBUSY;
 		/* manual split the send packet */
 		new_skb = skb_copy(skb, GFP_ATOMIC);
 		if (!new_skb)
@@ -1246,6 +1267,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 		dev_kfree_skb_any(skb);
 		*out_skb = new_skb;

+		bd_num = hns3_nic_bd_num(new_skb);
+		if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_TSO) ||
+		    (!skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_NORMAL))
+			return -ENOMEM;
+
 		u64_stats_update_begin(&ring->syncp);
 		ring->stats.tx_copy++;
 		u64_stats_update_end(&ring->syncp);
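Note: the resulting policy from patch 08/12, stated once: a non-GSO skb may occupy at most HNS3_MAX_BD_NUM_NORMAL (8) BDs and a GSO skb at most HNS3_MAX_BD_NUM_TSO (63); an skb over the normal limit is first copied/linearized, re-counted, and only then rejected. A condensed sketch of the checks:

if (bd_num > HNS3_MAX_BD_NUM_NORMAL) {
	if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO &&
	    !hns3_skb_need_linearized(skb))
		goto out;		/* GSO may ride up to 63 BDs */
	/* otherwise skb_copy() linearizes, and the new count must fit
	 * 63 (GSO) or 8 (non-GSO), else -ENOMEM
	 */
}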
@@ -1319,9 +1345,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 			u64_stats_update_end(&ring->syncp);
 		}

-		if (net_ratelimit())
-			netdev_err(netdev, "xmit error: %d!\n", buf_num);
-
+		hns3_rl_err(netdev, "xmit error: %d!\n", buf_num);
 		goto out_err_tx_ok;
 	}

@@ -1487,7 +1511,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
 			tx_bytes += ring->stats.tx_bytes;
 			tx_pkts += ring->stats.tx_pkts;
 			tx_drop += ring->stats.sw_err_cnt;
+			tx_drop += ring->stats.tx_vlan_err;
+			tx_drop += ring->stats.tx_l4_proto_err;
+			tx_drop += ring->stats.tx_l2l3l4_err;
+			tx_drop += ring->stats.tx_tso_err;
 			tx_errors += ring->stats.sw_err_cnt;
+			tx_errors += ring->stats.tx_vlan_err;
+			tx_errors += ring->stats.tx_l4_proto_err;
+			tx_errors += ring->stats.tx_l2l3l4_err;
+			tx_errors += ring->stats.tx_tso_err;
 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

 		/* fetch the rx stats */
@@ -1694,15 +1726,12 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
 	/* When mac received many pause frames continuous, it's unable to send
 	 * packets, which may cause tx timeout
 	 */
-	if (h->ae_algo->ops->update_stats &&
-	    h->ae_algo->ops->get_mac_pause_stats) {
-		u64 tx_pause_cnt, rx_pause_cnt;
+	if (h->ae_algo->ops->get_mac_stats) {
+		struct hns3_mac_stats mac_stats;

-		h->ae_algo->ops->update_stats(h, &ndev->stats);
-		h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
-						     &rx_pause_cnt);
+		h->ae_algo->ops->get_mac_stats(h, &mac_stats);
 		netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
-			    tx_pause_cnt, rx_pause_cnt);
+			    mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
 	}

 	hw_head = readl_relaxed(tx_ring->tqp->io_base +
@@ -2371,8 +2400,9 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 			ring->stats.sw_err_cnt++;
 			u64_stats_update_end(&ring->syncp);

-			netdev_err(ring->tqp->handle->kinfo.netdev,
-				   "hnae reserve buffer map failed.\n");
+			hns3_rl_err(ring->tqp_vector->napi.dev,
+				    "alloc rx buffer failed: %d\n",
+				    ret);
 			break;
 		}
 		hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
@@ -2457,9 +2487,9 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
 		th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
 					  &iph->daddr, 0);
 	} else {
-		netdev_err(skb->dev,
-			   "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
-			   be16_to_cpu(type), depth);
+		hns3_rl_err(skb->dev,
+			    "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
+			    be16_to_cpu(type), depth);
 		return -EFAULT;
 	}

@@ -2601,7 +2631,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 	ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
 	skb = ring->skb;
 	if (unlikely(!skb)) {
-		netdev_err(netdev, "alloc rx skb fail\n");
+		hns3_rl_err(netdev, "alloc rx skb fail\n");

 		u64_stats_update_begin(&ring->syncp);
 		ring->stats.sw_err_cnt++;
@@ -2676,8 +2706,8 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
 			new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
 						 HNS3_RX_HEAD_SIZE);
 			if (unlikely(!new_skb)) {
-				netdev_err(ring->tqp->handle->kinfo.netdev,
-					   "alloc rx skb frag fail\n");
+				hns3_rl_err(ring->tqp_vector->napi.dev,
+					    "alloc rx fraglist skb fail\n");
 				return -ENXIO;
 			}
 			ring->frag_num = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -195,7 +195,8 @@ enum hns3_nic_state {
 #define HNS3_VECTOR_INITED		1

 #define HNS3_MAX_BD_SIZE		65535
-#define HNS3_MAX_BD_PER_FRAG		8
+#define HNS3_MAX_BD_NUM_NORMAL		8
+#define HNS3_MAX_BD_NUM_TSO		63
 #define HNS3_MAX_BD_PER_PKT		MAX_SKB_FRAGS

 #define HNS3_VECTOR_GL0_OFFSET		0x100
@@ -377,6 +378,10 @@ struct ring_stats {
 			u64 restart_queue;
 			u64 tx_busy;
 			u64 tx_copy;
+			u64 tx_vlan_err;
+			u64 tx_l4_proto_err;
+			u64 tx_l2l3l4_err;
+			u64 tx_tso_err;
 		};
 		struct {
 			u64 rx_pkts;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -30,6 +30,10 @@ static const struct hns3_stats hns3_txq_stats[] = {
 	HNS3_TQP_STAT("wake", restart_queue),
 	HNS3_TQP_STAT("busy", tx_busy),
 	HNS3_TQP_STAT("copy", tx_copy),
+	HNS3_TQP_STAT("vlan_err", tx_vlan_err),
+	HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
+	HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
+	HNS3_TQP_STAT("tso_err", tx_tso_err),
 };

 #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -87,6 +87,7 @@ enum hclge_opcode_type {
 	HCLGE_OPC_QUERY_VF_RSRC		= 0x0024,
 	HCLGE_OPC_GET_CFG_PARAM		= 0x0025,
 	HCLGE_OPC_PF_RST_DONE		= 0x0026,
+	HCLGE_OPC_QUERY_VF_RST_RDY	= 0x0027,

 	HCLGE_OPC_STATS_64_BIT		= 0x0030,
 	HCLGE_OPC_STATS_32_BIT		= 0x0031,
@@ -588,6 +589,12 @@ struct hclge_config_mac_mode_cmd {
 	u8 rsv[20];
 };

+struct hclge_pf_rst_sync_cmd {
+#define HCLGE_PF_RST_ALL_VF_RDY_B	0
+	u8 all_vf_ready;
+	u8 rsv[23];
+};
+
 #define HCLGE_CFG_SPEED_S		0
 #define HCLGE_CFG_SPEED_M		GENMASK(5, 0)

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -14,16 +14,8 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
 	struct hclge_desc desc[4];
 	int ret;

-	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
-	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
-	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
-	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
-
-	ret = hclge_cmd_send(&hdev->hw, desc, 4);
-	if (ret != HCLGE_CMD_EXEC_SUCCESS) {
+	ret = hclge_query_bd_num_cmd_send(hdev, desc);
+	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"get dfx bdnum fail, status is %d.\n", ret);
 		return ret;
@@ -1003,6 +995,33 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
 	kfree(desc_src);
 }

+#define HCLGE_CMD_NCL_CONFIG_BD_NUM	5
+
+static void hclge_ncl_config_data_print(struct hclge_dev *hdev,
+					struct hclge_desc *desc, int *offset,
+					int *length)
+{
+#define HCLGE_CMD_DATA_NUM	6
+
+	int i;
+	int j;
+
+	for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
+		for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
+			if (i == 0 && j == 0)
+				continue;
+
+			dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
+				 *offset,
+				 le32_to_cpu(desc[i].data[j]));
+			*offset += sizeof(u32);
+			*length -= sizeof(u32);
+			if (*length <= 0)
+				return;
+		}
+	}
+}
+
 /* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
  * @hdev: pointer to struct hclge_dev
  * @cmd_buf: string that contains offset and length
@@ -1012,17 +1031,13 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
 {
 #define HCLGE_MAX_NCL_CONFIG_OFFSET	4096
 #define HCLGE_MAX_NCL_CONFIG_LENGTH	(20 + 24 * 4)
-#define HCLGE_CMD_DATA_NUM	6

-	struct hclge_desc desc[5];
-	u32 byte_offset;
-	int bd_num = 5;
+	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
+	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
 	int offset;
 	int length;
 	int data0;
 	int ret;
-	int i;
-	int j;

 	ret = sscanf(cmd_buf, "%x %x", &offset, &length);
 	if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
@@ -1048,22 +1063,7 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
 		if (ret)
 			return;

-		byte_offset = offset;
-		for (i = 0; i < bd_num; i++) {
-			for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
-				if (i == 0 && j == 0)
-					continue;
-
-				dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
-					 byte_offset,
-					 le32_to_cpu(desc[i].data[j]));
-				byte_offset += sizeof(u32);
-				length -= sizeof(u32);
-				if (length <= 0)
-					return;
-			}
-		}
-		offset += HCLGE_MAX_NCL_CONFIG_LENGTH;
+		hclge_ncl_config_data_print(hdev, desc, &offset, &length);
 	}
 }

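Note: for orientation, hclge_dbg_dump_ncl_config() is reached through the hns3 debugfs "cmd" file, and the command tail is parsed by sscanf(cmd_buf, "%x %x", &offset, &length). A hedged usage example (the PCI address in the path is hypothetical, and the exact command prefix may differ by kernel version):

echo "dump ncl_config 0x0 0x40" > /sys/kernel/debug/hns3/0000:7d:00.0/cmd

This would print 0x40 bytes of the NCL_CONFIG file starting at offset 0x0, one u32 per line in the "0x<offset> | 0x<value>" format shown in hclge_ncl_config_data_print() above.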
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -35,6 +35,23 @@
 #define BUF_RESERVE_PERCENT	90

 #define HCLGE_RESET_MAX_FAIL_CNT	5
+#define HCLGE_RESET_SYNC_TIME		100
+#define HCLGE_PF_RESET_SYNC_TIME	20
+#define HCLGE_PF_RESET_SYNC_CNT		1500
+
+/* Get DFX BD number offset */
+#define HCLGE_DFX_BIOS_BD_OFFSET	1
+#define HCLGE_DFX_SSU_0_BD_OFFSET	2
+#define HCLGE_DFX_SSU_1_BD_OFFSET	3
+#define HCLGE_DFX_IGU_BD_OFFSET		4
+#define HCLGE_DFX_RPU_0_BD_OFFSET	5
+#define HCLGE_DFX_RPU_1_BD_OFFSET	6
+#define HCLGE_DFX_NCSI_BD_OFFSET	7
+#define HCLGE_DFX_RTC_BD_OFFSET		8
+#define HCLGE_DFX_PPP_BD_OFFSET		9
+#define HCLGE_DFX_RCB_BD_OFFSET		10
+#define HCLGE_DFX_TQP_BD_OFFSET		11
+#define HCLGE_DFX_SSU_2_BD_OFFSET	12

 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
 static int hclge_init_vlan_config(struct hclge_dev *hdev);
@@ -317,6 +334,36 @@ static const u8 hclge_hash_key[] = {
 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
 };

+static const u32 hclge_dfx_bd_offset_list[] = {
+	HCLGE_DFX_BIOS_BD_OFFSET,
+	HCLGE_DFX_SSU_0_BD_OFFSET,
+	HCLGE_DFX_SSU_1_BD_OFFSET,
+	HCLGE_DFX_IGU_BD_OFFSET,
+	HCLGE_DFX_RPU_0_BD_OFFSET,
+	HCLGE_DFX_RPU_1_BD_OFFSET,
+	HCLGE_DFX_NCSI_BD_OFFSET,
+	HCLGE_DFX_RTC_BD_OFFSET,
+	HCLGE_DFX_PPP_BD_OFFSET,
+	HCLGE_DFX_RCB_BD_OFFSET,
+	HCLGE_DFX_TQP_BD_OFFSET,
+	HCLGE_DFX_SSU_2_BD_OFFSET
+};
+
+static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
+	HCLGE_OPC_DFX_BIOS_COMMON_REG,
+	HCLGE_OPC_DFX_SSU_REG_0,
+	HCLGE_OPC_DFX_SSU_REG_1,
+	HCLGE_OPC_DFX_IGU_EGU_REG,
+	HCLGE_OPC_DFX_RPU_REG_0,
+	HCLGE_OPC_DFX_RPU_REG_1,
+	HCLGE_OPC_DFX_NCSI_REG,
+	HCLGE_OPC_DFX_RTC_REG,
+	HCLGE_OPC_DFX_PPP_REG,
+	HCLGE_OPC_DFX_RCB_REG,
+	HCLGE_OPC_DFX_TQP_REG,
+	HCLGE_OPC_DFX_SSU_REG_2
+};
+
 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
 {
 #define HCLGE_MAC_CMD_NUM 21
@@ -364,9 +411,13 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
 	u16 i, k, n;
 	int ret;

-	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
+	/* This may be called inside atomic sections,
+	 * so GFP_ATOMIC is more suitalbe here
+	 */
+	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
 	if (!desc)
 		return -ENOMEM;
+
 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
 	if (ret) {
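Note: the GFP change in patch 01/12 follows the usual rule: GFP_KERNEL may sleep and is only legal in process context, while GFP_ATOMIC never sleeps (at the cost of failing more readily under memory pressure) and is required when the caller may hold a spinlock or run from timer/softirq context, as this statistics path can. A minimal sketch; alloc_descs is a hypothetical helper, not driver code:

static struct hclge_desc *alloc_descs(int n, bool may_sleep)
{
	/* GFP_KERNEL can block to reclaim memory; GFP_ATOMIC cannot */
	return kcalloc(n, sizeof(struct hclge_desc),
		       may_sleep ? GFP_KERNEL : GFP_ATOMIC);
}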
@@ -702,14 +753,16 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
 	p = hclge_tqps_get_stats(handle, p);
 }

-static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
-				     u64 *rx_cnt)
+static void hclge_get_mac_stat(struct hnae3_handle *handle,
+			       struct hns3_mac_stats *mac_stats)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;

-	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
-	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
+	hclge_update_stats(handle, NULL);
+
+	mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
+	mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
 }

 static int hclge_parse_func_status(struct hclge_dev *hdev,
@@ -3134,6 +3187,39 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
 	return 0;
 }

+int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
+{
+	struct hclge_pf_rst_sync_cmd *req;
+	struct hclge_desc desc;
+	int cnt = 0;
+	int ret;
+
+	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
+
+	do {
+		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+		/* for compatible with old firmware, wait
+		 * 100 ms for VF to stop IO
+		 */
+		if (ret == -EOPNOTSUPP) {
+			msleep(HCLGE_RESET_SYNC_TIME);
+			return 0;
+		} else if (ret) {
+			dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
+				ret);
+			return ret;
+		} else if (req->all_vf_ready) {
+			return 0;
+		}
+		msleep(HCLGE_PF_RESET_SYNC_TIME);
+		hclge_cmd_reuse_desc(&desc, true);
+	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
+
+	dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
+	return -ETIME;
+}
+
 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
 {
 	struct hclge_desc desc;
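Note: the worst-case bound implied by the new constants: up to HCLGE_PF_RESET_SYNC_CNT (1500) polls spaced HCLGE_PF_RESET_SYNC_TIME (20) ms apart, i.e. roughly 30 seconds before the PF gives up with -ETIME; firmware that does not know HCLGE_OPC_QUERY_VF_RST_RDY answers -EOPNOTSUPP, and the PF falls back to the old fixed HCLGE_RESET_SYNC_TIME (100) ms sleep. As arithmetic:

/* upper bound on the handshake, ignoring command latency */
unsigned int worst_case_ms = 1500 * 20;	/* == 30000 ms, about 30 s */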
@@ -3300,17 +3386,18 @@ static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)

 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
 {
-#define HCLGE_RESET_SYNC_TIME 100
-
 	u32 reg_val;
 	int ret = 0;

 	switch (hdev->reset_type) {
 	case HNAE3_FUNC_RESET:
-		/* There is no mechanism for PF to know if VF has stopped IO
-		 * for now, just wait 100 ms for VF to stop IO
+		/* to confirm whether all running VF is ready
+		 * before request PF reset
 		 */
-		msleep(HCLGE_RESET_SYNC_TIME);
+		ret = hclge_func_reset_sync_vf(hdev);
+		if (ret)
+			return ret;
+
 		ret = hclge_func_reset_cmd(hdev, 0);
 		if (ret) {
 			dev_err(&hdev->pdev->dev,
@@ -3327,10 +3414,13 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
 		hdev->rst_stats.pf_rst_cnt++;
 		break;
 	case HNAE3_FLR_RESET:
-		/* There is no mechanism for PF to know if VF has stopped IO
-		 * for now, just wait 100 ms for VF to stop IO
+		/* to confirm whether all running VF is ready
+		 * before request PF reset
 		 */
-		msleep(HCLGE_RESET_SYNC_TIME);
+		ret = hclge_func_reset_sync_vf(hdev);
+		if (ret)
+			return ret;
+
 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
 		hdev->rst_stats.flr_rst_cnt++;
@@ -8203,28 +8293,15 @@ static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
 {
 	int ret;

-	if (rx_en && tx_en)
-		hdev->fc_mode_last_time = HCLGE_FC_FULL;
-	else if (rx_en && !tx_en)
-		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
-	else if (!rx_en && tx_en)
-		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
-	else
-		hdev->fc_mode_last_time = HCLGE_FC_NONE;
-
 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
 		return 0;

 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
-	if (ret) {
-		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
-			ret);
-		return ret;
-	}
-
-	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"configure pauseparam error, ret = %d.\n", ret);

-	return 0;
+	return ret;
 }

 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
@@ -8289,6 +8366,21 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
 	}
 }

+static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
+					 u32 rx_en, u32 tx_en)
+{
+	if (rx_en && tx_en)
+		hdev->fc_mode_last_time = HCLGE_FC_FULL;
+	else if (rx_en && !tx_en)
+		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
+	else if (!rx_en && tx_en)
+		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
+	else
+		hdev->fc_mode_last_time = HCLGE_FC_NONE;
+
+	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+}
+
 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
 				u32 rx_en, u32 tx_en)
 {
@@ -8314,6 +8406,8 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,

 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

+	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
+
 	if (!auto_neg)
 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

@@ -9324,9 +9418,222 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
 }

 #define MAX_SEPARATE_NUM	4
-#define SEPARATOR_VALUE		0xFFFFFFFF
+#define SEPARATOR_VALUE		0xFDFCFBFA
 #define REG_NUM_PER_LINE	4
 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
+#define REG_SEPARATOR_LINE	1
+#define REG_NUM_REMAIN_MASK	3
+#define BD_LIST_MAX_NUM		30
+
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
+{
+	/*prepare 4 commands to query DFX BD number*/
+	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
+	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
+	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
+	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
+
+	return hclge_cmd_send(&hdev->hw, desc, 4);
+}
+
+static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
+				    int *bd_num_list,
+				    u32 type_num)
+{
+#define HCLGE_DFX_REG_BD_NUM	4
+
+	u32 entries_per_desc, desc_index, index, offset, i;
+	struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
+	int ret;
+
+	ret = hclge_query_bd_num_cmd_send(hdev, desc);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Get dfx bd num fail, status is %d.\n", ret);
+		return ret;
+	}
+
+	entries_per_desc = ARRAY_SIZE(desc[0].data);
+	for (i = 0; i < type_num; i++) {
+		offset = hclge_dfx_bd_offset_list[i];
+		index = offset % entries_per_desc;
+		desc_index = offset / entries_per_desc;
+		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
+	}
+
+	return ret;
+}
+
+static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
+				  struct hclge_desc *desc_src, int bd_num,
+				  enum hclge_opcode_type cmd)
+{
+	struct hclge_desc *desc = desc_src;
+	int i, ret;
+
+	hclge_cmd_setup_basic_desc(desc, cmd, true);
+	for (i = 0; i < bd_num - 1; i++) {
+		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+		desc++;
+		hclge_cmd_setup_basic_desc(desc, cmd, true);
+	}
+
+	desc = desc_src;
+	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
+			cmd, ret);
+
+	return ret;
+}
+
+static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
+				    void *data)
+{
+	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
+	struct hclge_desc *desc = desc_src;
+	u32 *reg = data;
+
+	entries_per_desc = ARRAY_SIZE(desc->data);
+	reg_num = entries_per_desc * bd_num;
+	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
+	for (i = 0; i < reg_num; i++) {
+		index = i % entries_per_desc;
+		desc_index = i / entries_per_desc;
+		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
+	}
+	for (i = 0; i < separator_num; i++)
+		*reg++ = SEPARATOR_VALUE;
+
+	return reg_num + separator_num;
+}
+
+static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+{
+	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+	int data_len_per_desc, data_len, bd_num, i;
+	int bd_num_list[BD_LIST_MAX_NUM];
+	int ret;
+
+	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Get dfx reg bd num fail, status is %d.\n", ret);
+		return ret;
+	}
+
+	data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
+	*len = 0;
+	for (i = 0; i < dfx_reg_type_num; i++) {
+		bd_num = bd_num_list[i];
+		data_len = data_len_per_desc * bd_num;
+		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
+	}
+
+	return ret;
+}
+
+static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+{
+	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+	int bd_num, bd_num_max, buf_len, i;
+	int bd_num_list[BD_LIST_MAX_NUM];
+	struct hclge_desc *desc_src;
+	u32 *reg = data;
+	int ret;
+
+	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Get dfx reg bd num fail, status is %d.\n", ret);
+		return ret;
+	}
+
+	bd_num_max = bd_num_list[0];
+	for (i = 1; i < dfx_reg_type_num; i++)
+		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
+
+	buf_len = sizeof(*desc_src) * bd_num_max;
+	desc_src = kzalloc(buf_len, GFP_KERNEL);
+	if (!desc_src) {
+		dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < dfx_reg_type_num; i++) {
+		bd_num = bd_num_list[i];
+		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
+					     hclge_dfx_reg_opcode_list[i]);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"Get dfx reg fail, status is %d.\n", ret);
+			break;
+		}
+
+		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
+	}
+
+	kfree(desc_src);
+	return ret;
+}
+
+static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+			      struct hnae3_knic_private_info *kinfo)
+{
+#define HCLGE_RING_REG_OFFSET		0x200
+#define HCLGE_RING_INT_REG_OFFSET	0x4
+
+	int i, j, reg_num, separator_num;
+	int data_num_sum;
+	u32 *reg = data;
+
+	/* fetching per-PF registers valus from PF PCIe register space */
+	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+	for (i = 0; i < reg_num; i++)
+		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+	for (i = 0; i < separator_num; i++)
+		*reg++ = SEPARATOR_VALUE;
+	data_num_sum = reg_num + separator_num;
+
+	reg_num = ARRAY_SIZE(common_reg_addr_list);
+	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+	for (i = 0; i < reg_num; i++)
+		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
+	for (i = 0; i < separator_num; i++)
+		*reg++ = SEPARATOR_VALUE;
+	data_num_sum += reg_num + separator_num;
+
+	reg_num = ARRAY_SIZE(ring_reg_addr_list);
+	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+	for (j = 0; j < kinfo->num_tqps; j++) {
+		for (i = 0; i < reg_num; i++)
+			*reg++ = hclge_read_dev(&hdev->hw,
+						ring_reg_addr_list[i] +
+						HCLGE_RING_REG_OFFSET * j);
+		for (i = 0; i < separator_num; i++)
+			*reg++ = SEPARATOR_VALUE;
+	}
+	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
+
+	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
+	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+	for (j = 0; j < hdev->num_msi_used - 1; j++) {
+		for (i = 0; i < reg_num; i++)
+			*reg++ = hclge_read_dev(&hdev->hw,
+						tqp_intr_reg_addr_list[i] +
+						HCLGE_RING_INT_REG_OFFSET * j);
+		for (i = 0; i < separator_num; i++)
+			*reg++ = SEPARATOR_VALUE;
+	}
+	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
+
+	return data_num_sum;
+}

 static int hclge_get_regs_len(struct hnae3_handle *handle)
 {
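Note on how the new dump layout hangs together: every block of registers is emitted in lines of REG_NUM_PER_LINE (4) u32 words and padded to a line boundary with SEPARATOR_VALUE words, so a consumer of the ethtool -d blob can find block boundaries; the sentinel moves from 0xFFFFFFFF to 0xFDFCFBFA, presumably because all-ones is too common as a real (or faulted-read) register value. The padding arithmetic, worked through:

/* Example with a block of 21 registers: */
int reg_num = 21;
int separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
/* separator_num == 3, so 21 + 3 == 24 words == 6 full 4-word lines */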
@@ -9334,24 +9641,40 @@ static int hclge_get_regs_len(struct hnae3_handle *handle)
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
-	u32 regs_num_32_bit, regs_num_64_bit;
+	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
+	int regs_lines_32_bit, regs_lines_64_bit;
 	int ret;

 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"Get register number failed, ret = %d.\n", ret);
-		return -EOPNOTSUPP;
+		return ret;
 	}

-	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
-	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
-	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
-	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
+	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"Get dfx reg len failed, ret = %d.\n", ret);
+		return ret;
+	}
+
+	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
+		REG_SEPARATOR_LINE;
+	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
+		REG_SEPARATOR_LINE;
+	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
+		REG_SEPARATOR_LINE;
+	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
+		REG_SEPARATOR_LINE;
+	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
+		REG_SEPARATOR_LINE;
+	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
+		REG_SEPARATOR_LINE;

 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
-		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
-		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
+		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
 }

 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
@@ -9361,9 +9684,8 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
 	u32 regs_num_32_bit, regs_num_64_bit;
-	int i, j, reg_um, separator_num;
+	int i, reg_num, separator_num, ret;
 	u32 *reg = data;
-	int ret;

 	*version = hdev->fw_version;

@@ -9374,56 +9696,36 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
 		return;
 	}

-	/* fetching per-PF registers valus from PF PCIe register space */
-	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
-	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
-	for (i = 0; i < reg_um; i++)
-		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
-	for (i = 0; i < separator_num; i++)
-		*reg++ = SEPARATOR_VALUE;
-
-	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
-	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
-	for (i = 0; i < reg_um; i++)
-		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
-	for (i = 0; i < separator_num; i++)
-		*reg++ = SEPARATOR_VALUE;
-
-	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
-	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
-	for (j = 0; j < kinfo->num_tqps; j++) {
-		for (i = 0; i < reg_um; i++)
-			*reg++ = hclge_read_dev(&hdev->hw,
-						ring_reg_addr_list[i] +
-						0x200 * j);
-		for (i = 0; i < separator_num; i++)
-			*reg++ = SEPARATOR_VALUE;
-	}
-
-	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
-	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
-	for (j = 0; j < hdev->num_msi_used - 1; j++) {
-		for (i = 0; i < reg_um; i++)
-			*reg++ = hclge_read_dev(&hdev->hw,
-						tqp_intr_reg_addr_list[i] +
-						4 * j);
-		for (i = 0; i < separator_num; i++)
-			*reg++ = SEPARATOR_VALUE;
-	}
+	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

 	/* fetching PF common registers values from firmware */
 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"Get 32 bit register failed, ret = %d.\n", ret);
 		return;
 	}
+	reg_num = regs_num_32_bit;
+	reg += reg_num;
+	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+	for (i = 0; i < separator_num; i++)
+		*reg++ = SEPARATOR_VALUE;

-	reg += regs_num_32_bit;
 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
-	if (ret)
+	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"Get 64 bit register failed, ret = %d.\n", ret);
+		return;
+	}
+	reg_num = regs_num_64_bit * 2;
+	reg += reg_num;
+	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+	for (i = 0; i < separator_num; i++)
+		*reg++ = SEPARATOR_VALUE;
+
+	ret = hclge_get_dfx_reg(hdev, reg);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"Get dfx register failed, ret = %d.\n", ret);
 }

 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
@@ -9538,7 +9840,7 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.set_mtu = hclge_set_mtu,
 	.reset_queue = hclge_reset_tqp,
 	.get_stats = hclge_get_stats,
-	.get_mac_pause_stats = hclge_get_mac_pause_stat,
+	.get_mac_stats = hclge_get_mac_stat,
 	.update_stats = hclge_update_stats,
 	.get_strings = hclge_get_strings,
 	.get_sset_count = hclge_get_sset_count,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1029,4 +1029,6 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
 				      u16 state, u16 vlan_tag, u16 qos,
 				      u16 vlan_proto);
 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
+				struct hclge_desc *desc);
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1889,21 +1889,20 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
 						      u32 *clearval)
 {
-	u32 val, cmdq_src_reg, rst_ing_reg;
+	u32 val, cmdq_stat_reg, rst_ing_reg;

 	/* fetch the events from their corresponding regs */
-	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
-					HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
+					 HCLGEVF_VECTOR0_CMDQ_STAT_REG);

-	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
+	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
 		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
 		dev_info(&hdev->pdev->dev,
 			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
 		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
 		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
-		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
-		*clearval = cmdq_src_reg;
+		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
 		hdev->rst_stats.vf_rst_cnt++;
 		/* set up VF hardware reset status, its PF will clear
 		 * this status when PF has initialized done.
@@ -1915,9 +1914,20 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
 	}

 	/* check for vector0 mailbox(=CMDQ RX) event source */
-	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
-		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
-		*clearval = cmdq_src_reg;
+	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
+		/* for revision 0x21, clearing interrupt is writing bit 0
+		 * to the clear register, writing bit 1 means to keep the
+		 * old value.
+		 * for revision 0x20, the clear register is a read & write
+		 * register, so we should just write 0 to the bit we are
+		 * handling, and keep other bits as cmdq_stat_reg.
+		 */
+		if (hdev->pdev->revision >= 0x21)
+			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+		else
+			*clearval = cmdq_stat_reg &
+				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+
 		return HCLGEVF_VECTOR0_EVENT_MBX;
 	}

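Note: the two clear-register semantics this VF fix (patch 02/12) distinguishes: on revision 0x21 hardware the register is write-0-to-clear / write-1-to-keep, so exactly one event is cleared by writing all-ones except the handled bit; on revision 0x20 it is a plain read-write register, so the latched status is written back with only the handled bit cleared. As a small helper-style sketch (clear_one_bit is illustrative, not driver code):

static u32 clear_one_bit(u32 stat, unsigned int bit, u8 revision)
{
	if (revision >= 0x21)
		return ~(1U << bit);	/* 0 clears, 1 keeps the rest */
	return stat & ~BIT(bit);	/* plain RW: write back the rest */
}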
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -87,6 +87,8 @@

 /* Vector0 interrupt CMDQ event source register(RW) */
 #define HCLGEVF_VECTOR0_CMDQ_SRC_REG	0x27100
+/* Vector0 interrupt CMDQ event status register(RO) */
+#define HCLGEVF_VECTOR0_CMDQ_STAT_REG	0x27104
 /* CMDQ register bits for RX event(=MBX event) */
 #define HCLGEVF_VECTOR0_RX_CMDQ_INT_B	1
 /* RST register bits for RESET event */
@@ -123,7 +125,7 @@
 #define HCLGEVF_S_IP_BIT		BIT(3)
 #define HCLGEVF_V_TAG_BIT		BIT(4)

-#define HCLGEVF_STATS_TIMER_INTERVAL	(36)
+#define HCLGEVF_STATS_TIMER_INTERVAL	36U

 enum hclgevf_evt_cause {
 	HCLGEVF_VECTOR0_EVENT_RST,