Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-28 11:18:45 +07:00)
Merge branch 'hns3-next'

Huazhong Tan says:

====================
code optimizations & bugfixes for HNS3 driver

This patchset includes bugfixes and code optimizations for the HNS3
ethernet controller driver.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit d075961b05
@@ -42,6 +42,8 @@ enum HCLGE_MBX_OPCODE {
	HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
	HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */
	HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */

	HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
};

/* below are per-VF mac-vlan subcodes */

@@ -32,6 +32,9 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
void hnae3_set_client_init_flag(struct hnae3_client *client,
				struct hnae3_ae_dev *ae_dev, int inited)
{
	if (!client || !ae_dev)
		return;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);

@@ -109,6 +112,9 @@ int hnae3_register_client(struct hnae3_client *client)
	struct hnae3_ae_dev *ae_dev;
	int ret = 0;

	if (!client)
		return -ENODEV;

	mutex_lock(&hnae3_common_lock);
	/* one system should only have one client for every type */
	list_for_each_entry(client_tmp, &hnae3_client_list, node) {

@@ -141,6 +147,9 @@ void hnae3_unregister_client(struct hnae3_client *client)
{
	struct hnae3_ae_dev *ae_dev;

	if (!client)
		return;

	mutex_lock(&hnae3_common_lock);
	/* un-initialize the client on every matched port */
	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {

@@ -163,6 +172,9 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
	struct hnae3_client *client;
	int ret = 0;

	if (!ae_algo)
		return;

	mutex_lock(&hnae3_common_lock);

	list_add_tail(&ae_algo->node, &hnae3_ae_algo_list);

@@ -173,8 +185,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
		if (!id)
			continue;

		/* ae_dev init should set flag */
		if (!ae_algo->ops) {
			dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
			continue;
		}
		ae_dev->ops = ae_algo->ops;

		ret = ae_algo->ops->init_ae_dev(ae_dev);
		if (ret) {
			dev_err(&ae_dev->pdev->dev,

@@ -182,6 +198,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
			continue;
		}

		/* ae_dev init should set flag */
		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);

		/* check the client list for the match with this ae_dev type and

@@ -209,6 +226,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
	struct hnae3_ae_dev *ae_dev;
	struct hnae3_client *client;

	if (!ae_algo)
		return;

	mutex_lock(&hnae3_common_lock);
	/* Check if there are matched ae_dev */
	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {

@@ -245,6 +265,9 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
	struct hnae3_client *client;
	int ret = 0;

	if (!ae_dev)
		return -ENODEV;

	mutex_lock(&hnae3_common_lock);

	list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);

@@ -255,15 +278,13 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
		if (!id)
			continue;

		ae_dev->ops = ae_algo->ops;

		if (!ae_dev->ops) {
			dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
		if (!ae_algo->ops) {
			dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
			ret = -EOPNOTSUPP;
			goto out_err;
		}
		ae_dev->ops = ae_algo->ops;

		/* ae_dev init should set flag */
		ret = ae_dev->ops->init_ae_dev(ae_dev);
		if (ret) {
			dev_err(&ae_dev->pdev->dev,

@@ -271,6 +292,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
			goto out_err;
		}

		/* ae_dev init should set flag */
		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
		break;
	}

@@ -307,6 +329,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
	struct hnae3_ae_algo *ae_algo;
	struct hnae3_client *client;

	if (!ae_dev)
		return;

	mutex_lock(&hnae3_common_lock);
	/* Check if there are matched ae_algo */
	list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {

@@ -621,12 +621,11 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
		hns3_get_ksettings(h, cmd);
		break;
	case HNAE3_MEDIA_TYPE_COPPER:
		if (!netdev->phydev)
			return -EOPNOTSUPP;

		cmd->base.port = PORT_TP;
		if (!netdev->phydev)
			hns3_get_ksettings(h, cmd);
		else
			phy_ethtool_ksettings_get(netdev->phydev, cmd);

		break;
	default:

@@ -390,6 +390,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
	return 0;
}

static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
{
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}

static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
{
	spin_lock(&ring->lock);

@@ -402,3 +416,15 @@ void hclge_destroy_cmd_queue(struct hclge_hw *hw)
	hclge_destroy_queue(&hw->cmq.csq);
	hclge_destroy_queue(&hw->cmq.crq);
}

void hclge_cmd_uninit(struct hclge_dev *hdev)
{
	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
	hclge_cmd_uninit_regs(&hdev->hw);
	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	hclge_destroy_cmd_queue(&hdev->hw);
}

@@ -975,6 +975,6 @@ enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
					  struct hclge_desc *desc);

void hclge_destroy_cmd_queue(struct hclge_hw *hw);
void hclge_cmd_uninit(struct hclge_dev *hdev);
int hclge_cmd_queue_init(struct hclge_dev *hdev);
#endif

@@ -80,7 +80,7 @@ static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
	{ .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
	{ .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
	{ .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_erre" },
	{ .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
	{ .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
	{ .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },

@@ -219,6 +219,12 @@ static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
	{ .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" },
	{ .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" },
	{ .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" },
	{ .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err" },
	{ .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err" },
	{ .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err" },
	{ .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err" },
	{ .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err" },
	{ .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err" },
	{ /* sentinel */ }
};

@@ -277,6 +283,45 @@ static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
	{ /* sentinel */ }
};

#define HCLGE_SSU_MEM_ECC_ERR(x) \
	{ .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" }

static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
	HCLGE_SSU_MEM_ECC_ERR(0),
	HCLGE_SSU_MEM_ECC_ERR(1),
	HCLGE_SSU_MEM_ECC_ERR(2),
	HCLGE_SSU_MEM_ECC_ERR(3),
	HCLGE_SSU_MEM_ECC_ERR(4),
	HCLGE_SSU_MEM_ECC_ERR(5),
	HCLGE_SSU_MEM_ECC_ERR(6),
	HCLGE_SSU_MEM_ECC_ERR(7),
	HCLGE_SSU_MEM_ECC_ERR(8),
	HCLGE_SSU_MEM_ECC_ERR(9),
	HCLGE_SSU_MEM_ECC_ERR(10),
	HCLGE_SSU_MEM_ECC_ERR(11),
	HCLGE_SSU_MEM_ECC_ERR(12),
	HCLGE_SSU_MEM_ECC_ERR(13),
	HCLGE_SSU_MEM_ECC_ERR(14),
	HCLGE_SSU_MEM_ECC_ERR(15),
	HCLGE_SSU_MEM_ECC_ERR(16),
	HCLGE_SSU_MEM_ECC_ERR(17),
	HCLGE_SSU_MEM_ECC_ERR(18),
	HCLGE_SSU_MEM_ECC_ERR(19),
	HCLGE_SSU_MEM_ECC_ERR(20),
	HCLGE_SSU_MEM_ECC_ERR(21),
	HCLGE_SSU_MEM_ECC_ERR(22),
	HCLGE_SSU_MEM_ECC_ERR(23),
	HCLGE_SSU_MEM_ECC_ERR(24),
	HCLGE_SSU_MEM_ECC_ERR(25),
	HCLGE_SSU_MEM_ECC_ERR(26),
	HCLGE_SSU_MEM_ECC_ERR(27),
	HCLGE_SSU_MEM_ECC_ERR(28),
	HCLGE_SSU_MEM_ECC_ERR(29),
	HCLGE_SSU_MEM_ECC_ERR(30),
	HCLGE_SSU_MEM_ECC_ERR(31),
	{ /* sentinel */ }
};

static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
	{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" },

@@ -835,13 +880,15 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
	desc_data = (__le32 *)&desc[2];
	status = le32_to_cpu(*(desc_data + 2));
	if (status) {
		dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_0 ssu_ecc_mbit_int[31:0]\n");
		hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
				&hclge_ssu_mem_ecc_err_int[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

	status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
	if (status) {
		dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_ecc_mbit_int[32]\n");
		dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
			 status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}

@@ -997,6 +1044,13 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
		hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
				&hclge_igu_egu_tnl_int[0], status);

	/* log PPU(RCB) errors */
	desc_data = (__le32 *)&desc[3];
	status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
	if (status)
		hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
				&hclge_ppu_pf_abnormal_int[0], status);

	/* clear all PF RAS errors */
	hclge_cmd_reuse_desc(&desc[0], false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

@@ -1332,14 +1386,13 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
	}

	/* log PPU(RCB) errors */
	/* log PPU(RCB) MPF errors */
	desc_data = (__le32 *)&desc[5];
	status = le32_to_cpu(*(desc_data + 2)) &
			HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
	if (status) {
		dev_warn(dev,
			 "PPU_MPF_ABNORMAL_INT_ST2[28:29], err_status(0x%x)\n",
			 status);
		hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
				&hclge_ppu_mpf_abnormal_int_st2[0], status);
		set_bit(HNAE3_CORE_RESET, reset_requests);
	}

@@ -1386,7 +1439,7 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
		hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
				&hclge_ppp_pf_abnormal_int[0], status);

	/* PPU(RCB) PF errors */
	/* log PPU(RCB) PF errors */
	desc_data = (__le32 *)&desc[3];
	status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
	if (status)

@@ -45,8 +45,8 @@
#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
#define HCLGE_NCSI_ERR_INT_EN 0x3
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
#define HCLGE_MAC_COMMON_ERR_INT_EN GENMASK(7, 0)
#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK GENMASK(7, 0)
#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF
#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0)

@@ -79,6 +79,7 @@
#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0)
#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0)
#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28)
#define HCLGE_PPU_PF_INT_RAS_MASK 0x18
#define HCLGE_PPU_PF_INT_MSIX_MASK 0x27
#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0)
#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0)

@@ -839,37 +839,67 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)

@@ -1299,6 +1329,9 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);

@@ -6044,6 +6077,103 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
	return status;
}

void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg;
	struct list_head *list;

	if (!vport->vport_id)
		return;

	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
	if (!mac_cfg)
		return;

	mac_cfg->hd_tbl_status = true;
	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_add_tail(&mac_cfg->node, list);
}

void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			      bool is_write_tbl,
			      enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;
	bool uc_flag, mc_flag;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
			if (uc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_uc_addr_common(vport, mac_addr);

			if (mc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_mc_addr_common(vport, mac_addr);

			list_del(&mac_cfg->node);
			kfree(mac_cfg);
			break;
		}
	}
}

void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);

		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);

		mac_cfg->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&mac_cfg->node);
			kfree(mac_cfg);
		}
	}
}

void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport_mac_addr_cfg *mac, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}

		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}

static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{

@@ -6617,6 +6747,84 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}

void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
{
	struct hclge_vport_vlan_cfg *vlan;

	/* vlan 0 is reserved */
	if (!vlan_id)
		return;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = true;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}

void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
			       bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id, 0,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}

void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id, 0,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}

void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}

int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

@@ -7299,6 +7507,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	mutex_init(&hdev->vport_cfg_mutex);

	ret = hclge_pci_init(hdev);
	if (ret) {

@@ -7460,7 +7669,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);

@@ -7587,10 +7796,13 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_hw_error_set_state(hdev, false);
	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_cfg_mutex);
	ae_dev->priv = NULL;
}

@@ -188,6 +188,10 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_SUPPORT_25G_BIT BIT(2)
#define HCLGE_SUPPORT_50G_BIT BIT(3)
#define HCLGE_SUPPORT_100G_BIT BIT(4)
#define HCLGE_SUPPORT_100M_BIT BIT(6)
#define HCLGE_SUPPORT_10M_BIT BIT(7)
#define HCLGE_SUPPORT_GE \
	(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)

enum HCLGE_DEV_STATE {
	HCLGE_STATE_REINITING,

@@ -628,6 +632,23 @@ struct hclge_fd_ad_data {
	u16 rule_id;
};

struct hclge_vport_mac_addr_cfg {
	struct list_head node;
	int hd_tbl_status;
	u8 mac_addr[ETH_ALEN];
};

enum HCLGE_MAC_ADDR_TYPE {
	HCLGE_MAC_ADDR_UC,
	HCLGE_MAC_ADDR_MC
};

struct hclge_vport_vlan_cfg {
	struct list_head node;
	int hd_tbl_status;
	u16 vlan_id;
};

/* For each bit of TCAM entry, it uses a pair of 'x' and
 * 'y' to indicate which value to match, like below:
 * ----------------------------------

@@ -767,6 +788,8 @@ struct hclge_dev {
	/* unicast mac vlan space shared by PF and its VFs */
	u16 share_umv_size;
	struct mutex umv_mutex; /* protect share_umv_size */

	struct mutex vport_cfg_mutex; /* Protect stored vf table */
};

/* VPort level vlan tag configuration for TX direction */

@@ -834,6 +857,10 @@ struct hclge_vport {
	unsigned long state;
	unsigned long last_active_jiffies;
	u32 mps; /* Max packet size */

	struct list_head uc_mac_list; /* Store VF unicast table */
	struct list_head mc_mac_list; /* Store VF multicast table */
	struct list_head vlan_list; /* Store VF vlan table */
};

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,

@@ -888,4 +915,17 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type);
void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			       enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			      bool is_write_tbl,
			      enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id);
void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
			       bool is_write_tbl);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
#endif

@@ -224,12 +224,24 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,

		hclge_rm_uc_addr_common(vport, old_addr);
		status = hclge_add_uc_addr_common(vport, mac_addr);
		if (status)
		if (status) {
			hclge_add_uc_addr_common(vport, old_addr);
		} else {
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_UC);
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_UC);
		}
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
		status = hclge_add_uc_addr_common(vport, mac_addr);
		if (!status)
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_UC);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
		status = hclge_rm_uc_addr_common(vport, mac_addr);
		if (!status)
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_UC);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set unicast mac addr, unknown subcode %d\n",

@@ -255,8 +267,14 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,

	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
		status = hclge_add_mc_addr_common(vport, mac_addr);
		if (!status)
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_MC);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
		status = hclge_rm_mc_addr_common(vport, mac_addr);
		if (!status)
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_MC);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set mcast mac addr, unknown subcode %d\n",

@@ -287,6 +305,9 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
		memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
		status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
					       vlan, is_kill);
		if (!status)
			is_kill ? hclge_rm_vport_vlan_table(vport, vlan, false)
			: hclge_add_vport_vlan_table(vport, vlan);
	} else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
		struct hnae3_handle *handle = &vport->nic;
		bool en = mbx_req->msg[2] ? true : false;

@@ -585,6 +606,15 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
		case HCLGE_MBX_GET_LINK_MODE:
			hclge_get_link_mode(vport, req);
			break;
		case HCLGE_MBX_GET_VF_FLR_STATUS:
			mutex_lock(&hdev->vport_cfg_mutex);
			hclge_rm_vport_all_mac_table(vport, true,
						     HCLGE_MAC_ADDR_UC);
			hclge_rm_vport_all_mac_table(vport, true,
						     HCLGE_MAC_ADDR_MC);
			hclge_rm_vport_all_vlan_table(vport, true);
			mutex_unlock(&hdev->vport_cfg_mutex);
			break;
		default:
			dev_err(&hdev->pdev->dev,
				"un-supported mailbox message, code = %d\n",

@@ -8,12 +8,6 @@
#include "hclge_main.h"
#include "hclge_mdio.h"

#define HCLGE_PHY_SUPPORTED_FEATURES	(SUPPORTED_Autoneg | \
					 SUPPORTED_TP | \
					 PHY_10BT_FEATURES | \
					 PHY_100BT_FEATURES | \
					 SUPPORTED_1000baseT_Full)

enum hclge_mdio_c22_op_seq {
	HCLGE_MDIO_C22_WRITE = 1,
	HCLGE_MDIO_C22_READ = 2

@@ -217,16 +211,9 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
		return ret;
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
	linkmode_set_bit_array(phy_10_100_features_array,
			       ARRAY_SIZE(phy_10_100_features_array),
			       mask);
	linkmode_set_bit_array(phy_gbit_features_array,
			       ARRAY_SIZE(phy_gbit_features_array),
			       mask);
	linkmode_copy(mask, hdev->hw.mac.supported);
	linkmode_and(phydev->supported, phydev->supported, mask);
	phy_support_asym_pause(phydev);
	linkmode_copy(phydev->advertising, phydev->supported);

	return 0;
}

@@ -362,8 +362,28 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
	return 0;
}

static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
{
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
}

void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);
	clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	hclgevf_cmd_uninit_regs(&hdev->hw);
	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
}