mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-29 20:46:41 +07:00
Merge branch 'mlx4'
Or Gerlitz says:

====================
mlx4 driver fixes for 3.19-rc1

Just fixes for two small issues introduced in the 3.19 merge window
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit c5e44b6985
@@ -787,11 +787,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
 			field = 3;
 		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
-		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
-			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
 	} else {
 		dev_cap->bf_reg_size = 0;
-		mlx4_dbg(dev, "BlueFlame not available\n");
 	}
 
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
@@ -902,9 +899,6 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		goto out;
 	}
 
-	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
-		 dev_cap->bmme_flags, dev_cap->reserved_lkey);
-
 	/*
 	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
 	 * we can't use any EQs whose doorbell falls on that page,
@@ -916,6 +910,21 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	else
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;
 
+out:
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
+void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+{
+	if (dev_cap->bf_reg_size > 0)
+		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
+			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
+	else
+		mlx4_dbg(dev, "BlueFlame not available\n");
+
+	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
+		 dev_cap->bmme_flags, dev_cap->reserved_lkey);
 	mlx4_dbg(dev, "Max ICM size %lld MB\n",
 		 (unsigned long long) dev_cap->max_icm_sz >> 20);
 	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
@@ -949,13 +958,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		 dev_cap->dmfs_high_rate_qpn_base);
 	mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
 		 dev_cap->dmfs_high_rate_qpn_range);
-
 	dump_dev_cap_flags(dev, dev_cap->flags);
 	dump_dev_cap_flags2(dev, dev_cap->flags2);
-
-out:
-	mlx4_free_cmd_mailbox(dev, mailbox);
-	return err;
 }
 
 int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
@@ -224,6 +224,7 @@ struct mlx4_set_ib_param {
 	u32 cap_mask;
 };
 
+void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
 int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap);
 int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
@@ -305,6 +305,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
 		return err;
 	}
+	mlx4_dev_cap_dump(dev, dev_cap);
 
 	if (dev_cap->min_page_sz > PAGE_SIZE) {
 		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
|
|||||||
u8 total_vfs, int existing_vfs)
|
u8 total_vfs, int existing_vfs)
|
||||||
{
|
{
|
||||||
u64 dev_flags = dev->flags;
|
u64 dev_flags = dev->flags;
|
||||||
|
|
||||||
dev->dev_vfs = kzalloc(
|
|
||||||
total_vfs * sizeof(*dev->dev_vfs),
|
|
||||||
GFP_KERNEL);
|
|
||||||
if (NULL == dev->dev_vfs) {
|
|
||||||
mlx4_err(dev, "Failed to allocate memory for VFs\n");
|
|
||||||
goto disable_sriov;
|
|
||||||
} else if (!(dev->flags & MLX4_FLAG_SRIOV)) {
|
|
||||||
int err = 0;
|
int err = 0;
|
||||||
|
|
||||||
atomic_inc(&pf_loading);
|
atomic_inc(&pf_loading);
|
||||||
if (existing_vfs) {
|
if (dev->flags & MLX4_FLAG_SRIOV) {
|
||||||
if (existing_vfs != total_vfs)
|
if (existing_vfs != total_vfs) {
|
||||||
mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
|
mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
|
||||||
existing_vfs, total_vfs);
|
existing_vfs, total_vfs);
|
||||||
} else {
|
total_vfs = existing_vfs;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
|
||||||
|
if (NULL == dev->dev_vfs) {
|
||||||
|
mlx4_err(dev, "Failed to allocate memory for VFs\n");
|
||||||
|
goto disable_sriov;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!(dev->flags & MLX4_FLAG_SRIOV)) {
|
||||||
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
|
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
|
||||||
err = pci_enable_sriov(pdev, total_vfs);
|
err = pci_enable_sriov(pdev, total_vfs);
|
||||||
}
|
}
|
||||||
if (err) {
|
if (err) {
|
||||||
mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
|
mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
|
||||||
err);
|
err);
|
||||||
atomic_dec(&pf_loading);
|
|
||||||
goto disable_sriov;
|
goto disable_sriov;
|
||||||
} else {
|
} else {
|
||||||
mlx4_warn(dev, "Running in master mode\n");
|
mlx4_warn(dev, "Running in master mode\n");
|
||||||
@@ -2519,10 +2521,10 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
 		dev_flags &= ~MLX4_FLAG_SLAVE;
 		dev->num_vfs = total_vfs;
 	}
-	}
 	return dev_flags;
 
 disable_sriov:
+	atomic_dec(&pf_loading);
 	dev->num_vfs = 0;
 	kfree(dev->dev_vfs);
 	return dev_flags & ~MLX4_FLAG_MASTER;
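Taken together, the two mlx4_enable_sriov() hunks rework the enable flow: pf_loading is incremented unconditionally up front, an already-enabled SR-IOV configuration is detected first (clamping total_vfs to the existing VF count), dev->dev_vfs is allocated after that, pci_enable_sriov() is only called when SR-IOV is not already active, and the matching atomic_dec(&pf_loading) now sits on the shared disable_sriov error path (the success path drops it later in mlx4_load_one(), as the last hunk below shows). A condensed sketch of the reworked function, with the warning and debug prints trimmed:

/* Condensed sketch of mlx4_enable_sriov() after this merge; prints elided. */
static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
			     u8 total_vfs, int existing_vfs)
{
	u64 dev_flags = dev->flags;
	int err = 0;

	atomic_inc(&pf_loading);
	if (dev->flags & MLX4_FLAG_SRIOV) {
		/* SR-IOV already on: reuse the existing VF count */
		if (existing_vfs != total_vfs)
			total_vfs = existing_vfs;
	}

	dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
	if (!dev->dev_vfs)
		goto disable_sriov;

	if (!(dev->flags & MLX4_FLAG_SRIOV))
		err = pci_enable_sriov(pdev, total_vfs);
	if (err)
		goto disable_sriov;

	dev_flags |= MLX4_FLAG_MASTER | MLX4_FLAG_SRIOV;
	dev_flags &= ~MLX4_FLAG_SLAVE;
	dev->num_vfs = total_vfs;
	return dev_flags;

disable_sriov:
	atomic_dec(&pf_loading);	/* balances the unconditional atomic_inc() above */
	dev->num_vfs = 0;
	kfree(dev->dev_vfs);
	return dev_flags & ~MLX4_FLAG_MASTER;
}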
@@ -2606,8 +2608,10 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 		}
 
 		if (total_vfs) {
-			existing_vfs = pci_num_vf(pdev);
 			dev->flags = MLX4_FLAG_MASTER;
+			existing_vfs = pci_num_vf(pdev);
+			if (existing_vfs)
+				dev->flags |= MLX4_FLAG_SRIOV;
 			dev->num_vfs = total_vfs;
 		}
 	}
@@ -2643,6 +2647,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 	}
 
 	if (mlx4_is_master(dev)) {
+		/* when we hit the goto slave_start below, dev_cap already initialized */
 		if (!dev_cap) {
 			dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
 
@@ -2849,6 +2854,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 	if (mlx4_is_master(dev) && dev->num_vfs)
 		atomic_dec(&pf_loading);
 
+	kfree(dev_cap);
 	return 0;
 
 err_port:
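The mlx4_load_one() hunks adjust the probe path to match: MLX4_FLAG_MASTER is set before the existing VFs are counted, MLX4_FLAG_SRIOV is added when pci_num_vf() reports VFs already enabled (so the enable path above can skip pci_enable_sriov() and reuse the existing count), and the dev_cap buffer allocated on the master path is now freed before the successful return. A condensed sketch of the touched probe-path pieces, with the surrounding logic elided:

/* Condensed from the mlx4_load_one() hunks above; surrounding code elided. */
	if (total_vfs) {
		dev->flags = MLX4_FLAG_MASTER;
		existing_vfs = pci_num_vf(pdev);	/* now read after MASTER is set */
		if (existing_vfs)
			dev->flags |= MLX4_FLAG_SRIOV;	/* SR-IOV already enabled on this device */
		dev->num_vfs = total_vfs;
	}

	/* ... device setup ... */

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	kfree(dev_cap);		/* dev_cap was allocated earlier on the master path */
	return 0;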