Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-28 11:18:45 +07:00)

Commit 228914504c
I see no good reason for the "If unsure, say N" advice in the description of the NVME_HWMON configuration option. It is not dangerous, it does not select any other option, and it has a fairly low overhead. As the option is already not enabled by default, further suggesting that hesitant users not enable it is not useful anyway. Unlike some other options, where the description alone may not be sufficient for users to make a decision, NVME_HWMON is pretty simple to grasp in my opinion, so just let the user do what they want.

Signed-off-by: Jean Delvare <jdelvare@suse.de>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Reviewed-by: Guenter Roeck <linux@roeck-us.net>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
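The NVME_HWMON entry in the listing below already reflects this change; the removed hunk presumably looked something like the following (a reconstruction from the commit message, not the actual patch):

 config NVME_HWMON
 	bool "NVMe hardware monitoring"
 	depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
 	help
 	  This provides support for NVMe hardware monitoring. If enabled,
 	  a hardware monitoring device will be created for each NVMe drive
 	  in the system.
-
-	  If unsure, say N.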
86 lines, 2.6 KiB, Plaintext
# SPDX-License-Identifier: GPL-2.0-only
config NVME_CORE
	tristate
	select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY

config BLK_DEV_NVME
	tristate "NVM Express block device"
	depends on PCI && BLOCK
	select NVME_CORE
	---help---
	  The NVM Express driver is for solid state drives directly
	  connected to the PCI or PCI Express bus. If you know you
	  don't have one of these, it is safe to answer N.

	  To compile this driver as a module, choose M here: the
	  module will be called nvme.

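As a rough illustration (not part of the Kconfig file itself), a .config fragment that builds this driver as the nvme module could look like:

  # sketch: PCIe NVMe driver built as a module (nvme.ko)
  CONFIG_BLK_DEV_NVME=m
  # NVME_CORE is pulled in automatically by the select above
  CONFIG_NVME_CORE=m
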
config NVME_MULTIPATH
	bool "NVMe multipath support"
	depends on NVME_CORE
	---help---
	  This option enables support for multipath access to NVMe
	  subsystems. If this option is enabled, only a single
	  /dev/nvmeXnY device will show up for each NVMe namespace,
	  even if it is accessible through multiple controllers.

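If the option is built in, native multipath handling can still be turned off at boot via the nvme_core.multipath module parameter; a usage sketch, assuming a kernel built with this option:

  # sketch: enable the option at build time ...
  CONFIG_NVME_MULTIPATH=y
  # ... and optionally fall back to per-path devices on the kernel command line
  nvme_core.multipath=N
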
config NVME_HWMON
	bool "NVMe hardware monitoring"
	depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
	help
	  This provides support for NVMe hardware monitoring. If enabled,
	  a hardware monitoring device will be created for each NVMe drive
	  in the system.

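The depends expression above simply rules out the case where the hwmon core is a module while the NVMe core is built in; one valid combination, as a sketch:

  # sketch: NVMe core built in, so the hwmon core must be built in too
  CONFIG_HWMON=y
  CONFIG_NVME_CORE=y
  CONFIG_NVME_HWMON=y
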
config NVME_FABRICS
	tristate

config NVME_RDMA
	tristate "NVM Express over Fabrics RDMA host driver"
	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
	select NVME_CORE
	select NVME_FABRICS
	select SG_POOL
	help
	  This provides support for the NVMe over Fabrics protocol using
	  the RDMA (Infiniband, RoCE, iWarp) transport. This allows you
	  to use remote block devices exported using the NVMe protocol set.

	  To configure an NVMe over Fabrics controller, use the nvme-cli tool
	  from https://github.com/linux-nvme/nvme-cli.

	  If unsure, say N.

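As a usage sketch (the address and NQN below are placeholders, not taken from this document), connecting to a remote subsystem over RDMA with nvme-cli typically looks like:

  # sketch: connect to an NVMe-oF subsystem over RDMA
  nvme connect --transport=rdma --traddr=192.168.1.10 --trsvcid=4420 \
      --nqn=nqn.2014-08.org.example:subsys1
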
config NVME_FC
	tristate "NVM Express over Fabrics FC host driver"
	depends on BLOCK
	depends on HAS_DMA
	select NVME_CORE
	select NVME_FABRICS
	select SG_POOL
	help
	  This provides support for the NVMe over Fabrics protocol using
	  the FC transport. This allows you to use remote block devices
	  exported using the NVMe protocol set.

	  To configure an NVMe over Fabrics controller, use the nvme-cli tool
	  from https://github.com/linux-nvme/nvme-cli.

	  If unsure, say N.

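A corresponding sketch for FC, where target and host addresses are WWNN/WWPN pairs (all values and the NQN below are placeholders):

  # sketch: connect to an NVMe-oF subsystem over Fibre Channel
  nvme connect --transport=fc \
      --traddr=nn-0x200000109b1234aa:pn-0x100000109b1234aa \
      --host-traddr=nn-0x200000109b5678bb:pn-0x100000109b5678bb \
      --nqn=nqn.2014-08.org.example:subsys1
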
config NVME_TCP
	tristate "NVM Express over Fabrics TCP host driver"
	depends on INET
	depends on BLK_DEV_NVME
	select NVME_FABRICS
	select CRYPTO_CRC32C
	help
	  This provides support for the NVMe over Fabrics protocol using
	  the TCP transport. This allows you to use remote block devices
	  exported using the NVMe protocol set.

	  To configure an NVMe over Fabrics controller, use the nvme-cli tool
	  from https://github.com/linux-nvme/nvme-cli.

	  If unsure, say N.

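And for TCP (address, port, and NQN are again placeholders), discovery and connection with nvme-cli typically look like:

  # sketch: discover subsystems exported by a remote NVMe/TCP target, then connect
  nvme discover --transport=tcp --traddr=192.168.1.10 --trsvcid=4420
  nvme connect --transport=tcp --traddr=192.168.1.10 --trsvcid=4420 \
      --nqn=nqn.2014-08.org.example:subsys1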