Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
Synced 2024-12-18 20:47:00 +07:00

Commit a754bd5f18
Currently t10-pi can only be built into the block layer, which via crc-t10dif pulls in a whole chunk of the Crypto API. In fact all users of t10-pi work as modules and there is no reason for it to always be built-in.

This patch adds a new hidden option for t10-pi that is selected automatically based on BLK_DEV_INTEGRITY and whether the users of t10-pi are built-in or not.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
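For context, a minimal sketch of the hidden option the message describes, roughly as it would appear in block/Kconfig (the body shown here is an assumption reconstructed from the commit message, not part of the file listed below):

config BLK_DEV_INTEGRITY_T10
	tristate
	depends on BLK_DEV_INTEGRITY
	select CRC_T10DIF

Users of t10-pi such as NVMe then pull it in with "select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY" (see NVME_CORE below), so the t10-pi code is built as a module when its only users are modular rather than always being built in.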
88 lines
2.6 KiB
Plaintext
# SPDX-License-Identifier: GPL-2.0-only
config NVME_CORE
	tristate
	select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY

config BLK_DEV_NVME
	tristate "NVM Express block device"
	depends on PCI && BLOCK
	select NVME_CORE
	---help---
	  The NVM Express driver is for solid state drives directly
	  connected to the PCI or PCI Express bus. If you know you
	  don't have one of these, it is safe to answer N.

	  To compile this driver as a module, choose M here: the
	  module will be called nvme.

config NVME_MULTIPATH
	bool "NVMe multipath support"
	depends on NVME_CORE
	---help---
	  This option enables support for multipath access to NVMe
	  subsystems. If this option is enabled only a single
	  /dev/nvmeXnY device will show up for each NVMe namespaces,
	  even if it is accessible through multiple controllers.

config NVME_HWMON
	bool "NVMe hardware monitoring"
	depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
	help
	  This provides support for NVMe hardware monitoring. If enabled,
	  a hardware monitoring device will be created for each NVMe drive
	  in the system.

	  If unsure, say N.

config NVME_FABRICS
	tristate

config NVME_RDMA
	tristate "NVM Express over Fabrics RDMA host driver"
	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
	select NVME_CORE
	select NVME_FABRICS
	select SG_POOL
	help
	  This provides support for the NVMe over Fabrics protocol using
	  the RDMA (Infiniband, RoCE, iWarp) transport. This allows you
	  to use remote block devices exported using the NVMe protocol set.

	  To configure a NVMe over Fabrics controller use the nvme-cli tool
	  from https://github.com/linux-nvme/nvme-cli.

	  If unsure, say N.

config NVME_FC
	tristate "NVM Express over Fabrics FC host driver"
	depends on BLOCK
	depends on HAS_DMA
	select NVME_CORE
	select NVME_FABRICS
	select SG_POOL
	help
	  This provides support for the NVMe over Fabrics protocol using
	  the FC transport. This allows you to use remote block devices
	  exported using the NVMe protocol set.

	  To configure a NVMe over Fabrics controller use the nvme-cli tool
	  from https://github.com/linux-nvme/nvme-cli.

	  If unsure, say N.

config NVME_TCP
	tristate "NVM Express over Fabrics TCP host driver"
	depends on INET
	depends on BLK_DEV_NVME
	select NVME_FABRICS
	select CRYPTO_CRC32C
	help
	  This provides support for the NVMe over Fabrics protocol using
	  the TCP transport. This allows you to use remote block devices
	  exported using the NVMe protocol set.

	  To configure a NVMe over Fabrics controller use the nvme-cli tool
	  from https://github.com/linux-nvme/nvme-cli.

	  If unsure, say N.
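The fabrics help texts above point to the nvme-cli tool for setting up controllers. As a rough illustration of how these symbols combine in a modular host build, a hypothetical .config fragment might look like this (the values are assumptions for illustration only, not taken from this tree):

CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_INTEGRITY_T10=m
CONFIG_NVME_CORE=m
CONFIG_BLK_DEV_NVME=m
CONFIG_NVME_MULTIPATH=y
CONFIG_HWMON=y
CONFIG_NVME_HWMON=y
CONFIG_NVME_FABRICS=m
CONFIG_NVME_TCP=m

With NVME_CORE=m, the "select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY" in NVME_CORE pulls in the t10-pi code as a module as well, which is the modular behaviour the commit above is aiming for.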